sec.c (193579) sec.c (194101)
1/*-
2 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
19 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
21 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
22 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26/*
27 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
28 * 3.0 are supported.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/sec/sec.c 193579 2009-06-06 09:37:55Z raj $");
32__FBSDID("$FreeBSD: head/sys/dev/sec/sec.c 194101 2009-06-13 08:57:04Z raj $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/bus.h>
37#include <sys/endian.h>
38#include <sys/kernel.h>
39#include <sys/lock.h>
40#include <sys/malloc.h>
41#include <sys/mbuf.h>
42#include <sys/module.h>
43#include <sys/mutex.h>
44#include <sys/random.h>
45#include <sys/rman.h>
46
47#include <machine/bus.h>
48#include <machine/ocpbus.h>
49#include <machine/resource.h>
50
51#include <opencrypto/cryptodev.h>
52#include "cryptodev_if.h"
53
54#include <dev/sec/sec.h>
55
56static int sec_probe(device_t dev);
57static int sec_attach(device_t dev);
58static int sec_detach(device_t dev);
59static int sec_suspend(device_t dev);
60static int sec_resume(device_t dev);
61static void sec_shutdown(device_t dev);
61static int sec_shutdown(device_t dev);
62static void sec_primary_intr(void *arg);
63static void sec_secondary_intr(void *arg);
64static int sec_setup_intr(struct sec_softc *sc, struct resource **ires,
65 void **ihand, int *irid, driver_intr_t handler, const char *iname);
66static void sec_release_intr(struct sec_softc *sc, struct resource *ires,
67 void *ihand, int irid, const char *iname);
68static int sec_controller_reset(struct sec_softc *sc);
69static int sec_channel_reset(struct sec_softc *sc, int channel, int full);
70static int sec_init(struct sec_softc *sc);
71static int sec_alloc_dma_mem(struct sec_softc *sc,
72 struct sec_dma_mem *dma_mem, bus_size_t size);
73static int sec_desc_map_dma(struct sec_softc *sc,
74 struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
75 struct sec_desc_map_info *sdmi);
76static void sec_free_dma_mem(struct sec_dma_mem *dma_mem);
77static void sec_enqueue(struct sec_softc *sc);
78static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
79 int channel);
80static int sec_eu_channel(struct sec_softc *sc, int eu);
81static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
82 u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
83static int sec_make_pointer_direct(struct sec_softc *sc,
84 struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
85static int sec_alloc_session(struct sec_softc *sc);
86static int sec_newsession(device_t dev, u_int32_t *sidp,
87 struct cryptoini *cri);
88static int sec_freesession(device_t dev, uint64_t tid);
89static int sec_process(device_t dev, struct cryptop *crp, int hint);
90static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
91 struct cryptoini **mac);
92static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
93 struct cryptodesc **mac);
94static int sec_build_common_ns_desc(struct sec_softc *sc,
95 struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
96 struct cryptodesc *enc, int buftype);
97static int sec_build_common_s_desc(struct sec_softc *sc,
98 struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
99 struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
100
101static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
102static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
103
104/* AESU */
105static int sec_aesu_newsession(struct sec_softc *sc,
106 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
107static int sec_aesu_make_desc(struct sec_softc *sc,
108 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
109 int buftype);
110
111/* DEU */
112static int sec_deu_newsession(struct sec_softc *sc,
113 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
114static int sec_deu_make_desc(struct sec_softc *sc,
115 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
116 int buftype);
117
118/* MDEU */
119static int sec_mdeu_can_handle(u_int alg);
120static int sec_mdeu_config(struct cryptodesc *crd,
121 u_int *eu, u_int *mode, u_int *hashlen);
122static int sec_mdeu_newsession(struct sec_softc *sc,
123 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
124static int sec_mdeu_make_desc(struct sec_softc *sc,
125 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
126 int buftype);
127
128static device_method_t sec_methods[] = {
129 /* Device interface */
130 DEVMETHOD(device_probe, sec_probe),
131 DEVMETHOD(device_attach, sec_attach),
132 DEVMETHOD(device_detach, sec_detach),
133
134 DEVMETHOD(device_suspend, sec_suspend),
135 DEVMETHOD(device_resume, sec_resume),
136 DEVMETHOD(device_shutdown, sec_shutdown),
137
138 /* Bus interface */
139 DEVMETHOD(bus_print_child, bus_generic_print_child),
140 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
141
142 /* Crypto methods */
143 DEVMETHOD(cryptodev_newsession, sec_newsession),
144 DEVMETHOD(cryptodev_freesession,sec_freesession),
145 DEVMETHOD(cryptodev_process, sec_process),
146
147 { 0, 0 }
148};
149static driver_t sec_driver = {
150 "sec",
151 sec_methods,
152 sizeof(struct sec_softc),
153};
154
155static devclass_t sec_devclass;
156DRIVER_MODULE(sec, ocpbus, sec_driver, sec_devclass, 0, 0);
157MODULE_DEPEND(sec, crypto, 1, 1, 1);
158
159static struct sec_eu_methods sec_eus[] = {
160 {
161 sec_aesu_newsession,
162 sec_aesu_make_desc,
163 },
164 {
165 sec_deu_newsession,
166 sec_deu_make_desc,
167 },
168 {
169 sec_mdeu_newsession,
170 sec_mdeu_make_desc,
171 },
172 { NULL, NULL }
173};
174
175static inline void
176sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
177{
178
179 /* Sync only if dma memory is valid */
180 if (dma_mem->dma_vaddr != NULL)
181 bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
182}
183
184static inline void
185sec_free_session(struct sec_softc *sc, struct sec_session *ses)
186{
187
188 SEC_LOCK(sc, sessions);
189 ses->ss_used = 0;
190 SEC_UNLOCK(sc, sessions);
191}
192
193static inline void *
194sec_get_pointer_data(struct sec_desc *desc, u_int n)
195{
196
197 return (desc->sd_ptr_dmem[n].dma_vaddr);
198}
199
200static int
201sec_probe(device_t dev)
202{
203 struct sec_softc *sc;
204 device_t parent;
205 uintptr_t devtype;
206 uint64_t id;
207 int error;
208
209 parent = device_get_parent(dev);
210 error = BUS_READ_IVAR(parent, dev, OCPBUS_IVAR_DEVTYPE, &devtype);
211 if (error)
212 return (error);
213
214 if (devtype != OCPBUS_DEVTYPE_SEC)
215 return (ENXIO);
216
217 sc = device_get_softc(dev);
218
219 sc->sc_rrid = 0;
220 sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
221 0ul, ~0ul, SEC_IO_SIZE, RF_ACTIVE);
222
223 if (sc->sc_rres == NULL)
224 return (ENXIO);
225
226 sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
227 sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
228
229 id = SEC_READ(sc, SEC_ID);
230
231 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
232
233 switch (id) {
234 case SEC_20_ID:
235 device_set_desc(dev, "Freescale Security Engine 2.0");
236 sc->sc_version = 2;
237 break;
238 case SEC_30_ID:
239 device_set_desc(dev, "Freescale Security Engine 3.0");
240 sc->sc_version = 3;
241 break;
242 default:
243 device_printf(dev, "unknown SEC ID 0x%016llx!\n", id);
244 return (ENXIO);
245 }
246
247 return (0);
248}
249
250static int
251sec_attach(device_t dev)
252{
253 struct sec_softc *sc;
254 struct sec_hw_lt *lt;
255 int error = 0;
256 int i;
257
258 sc = device_get_softc(dev);
259 sc->sc_dev = dev;
260 sc->sc_blocked = 0;
261 sc->sc_shutdown = 0;
262
263 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
264 if (sc->sc_cid < 0) {
265 device_printf(dev, "could not get crypto driver ID!\n");
266 return (ENXIO);
267 }
268
269 /* Init locks */
270 mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
271 "SEC Controller lock", MTX_DEF);
272 mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
273 "SEC Descriptors lock", MTX_DEF);
274 mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
275 "SEC Sessions lock", MTX_DEF);
276
277 /* Allocate I/O memory for SEC registers */
278 sc->sc_rrid = 0;
279 sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
280 0ul, ~0ul, SEC_IO_SIZE, RF_ACTIVE);
281
282 if (sc->sc_rres == NULL) {
283 device_printf(dev, "could not allocate I/O memory!\n");
284 goto fail1;
285 }
286
287 sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
288 sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
289
290 /* Setup interrupts */
291 sc->sc_pri_irid = 0;
292 error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
293 &sc->sc_pri_irid, sec_primary_intr, "primary");
294
295 if (error)
296 goto fail2;
297
298 sc->sc_sec_irid = 1;
299 error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
300 &sc->sc_sec_irid, sec_secondary_intr, "secondary");
301
302 if (error)
303 goto fail3;
304
305 /* Alloc DMA memory for descriptors and link tables */
306 error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
307 SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
308
309 if (error)
310 goto fail4;
311
312 error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
313 (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
314
315 if (error)
316 goto fail5;
317
318 /* Fill in descriptors and link tables */
319 for (i = 0; i < SEC_DESCRIPTORS; i++) {
320 sc->sc_desc[i].sd_desc =
321 (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
322 sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
323 (i * sizeof(struct sec_hw_desc));
324 }
325
326 for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
327 sc->sc_lt[i].sl_lt =
328 (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
329 sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
330 (i * sizeof(struct sec_hw_lt));
331 }
332
333 /* Last entry in link table is used to create a circle */
334 lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
335 lt->shl_length = 0;
336 lt->shl_r = 0;
337 lt->shl_n = 1;
338 lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
339
 340	/* Init descriptor and link table queue pointers */
341 SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
342 SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
343 SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
344 SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
345 SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
346 SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
347 SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
348 SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
349
350 /* Create masks for fast checks */
351 sc->sc_int_error_mask = 0;
352 for (i = 0; i < SEC_CHANNELS; i++)
353 sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
354
355 switch (sc->sc_version) {
356 case 2:
357 sc->sc_channel_idle_mask =
358 (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
359 (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
360 (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
361 (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
362 break;
363 case 3:
364 sc->sc_channel_idle_mask =
365 (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
366 (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
367 (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
368 (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
369 break;
370 }
371
372 /* Init hardware */
373 error = sec_init(sc);
374
375 if (error)
376 goto fail6;
377
378 /* Register in OCF (AESU) */
379 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
380
381 /* Register in OCF (DEU) */
382 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
383 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
384
385 /* Register in OCF (MDEU) */
386 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
387 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
388 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
389 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
390 crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
391 if (sc->sc_version >= 3) {
392 crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
393 crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
394 }
395
396 return (0);
397
398fail6:
399 sec_free_dma_mem(&(sc->sc_lt_dmem));
400fail5:
401 sec_free_dma_mem(&(sc->sc_desc_dmem));
402fail4:
403 sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
404 sc->sc_sec_irid, "secondary");
405fail3:
406 sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
407 sc->sc_pri_irid, "primary");
408fail2:
409 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
410fail1:
411 mtx_destroy(&sc->sc_controller_lock);
412 mtx_destroy(&sc->sc_descriptors_lock);
413 mtx_destroy(&sc->sc_sessions_lock);
414
415 return (ENXIO);
416}
417
418static int
419sec_detach(device_t dev)
420{
421 struct sec_softc *sc = device_get_softc(dev);
422 int i, error, timeout = SEC_TIMEOUT;
423
 424	/* Prepare driver for shutdown */
425 SEC_LOCK(sc, descriptors);
426 sc->sc_shutdown = 1;
427 SEC_UNLOCK(sc, descriptors);
428
429 /* Wait until all queued processing finishes */
430 while (1) {
431 SEC_LOCK(sc, descriptors);
432 i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
433 SEC_UNLOCK(sc, descriptors);
434
435 if (i == 0)
436 break;
437
438 if (timeout < 0) {
439 device_printf(dev, "queue flush timeout!\n");
440
 441			/* DMA may still be active - stop it */
442 for (i = 0; i < SEC_CHANNELS; i++)
443 sec_channel_reset(sc, i, 1);
444
445 break;
446 }
447
448 timeout -= 1000;
449 DELAY(1000);
450 }
451
452 /* Disable interrupts */
453 SEC_WRITE(sc, SEC_IER, 0);
454
455 /* Unregister from OCF */
456 crypto_unregister_all(sc->sc_cid);
457
458 /* Free DMA memory */
459 for (i = 0; i < SEC_DESCRIPTORS; i++)
460 SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
461
462 sec_free_dma_mem(&(sc->sc_lt_dmem));
463 sec_free_dma_mem(&(sc->sc_desc_dmem));
464
465 /* Release interrupts */
466 sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
467 sc->sc_pri_irid, "primary");
468 sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
469 sc->sc_sec_irid, "secondary");
470
471 /* Release memory */
472 if (sc->sc_rres) {
473 error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
474 sc->sc_rres);
475 if (error)
476 device_printf(dev, "bus_release_resource() failed for"
477 " I/O memory, error %d\n", error);
478
479 sc->sc_rres = NULL;
480 }
481
482 mtx_destroy(&sc->sc_controller_lock);
483 mtx_destroy(&sc->sc_descriptors_lock);
484 mtx_destroy(&sc->sc_sessions_lock);
485
486 return (0);
487}
488
489static int
490sec_suspend(device_t dev)
491{
492
493 return (0);
494}
495
496static int
497sec_resume(device_t dev)
498{
499
500 return (0);
501}
502
503static void
503static int
504sec_shutdown(device_t dev)
505{
506
507 return (0);
506}
507
508static int
509sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
510 int *irid, driver_intr_t handler, const char *iname)
511{
512 int error;
513
514 (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
515 RF_ACTIVE);
516
517 if ((*ires) == NULL) {
518 device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
519 return (ENXIO);
520 }
521
522 error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
523 NULL, handler, sc, ihand);
524
525 if (error) {
526 device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
527 if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
528 device_printf(sc->sc_dev, "could not release %s IRQ\n",
529 iname);
530
531 (*ires) = NULL;
532 return (error);
533 }
534
535 return (0);
536}
537
538static void
539sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
540 int irid, const char *iname)
541{
542 int error;
543
544 if (ires == NULL)
545 return;
546
547 error = bus_teardown_intr(sc->sc_dev, ires, ihand);
548 if (error)
549 device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
550 " IRQ, error %d\n", iname, error);
551
552 error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
553 if (error)
554 device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
555 " IRQ, error %d\n", iname, error);
556}
557
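/*
 * Primary interrupt handler: descriptors on channels that reported an I/O
 * error are marked with EIO and those channels get a partial reset, the
 * interrupt is acknowledged, every finished descriptor is completed through
 * crypto_done(), and any descriptors that became ready in the meantime are
 * pushed back to the hardware.
 */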
558static void
559sec_primary_intr(void *arg)
560{
561 struct sec_softc *sc = arg;
562 struct sec_desc *desc;
563 uint64_t isr;
564 int i, wakeup = 0;
565
566 SEC_LOCK(sc, controller);
567
568 /* Check for errors */
569 isr = SEC_READ(sc, SEC_ISR);
570 if (isr & sc->sc_int_error_mask) {
571 /* Check each channel for error */
572 for (i = 0; i < SEC_CHANNELS; i++) {
573 if ((isr & SEC_INT_CH_ERR(i)) == 0)
574 continue;
575
576 device_printf(sc->sc_dev,
577 "I/O error on channel %i!\n", i);
578
579 /* Find and mark problematic descriptor */
580 desc = sec_find_desc(sc, SEC_READ(sc,
581 SEC_CHAN_CDPR(i)));
582
583 if (desc != NULL)
584 desc->sd_error = EIO;
585
586 /* Do partial channel reset */
587 sec_channel_reset(sc, i, 0);
588 }
589 }
590
591 /* ACK interrupt */
592 SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
593
594 SEC_UNLOCK(sc, controller);
595 SEC_LOCK(sc, descriptors);
596
597 /* Handle processed descriptors */
598 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
599
600 while (SEC_QUEUED_DESC_CNT(sc) > 0) {
601 desc = SEC_GET_QUEUED_DESC(sc);
602
603 if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
604 SEC_PUT_BACK_QUEUED_DESC(sc);
605 break;
606 }
607
608 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
609 BUS_DMASYNC_PREWRITE);
610
611 desc->sd_crp->crp_etype = desc->sd_error;
612 crypto_done(desc->sd_crp);
613
614 SEC_DESC_FREE_POINTERS(desc);
615 SEC_DESC_FREE_LT(sc, desc);
616 SEC_DESC_QUEUED2FREE(sc);
617 }
618
619 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
620
621 if (!sc->sc_shutdown) {
622 wakeup = sc->sc_blocked;
623 sc->sc_blocked = 0;
624 }
625
626 SEC_UNLOCK(sc, descriptors);
627
628 /* Enqueue ready descriptors in hardware */
629 sec_enqueue(sc);
630
631 if (wakeup)
632 crypto_unblock(sc->sc_cid, wakeup);
633}
634
635static void
636sec_secondary_intr(void *arg)
637{
638 struct sec_softc *sc = arg;
639
640 device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
641 sec_primary_intr(arg);
642}
643
644static int
645sec_controller_reset(struct sec_softc *sc)
646{
647 int timeout = SEC_TIMEOUT;
648
649 /* Reset Controller */
650 SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
651
652 while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
653 DELAY(1000);
654 timeout -= 1000;
655
656 if (timeout < 0) {
657 device_printf(sc->sc_dev, "timeout while waiting for "
658 "device reset!\n");
659 return (ETIMEDOUT);
660 }
661 }
662
663 return (0);
664}
665
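/*
 * Reset a single channel. A full reset sets the reset (R) bit and
 * reprograms the channel configuration register afterwards; a partial
 * reset only sets the CON bit. In both cases we busy-wait until the
 * hardware clears the bit, or give up after SEC_TIMEOUT.
 */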
666static int
667sec_channel_reset(struct sec_softc *sc, int channel, int full)
668{
669 int timeout = SEC_TIMEOUT;
670 uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
671 uint64_t reg;
672
673 /* Reset Channel */
674 reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
675 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
676
677 while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
678 DELAY(1000);
679 timeout -= 1000;
680
681 if (timeout < 0) {
682 device_printf(sc->sc_dev, "timeout while waiting for "
683 "channel reset!\n");
684 return (ETIMEDOUT);
685 }
686 }
687
688 if (full) {
689 reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
690
691 switch(sc->sc_version) {
692 case 2:
693 reg |= SEC_CHAN_CCR_CDWE;
694 break;
695 case 3:
696 reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
697 break;
698 }
699
700 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
701 }
702
703 return (0);
704}
705
706static int
707sec_init(struct sec_softc *sc)
708{
709 uint64_t reg;
710 int error, i;
711
712 /* Reset controller twice to clear all pending interrupts */
713 error = sec_controller_reset(sc);
714 if (error)
715 return (error);
716
717 error = sec_controller_reset(sc);
718 if (error)
719 return (error);
720
721 /* Reset channels */
722 for (i = 0; i < SEC_CHANNELS; i++) {
723 error = sec_channel_reset(sc, i, 1);
724 if (error)
725 return (error);
726 }
727
728 /* Enable Interrupts */
729 reg = SEC_INT_ITO;
730 for (i = 0; i < SEC_CHANNELS; i++)
731 reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
732
733 SEC_WRITE(sc, SEC_IER, reg);
734
735 return (error);
736}
737
738static void
739sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
740{
741 struct sec_dma_mem *dma_mem = arg;
742
743 if (error)
744 return;
745
746 KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
747 dma_mem->dma_paddr = segs->ds_addr;
748}
749
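/*
 * busdma load callback: walk the returned DMA segments, skip the offset
 * requested in the sec_desc_map_info, and build one link table entry per
 * segment until the requested size is covered, remembering the first and
 * last entries and how many were used.
 */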
750static void
751sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
752 int error)
753{
754 struct sec_desc_map_info *sdmi = arg;
755 struct sec_softc *sc = sdmi->sdmi_sc;
756 struct sec_lt *lt = NULL;
757 bus_addr_t addr;
758 bus_size_t size;
759 int i;
760
761 SEC_LOCK_ASSERT(sc, descriptors);
762
763 if (error)
764 return;
765
766 for (i = 0; i < nseg; i++) {
767 addr = segs[i].ds_addr;
768 size = segs[i].ds_len;
769
770 /* Skip requested offset */
771 if (sdmi->sdmi_offset >= size) {
772 sdmi->sdmi_offset -= size;
773 continue;
774 }
775
776 addr += sdmi->sdmi_offset;
777 size -= sdmi->sdmi_offset;
778 sdmi->sdmi_offset = 0;
779
780 /* Do not link more than requested */
781 if (sdmi->sdmi_size < size)
782 size = sdmi->sdmi_size;
783
784 lt = SEC_ALLOC_LT_ENTRY(sc);
785 lt->sl_lt->shl_length = size;
786 lt->sl_lt->shl_r = 0;
787 lt->sl_lt->shl_n = 0;
788 lt->sl_lt->shl_ptr = addr;
789
790 if (sdmi->sdmi_lt_first == NULL)
791 sdmi->sdmi_lt_first = lt;
792
793 sdmi->sdmi_lt_used += 1;
794
795 if ((sdmi->sdmi_size -= size) == 0)
796 break;
797 }
798
799 sdmi->sdmi_lt_last = lt;
800}
801
802static void
803sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
804 bus_size_t size, int error)
805{
806
807 sec_dma_map_desc_cb(arg, segs, nseg, error);
808}
809
810static int
811sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
812 bus_size_t size)
813{
814 int error;
815
816 if (dma_mem->dma_vaddr != NULL)
817 return (EBUSY);
818
819 error = bus_dma_tag_create(NULL, /* parent */
820 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
821 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
822 BUS_SPACE_MAXADDR, /* highaddr */
823 NULL, NULL, /* filtfunc, filtfuncarg */
824 size, 1, /* maxsize, nsegments */
825 size, 0, /* maxsegsz, flags */
826 NULL, NULL, /* lockfunc, lockfuncarg */
827 &(dma_mem->dma_tag)); /* dmat */
828
829 if (error) {
830 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
831 " %i!\n", error);
832 goto err1;
833 }
834
835 error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
836 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
837
838 if (error) {
839 device_printf(sc->sc_dev, "failed to allocate DMA safe"
840 " memory, error %i!\n", error);
841 goto err2;
842 }
843
844 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
845 dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
846 BUS_DMA_NOWAIT);
847
848 if (error) {
849 device_printf(sc->sc_dev, "cannot get address of the DMA"
850 " memory, error %i\n", error);
851 goto err3;
852 }
853
854 dma_mem->dma_is_map = 0;
855 return (0);
856
857err3:
858 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
859err2:
860 bus_dma_tag_destroy(dma_mem->dma_tag);
861err1:
862 dma_mem->dma_vaddr = NULL;
863 return(error);
864}
865
866static int
867sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
868 bus_size_t size, int type, struct sec_desc_map_info *sdmi)
869{
870 int error;
871
872 if (dma_mem->dma_vaddr != NULL)
873 return (EBUSY);
874
875 switch (type) {
876 case SEC_MEMORY:
877 break;
878 case SEC_UIO:
879 size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
880 break;
881 case SEC_MBUF:
882 size = m_length((struct mbuf*)mem, NULL);
883 break;
884 default:
885 return (EINVAL);
886 }
887
888 error = bus_dma_tag_create(NULL, /* parent */
889 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
890 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
891 BUS_SPACE_MAXADDR, /* highaddr */
892 NULL, NULL, /* filtfunc, filtfuncarg */
893 size, /* maxsize */
894 SEC_FREE_LT_CNT(sc), /* nsegments */
895 SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */
896 NULL, NULL, /* lockfunc, lockfuncarg */
897 &(dma_mem->dma_tag)); /* dmat */
898
899 if (error) {
900 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
901 " %i!\n", error);
902 dma_mem->dma_vaddr = NULL;
903 return (error);
904 }
905
906 error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
907
908 if (error) {
909 device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
910 "\n", error);
911 bus_dma_tag_destroy(dma_mem->dma_tag);
912 return (error);
913 }
914
915 switch (type) {
916 case SEC_MEMORY:
917 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
918 mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
919 break;
920 case SEC_UIO:
921 error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
922 mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
923 break;
924 case SEC_MBUF:
925 error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
926 mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
927 break;
928 }
929
930 if (error) {
931 device_printf(sc->sc_dev, "cannot get address of the DMA"
932 " memory, error %i!\n", error);
933 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
934 bus_dma_tag_destroy(dma_mem->dma_tag);
935 return (error);
936 }
937
938 dma_mem->dma_is_map = 1;
939 dma_mem->dma_vaddr = mem;
940
941 return (0);
942}
943
944static void
945sec_free_dma_mem(struct sec_dma_mem *dma_mem)
946{
947
948 /* Check for double free */
949 if (dma_mem->dma_vaddr == NULL)
950 return;
951
952 bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
953
954 if (dma_mem->dma_is_map)
955 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
956 else
957 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
958 dma_mem->dma_map);
959
960 bus_dma_tag_destroy(dma_mem->dma_tag);
961 dma_mem->dma_vaddr = NULL;
962}
963
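/*
 * Look up which channel currently has the given execution unit assigned,
 * based on the EU assignment status register. The raw field is decremented
 * by one, so a negative return value means the EU is not bound to any
 * channel.
 */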
964static int
965sec_eu_channel(struct sec_softc *sc, int eu)
966{
967 uint64_t reg;
968 int channel = 0;
969
970 SEC_LOCK_ASSERT(sc, controller);
971
972 reg = SEC_READ(sc, SEC_EUASR);
973
974 switch (eu) {
975 case SEC_EU_AFEU:
976 channel = SEC_EUASR_AFEU(reg);
977 break;
978 case SEC_EU_DEU:
979 channel = SEC_EUASR_DEU(reg);
980 break;
981 case SEC_EU_MDEU_A:
982 case SEC_EU_MDEU_B:
983 channel = SEC_EUASR_MDEU(reg);
984 break;
985 case SEC_EU_RNGU:
986 channel = SEC_EUASR_RNGU(reg);
987 break;
988 case SEC_EU_PKEU:
989 channel = SEC_EUASR_PKEU(reg);
990 break;
991 case SEC_EU_AESU:
992 channel = SEC_EUASR_AESU(reg);
993 break;
994 case SEC_EU_KEU:
995 channel = SEC_EUASR_KEU(reg);
996 break;
997 case SEC_EU_CRCU:
998 channel = SEC_EUASR_CRCU(reg);
999 break;
1000 }
1001
1002 return (channel - 1);
1003}
1004
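/*
 * Try to hand a descriptor to the hardware: pick an idle channel when the
 * caller did not request one, check that the channel's fetch FIFO has room,
 * and write the descriptor's physical address into the FIFO. Returns the
 * channel used, or a negative value if nothing could be enqueued.
 */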
1005static int
1006sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1007{
1008 u_int fflvl = SEC_MAX_FIFO_LEVEL;
1009 uint64_t reg;
1010 int i;
1011
1012 SEC_LOCK_ASSERT(sc, controller);
1013
 1014	/* Find a free channel if we have not been given one */
1015 if (channel < 0) {
1016 for (i = 0; i < SEC_CHANNELS; i++) {
 1017			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
1018
1019 if ((reg & sc->sc_channel_idle_mask) == 0) {
1020 channel = i;
1021 break;
1022 }
1023 }
1024 }
1025
1026 /* There is no free channel */
1027 if (channel < 0)
1028 return (-1);
1029
1030 /* Check FIFO level on selected channel */
1031 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1032
1033 switch(sc->sc_version) {
1034 case 2:
1035 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1036 break;
1037 case 3:
1038 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1039 break;
1040 }
1041
1042 if (fflvl >= SEC_MAX_FIFO_LEVEL)
1043 return (-1);
1044
1045 /* Enqueue descriptor in channel */
1046 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1047
1048 return (channel);
1049}
1050
1051static void
1052sec_enqueue(struct sec_softc *sc)
1053{
1054 struct sec_desc *desc;
1055 int ch0, ch1;
1056
1057 SEC_LOCK(sc, descriptors);
1058 SEC_LOCK(sc, controller);
1059
1060 while (SEC_READY_DESC_CNT(sc) > 0) {
1061 desc = SEC_GET_READY_DESC(sc);
1062
1063 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1064 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1065
1066 /*
 1067		 * Both EUs are used by the same channel.
 1068		 * Enqueue the descriptor in the channel used by the busy EUs.
1069 */
1070 if (ch0 >= 0 && ch0 == ch1) {
1071 if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1072 SEC_DESC_READY2QUEUED(sc);
1073 continue;
1074 }
1075 }
1076
1077 /*
1078 * Only one EU is free.
1079 * Enqueue descriptor in channel used by busy EU.
1080 */
1081 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1082 if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1083 >= 0) {
1084 SEC_DESC_READY2QUEUED(sc);
1085 continue;
1086 }
1087 }
1088
1089 /*
 1090		 * Both EUs are free.
1091 * Enqueue descriptor in first free channel.
1092 */
1093 if (ch0 < 0 && ch1 < 0) {
1094 if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1095 SEC_DESC_READY2QUEUED(sc);
1096 continue;
1097 }
1098 }
1099
 1100		/* Current descriptor cannot be queued at the moment */
1101 SEC_PUT_BACK_READY_DESC(sc);
1102 break;
1103 }
1104
1105 SEC_UNLOCK(sc, controller);
1106 SEC_UNLOCK(sc, descriptors);
1107}
1108
1109static struct sec_desc *
1110sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1111{
1112 struct sec_desc *desc = NULL;
1113 int i;
1114
1115 SEC_LOCK_ASSERT(sc, descriptors);
1116
1117 for (i = 0; i < SEC_CHANNELS; i++) {
1118 if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1119 desc = &(sc->sc_desc[i]);
1120 break;
1121 }
1122 }
1123
1124 return (desc);
1125}
1126
1127static int
1128sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1129 bus_addr_t data, bus_size_t dsize)
1130{
1131 struct sec_hw_desc_ptr *ptr;
1132
1133 SEC_LOCK_ASSERT(sc, descriptors);
1134
1135 ptr = &(desc->sd_desc->shd_pointer[n]);
1136 ptr->shdp_length = dsize;
1137 ptr->shdp_extent = 0;
1138 ptr->shdp_j = 0;
1139 ptr->shdp_ptr = data;
1140
1141 return (0);
1142}
1143
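/*
 * Fill descriptor pointer n with a scatter/gather reference: the buffer is
 * mapped with sec_desc_map_dma(), which builds a chain of link table
 * entries, the last entry is marked to terminate the chain, and the J bit
 * in the pointer tells the hardware to follow the link table.
 * sec_make_pointer_direct() above handles regions whose physical address is
 * already known, such as the IV and key material inside the descriptor.
 */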
1144static int
1145sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1146 u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1147{
1148 struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1149 struct sec_hw_desc_ptr *ptr;
1150 int error;
1151
1152 SEC_LOCK_ASSERT(sc, descriptors);
1153
 1154	/* For flat memory, map only the requested region */
1155 if (dtype == SEC_MEMORY) {
1156 data = (uint8_t*)(data) + doffset;
1157 sdmi.sdmi_offset = 0;
1158 }
1159
1160 error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1161 dtype, &sdmi);
1162
1163 if (error)
1164 return (error);
1165
1166 sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1167 desc->sd_lt_used += sdmi.sdmi_lt_used;
1168
1169 ptr = &(desc->sd_desc->shd_pointer[n]);
1170 ptr->shdp_length = dsize;
1171 ptr->shdp_extent = 0;
1172 ptr->shdp_j = 1;
1173 ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1174
1175 return (0);
1176}
1177
1178static int
1179sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1180 struct cryptoini **mac)
1181{
1182 struct cryptoini *e, *m;
1183
1184 e = cri;
1185 m = cri->cri_next;
1186
 1187	/* We can handle only two operations */
1188 if (m && m->cri_next)
1189 return (EINVAL);
1190
1191 if (sec_mdeu_can_handle(e->cri_alg)) {
1192 cri = m;
1193 m = e;
1194 e = cri;
1195 }
1196
1197 if (m && !sec_mdeu_can_handle(m->cri_alg))
1198 return (EINVAL);
1199
1200 *enc = e;
1201 *mac = m;
1202
1203 return (0);
1204}
1205
1206static int
1207sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1208 struct cryptodesc **mac)
1209{
1210 struct cryptodesc *e, *m, *t;
1211
1212 e = crp->crp_desc;
1213 m = e->crd_next;
1214
 1216	/* We can handle only two operations */
1216 if (m && m->crd_next)
1217 return (EINVAL);
1218
1219 if (sec_mdeu_can_handle(e->crd_alg)) {
1220 t = m;
1221 m = e;
1222 e = t;
1223 }
1224
1225 if (m && !sec_mdeu_can_handle(m->crd_alg))
1226 return (EINVAL);
1227
1228 *enc = e;
1229 *mac = m;
1230
1231 return (0);
1232}
1233
1234static int
1235sec_alloc_session(struct sec_softc *sc)
1236{
1237 struct sec_session *ses = NULL;
1238 int sid = -1;
1239 u_int i;
1240
1241 SEC_LOCK(sc, sessions);
1242
1243 for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1244 if (sc->sc_sessions[i].ss_used == 0) {
1245 ses = &(sc->sc_sessions[i]);
1246 ses->ss_used = 1;
1247 ses->ss_ivlen = 0;
1248 ses->ss_klen = 0;
1249 ses->ss_mklen = 0;
1250 sid = i;
1251 break;
1252 }
1253 }
1254
1255 SEC_UNLOCK(sc, sessions);
1256
1257 return (sid);
1258}
1259
1260static struct sec_session *
1261sec_get_session(struct sec_softc *sc, u_int sid)
1262{
1263 struct sec_session *ses;
1264
1265 if (sid >= SEC_MAX_SESSIONS)
1266 return (NULL);
1267
1268 SEC_LOCK(sc, sessions);
1269
1270 ses = &(sc->sc_sessions[sid]);
1271
1272 if (ses->ss_used == 0)
1273 ses = NULL;
1274
1275 SEC_UNLOCK(sc, sessions);
1276
1277 return (ses);
1278}
1279
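/*
 * Create a new session: split the cryptoini chain into cipher and MAC
 * parts, validate the key lengths, allocate a session slot and offer the
 * session to each execution unit until one accepts it; the cipher and MAC
 * keys are then cached in the session for later descriptor setup.
 */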
1280static int
1281sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
1282{
1283 struct sec_softc *sc = device_get_softc(dev);
1284 struct sec_eu_methods *eu = sec_eus;
1285 struct cryptoini *enc = NULL;
1286 struct cryptoini *mac = NULL;
1287 struct sec_session *ses;
1288 int error = -1;
1289 int sid;
1290
1291 error = sec_split_cri(cri, &enc, &mac);
1292 if (error)
1293 return (error);
1294
1295 /* Check key lengths */
1296 if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1297 return (E2BIG);
1298
1299 if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1300 return (E2BIG);
1301
1302 /* Only SEC 3.0 supports digests larger than 256 bits */
1303 if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1304 return (E2BIG);
1305
1306 sid = sec_alloc_session(sc);
1307 if (sid < 0)
1308 return (ENOMEM);
1309
1310 ses = sec_get_session(sc, sid);
1311
1312 /* Find EU for this session */
1313 while (eu->sem_make_desc != NULL) {
1314 error = eu->sem_newsession(sc, ses, enc, mac);
1315 if (error >= 0)
1316 break;
1317
1318 eu++;
1319 }
1320
1321 /* If not found, return EINVAL */
1322 if (error < 0) {
1323 sec_free_session(sc, ses);
1324 return (EINVAL);
1325 }
1326
1327 /* Save cipher key */
1328 if (enc && enc->cri_key) {
1329 ses->ss_klen = enc->cri_klen / 8;
1330 memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1331 }
1332
1333 /* Save digest key */
1334 if (mac && mac->cri_key) {
1335 ses->ss_mklen = mac->cri_klen / 8;
1336 memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
1337 }
1338
1339 ses->ss_eu = eu;
1340 *sidp = sid;
1341
1342 return (0);
1343}
1344
1345static int
1346sec_freesession(device_t dev, uint64_t tid)
1347{
1348 struct sec_softc *sc = device_get_softc(dev);
1349 struct sec_session *ses;
1350 int error = 0;
1351
1352 ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1353 if (ses == NULL)
1354 return (EINVAL);
1355
1356 sec_free_session(sc, ses);
1357
1358 return (error);
1359}
1360
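/*
 * Main request entry point: look up the session, reject oversized requests,
 * take a free descriptor (or return ERESTART so the crypto layer backs off),
 * set up the IV and any explicit keys, have the session's execution unit
 * build the hardware descriptor, then move it to the ready queue and kick
 * sec_enqueue().
 */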
1361static int
1362sec_process(device_t dev, struct cryptop *crp, int hint)
1363{
1364 struct sec_softc *sc = device_get_softc(dev);
1365 struct sec_desc *desc = NULL;
1366 struct cryptodesc *mac, *enc;
1367 struct sec_session *ses;
1368 int buftype, error = 0;
1369
1370 /* Check Session ID */
1371 ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1372 if (ses == NULL) {
1373 crp->crp_etype = EINVAL;
1374 crypto_done(crp);
1375 return (0);
1376 }
1377
1378 /* Check for input length */
1379 if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1380 crp->crp_etype = E2BIG;
1381 crypto_done(crp);
1382 return (0);
1383 }
1384
1385 /* Get descriptors */
1386 if (sec_split_crp(crp, &enc, &mac)) {
1387 crp->crp_etype = EINVAL;
1388 crypto_done(crp);
1389 return (0);
1390 }
1391
1392 SEC_LOCK(sc, descriptors);
1393 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1394
 1395	/* Block driver if there are no free descriptors or we are going down */
1396 if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1397 sc->sc_blocked |= CRYPTO_SYMQ;
1398 SEC_UNLOCK(sc, descriptors);
1399 return (ERESTART);
1400 }
1401
1402 /* Prepare descriptor */
1403 desc = SEC_GET_FREE_DESC(sc);
1404 desc->sd_lt_used = 0;
1405 desc->sd_error = 0;
1406 desc->sd_crp = crp;
1407
1408 if (crp->crp_flags & CRYPTO_F_IOV)
1409 buftype = SEC_UIO;
1410 else if (crp->crp_flags & CRYPTO_F_IMBUF)
1411 buftype = SEC_MBUF;
1412 else
1413 buftype = SEC_MEMORY;
1414
1415 if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1416 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1417 memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1418 ses->ss_ivlen);
1419 else
1420 arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1421
1422 if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1423 crypto_copyback(crp->crp_flags, crp->crp_buf,
1424 enc->crd_inject, ses->ss_ivlen,
1425 desc->sd_desc->shd_iv);
1426 } else if (enc) {
1427 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1428 memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1429 ses->ss_ivlen);
1430 else
1431 crypto_copydata(crp->crp_flags, crp->crp_buf,
1432 enc->crd_inject, ses->ss_ivlen,
1433 desc->sd_desc->shd_iv);
1434 }
1435
1436 if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1437 if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1438 ses->ss_klen = enc->crd_klen / 8;
1439 memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1440 } else
1441 error = E2BIG;
1442 }
1443
1444 if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1445 if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1446 ses->ss_mklen = mac->crd_klen / 8;
1447 memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1448 } else
1449 error = E2BIG;
1450 }
1451
1452 if (!error) {
1453 memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1454 memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1455
1456 error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
1457 }
1458
1459 if (error) {
1460 SEC_DESC_FREE_POINTERS(desc);
1461 SEC_DESC_PUT_BACK_LT(sc, desc);
1462 SEC_PUT_BACK_FREE_DESC(sc);
1463 SEC_UNLOCK(sc, descriptors);
1464 crp->crp_etype = error;
1465 crypto_done(crp);
1466 return (0);
1467 }
1468
1469 /*
 1470	 * Skip DONE interrupt if this is not the last request in a burst, but only
1471 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
1472 * signaling on each descriptor.
1473 */
1474 if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1475 desc->sd_desc->shd_dn = 0;
1476 else
1477 desc->sd_desc->shd_dn = 1;
1478
1479 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1480 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1481 BUS_DMASYNC_POSTWRITE);
1482 SEC_DESC_FREE2READY(sc);
1483 SEC_UNLOCK(sc, descriptors);
1484
1485 /* Enqueue ready descriptors in hardware */
1486 sec_enqueue(sc);
1487
1488 return (0);
1489}
1490
1491static int
1492sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1493 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1494 int buftype)
1495{
1496 struct sec_hw_desc *hd = desc->sd_desc;
1497 int error;
1498
1499 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1500 hd->shd_eu_sel1 = SEC_EU_NONE;
1501 hd->shd_mode1 = 0;
1502
1503 /* Pointer 0: NULL */
1504 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1505 if (error)
1506 return (error);
1507
1508 /* Pointer 1: IV IN */
1509 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1510 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1511 if (error)
1512 return (error);
1513
1514 /* Pointer 2: Cipher Key */
1515 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1516 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1517 if (error)
1518 return (error);
1519
1520 /* Pointer 3: Data IN */
1521 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1522 enc->crd_len, buftype);
1523 if (error)
1524 return (error);
1525
1526 /* Pointer 4: Data OUT */
1527 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1528 enc->crd_len, buftype);
1529 if (error)
1530 return (error);
1531
1532 /* Pointer 5: IV OUT (Not used: NULL) */
1533 error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1534 if (error)
1535 return (error);
1536
1537 /* Pointer 6: NULL */
1538 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1539
1540 return (error);
1541}
1542
1543static int
1544sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1545 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1546 struct cryptodesc *mac, int buftype)
1547{
1548 struct sec_hw_desc *hd = desc->sd_desc;
1549 u_int eu, mode, hashlen;
1550 int error;
1551
1552 if (mac->crd_len < enc->crd_len)
1553 return (EINVAL);
1554
1555 if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1556 return (EINVAL);
1557
1558 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1559 if (error)
1560 return (error);
1561
1562 hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1563 hd->shd_eu_sel1 = eu;
1564 hd->shd_mode1 = mode;
1565
1566 /* Pointer 0: HMAC Key */
1567 error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1568 offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1569 if (error)
1570 return (error);
1571
1572 /* Pointer 1: HMAC-Only Data IN */
1573 error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1574 mac->crd_len - enc->crd_len, buftype);
1575 if (error)
1576 return (error);
1577
1578 /* Pointer 2: Cipher Key */
1579 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1580 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1581 if (error)
1582 return (error);
1583
1584 /* Pointer 3: IV IN */
1585 error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1586 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1587 if (error)
1588 return (error);
1589
1590 /* Pointer 4: Data IN */
1591 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1592 enc->crd_len, buftype);
1593 if (error)
1594 return (error);
1595
1596 /* Pointer 5: Data OUT */
1597 error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1598 enc->crd_len, buftype);
1599 if (error)
1600 return (error);
1601
1602 /* Pointer 6: HMAC OUT */
1603 error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1604 hashlen, buftype);
1605
1606 return (error);
1607}
1608
1609/* AESU */
1610
1611static int
1612sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1613 struct cryptoini *enc, struct cryptoini *mac)
1614{
1615
1616 if (enc == NULL)
1617 return (-1);
1618
1619 if (enc->cri_alg != CRYPTO_AES_CBC)
1620 return (-1);
1621
1622 ses->ss_ivlen = AES_BLOCK_LEN;
1623
1624 return (0);
1625}
1626
1627static int
1628sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1629 struct sec_desc *desc, struct cryptop *crp, int buftype)
1630{
1631 struct sec_hw_desc *hd = desc->sd_desc;
1632 struct cryptodesc *enc, *mac;
1633 int error;
1634
1635 error = sec_split_crp(crp, &enc, &mac);
1636 if (error)
1637 return (error);
1638
1639 if (!enc)
1640 return (EINVAL);
1641
1642 hd->shd_eu_sel0 = SEC_EU_AESU;
1643 hd->shd_mode0 = SEC_AESU_MODE_CBC;
1644
1645 if (enc->crd_alg != CRYPTO_AES_CBC)
1646 return (EINVAL);
1647
1648 if (enc->crd_flags & CRD_F_ENCRYPT) {
1649 hd->shd_mode0 |= SEC_AESU_MODE_ED;
1650 hd->shd_dir = 0;
1651 } else
1652 hd->shd_dir = 1;
1653
1654 if (mac)
1655 error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1656 buftype);
1657 else
1658 error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1659 buftype);
1660
1661 return (error);
1662}
1663
1664/* DEU */
1665
1666static int
1667sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1668 struct cryptoini *enc, struct cryptoini *mac)
1669{
1670
1671 if (enc == NULL)
1672 return (-1);
1673
1674 switch (enc->cri_alg) {
1675 case CRYPTO_DES_CBC:
1676 case CRYPTO_3DES_CBC:
1677 break;
1678 default:
1679 return (-1);
1680 }
1681
1682 ses->ss_ivlen = DES_BLOCK_LEN;
1683
1684 return (0);
1685}
1686
1687static int
1688sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1689 struct sec_desc *desc, struct cryptop *crp, int buftype)
1690{
1691 struct sec_hw_desc *hd = desc->sd_desc;
1692 struct cryptodesc *enc, *mac;
1693 int error;
1694
1695 error = sec_split_crp(crp, &enc, &mac);
1696 if (error)
1697 return (error);
1698
1699 if (!enc)
1700 return (EINVAL);
1701
1702 hd->shd_eu_sel0 = SEC_EU_DEU;
1703 hd->shd_mode0 = SEC_DEU_MODE_CBC;
1704
1705 switch (enc->crd_alg) {
1706 case CRYPTO_3DES_CBC:
1707 hd->shd_mode0 |= SEC_DEU_MODE_TS;
1708 break;
1709 case CRYPTO_DES_CBC:
1710 break;
1711 default:
1712 return (EINVAL);
1713 }
1714
1715 if (enc->crd_flags & CRD_F_ENCRYPT) {
1716 hd->shd_mode0 |= SEC_DEU_MODE_ED;
1717 hd->shd_dir = 0;
1718 } else
1719 hd->shd_dir = 1;
1720
1721 if (mac)
1722 error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1723 buftype);
1724 else
1725 error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1726 buftype);
1727
1728 return (error);
1729}
1730
1731/* MDEU */
1732
1733static int
1734sec_mdeu_can_handle(u_int alg)
1735{
1736 switch (alg) {
1737 case CRYPTO_MD5:
1738 case CRYPTO_SHA1:
1739 case CRYPTO_MD5_HMAC:
1740 case CRYPTO_SHA1_HMAC:
1741 case CRYPTO_SHA2_256_HMAC:
1742 case CRYPTO_SHA2_384_HMAC:
1743 case CRYPTO_SHA2_512_HMAC:
1744 return (1);
1745 default:
1746 return (0);
1747 }
1748}
1749
1750static int
1751sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1752{
1753
1754 *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1755 *eu = SEC_EU_NONE;
1756
1757 switch (crd->crd_alg) {
1758 case CRYPTO_MD5_HMAC:
1759 *mode |= SEC_MDEU_MODE_HMAC;
1760 /* FALLTHROUGH */
1761 case CRYPTO_MD5:
1762 *eu = SEC_EU_MDEU_A;
1763 *mode |= SEC_MDEU_MODE_MD5;
1764 *hashlen = MD5_HASH_LEN;
1765 break;
1766 case CRYPTO_SHA1_HMAC:
1767 *mode |= SEC_MDEU_MODE_HMAC;
1768 /* FALLTHROUGH */
1769 case CRYPTO_SHA1:
1770 *eu = SEC_EU_MDEU_A;
1771 *mode |= SEC_MDEU_MODE_SHA1;
1772 *hashlen = SHA1_HASH_LEN;
1773 break;
1774 case CRYPTO_SHA2_256_HMAC:
1775 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1776 *eu = SEC_EU_MDEU_A;
1777 break;
1778 case CRYPTO_SHA2_384_HMAC:
1779 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1780 *eu = SEC_EU_MDEU_B;
1781 break;
1782 case CRYPTO_SHA2_512_HMAC:
1783 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1784 *eu = SEC_EU_MDEU_B;
1785 break;
1786 default:
1787 return (EINVAL);
1788 }
1789
1790 if (*mode & SEC_MDEU_MODE_HMAC)
1791 *hashlen = SEC_HMAC_HASH_LEN;
1792
1793 return (0);
1794}
1795
1796static int
1797sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1798 struct cryptoini *enc, struct cryptoini *mac)
1799{
1800
1801 if (mac && sec_mdeu_can_handle(mac->cri_alg))
1802 return (0);
1803
1804 return (-1);
1805}
1806
1807static int
1808sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1809 struct sec_desc *desc, struct cryptop *crp, int buftype)
1810{
1811 struct cryptodesc *enc, *mac;
1812 struct sec_hw_desc *hd = desc->sd_desc;
1813 u_int eu, mode, hashlen;
1814 int error;
1815
1816 error = sec_split_crp(crp, &enc, &mac);
1817 if (error)
1818 return (error);
1819
1820 if (enc)
1821 return (EINVAL);
1822
1823 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1824 if (error)
1825 return (error);
1826
1827 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1828 hd->shd_eu_sel0 = eu;
1829 hd->shd_mode0 = mode;
1830 hd->shd_eu_sel1 = SEC_EU_NONE;
1831 hd->shd_mode1 = 0;
1832
1833 /* Pointer 0: NULL */
1834 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1835 if (error)
1836 return (error);
1837
1838 /* Pointer 1: Context In (Not used: NULL) */
1839 error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1840 if (error)
1841 return (error);
1842
1843 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1844 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1845 error = sec_make_pointer_direct(sc, desc, 2,
1846 desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1847 shd_mkey), ses->ss_mklen);
1848 else
1849 error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1850
1851 if (error)
1852 return (error);
1853
1854 /* Pointer 3: Input Data */
1855 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1856 mac->crd_len, buftype);
1857 if (error)
1858 return (error);
1859
1860 /* Pointer 4: NULL */
1861 error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1862 if (error)
1863 return (error);
1864
1865 /* Pointer 5: Hash out */
1866 error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1867 mac->crd_inject, hashlen, buftype);
1868 if (error)
1869 return (error);
1870
1871 /* Pointer 6: NULL */
1872 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1873
1874 	return (error);
1875}
508}
509
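/*
 * Allocate and activate an IRQ resource and install the given handler.
 * On failure the resource is released and *ires is cleared.
 */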
510static int
511sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
512 int *irid, driver_intr_t handler, const char *iname)
513{
514 int error;
515
516 (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
517 RF_ACTIVE);
518
519 if ((*ires) == NULL) {
520 device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
521 return (ENXIO);
522 }
523
524 error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
525 NULL, handler, sc, ihand);
526
527 if (error) {
528 device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
529 if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
530 device_printf(sc->sc_dev, "could not release %s IRQ\n",
531 iname);
532
533 (*ires) = NULL;
534 return (error);
535 }
536
537 return (0);
538}
539
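/*
 * Tear down an interrupt handler and release its IRQ resource; a NULL
 * resource is silently ignored.
 */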
540static void
541sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
542 int irid, const char *iname)
543{
544 int error;
545
546 if (ires == NULL)
547 return;
548
549 error = bus_teardown_intr(sc->sc_dev, ires, ihand);
550 if (error)
551 device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
552 " IRQ, error %d\n", iname, error);
553
554 error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
555 if (error)
556 device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
557 " IRQ, error %d\n", iname, error);
558}
559
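/*
 * Primary interrupt handler: on channel errors, mark the offending
 * descriptor and partially reset the channel; then acknowledge the
 * interrupt, complete finished descriptors via crypto_done() and unblock
 * the crypto layer if the driver was blocked.
 */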
560static void
561sec_primary_intr(void *arg)
562{
563 struct sec_softc *sc = arg;
564 struct sec_desc *desc;
565 uint64_t isr;
566 int i, wakeup = 0;
567
568 SEC_LOCK(sc, controller);
569
570 /* Check for errors */
571 isr = SEC_READ(sc, SEC_ISR);
572 if (isr & sc->sc_int_error_mask) {
573 /* Check each channel for error */
574 for (i = 0; i < SEC_CHANNELS; i++) {
575 if ((isr & SEC_INT_CH_ERR(i)) == 0)
576 continue;
577
578 device_printf(sc->sc_dev,
579 "I/O error on channel %i!\n", i);
580
581 /* Find and mark problematic descriptor */
582 desc = sec_find_desc(sc, SEC_READ(sc,
583 SEC_CHAN_CDPR(i)));
584
585 if (desc != NULL)
586 desc->sd_error = EIO;
587
588 /* Do partial channel reset */
589 sec_channel_reset(sc, i, 0);
590 }
591 }
592
593 /* ACK interrupt */
594 SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
595
596 SEC_UNLOCK(sc, controller);
597 SEC_LOCK(sc, descriptors);
598
599 /* Handle processed descriptors */
600 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
601
602 while (SEC_QUEUED_DESC_CNT(sc) > 0) {
603 desc = SEC_GET_QUEUED_DESC(sc);
604
605 if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
606 SEC_PUT_BACK_QUEUED_DESC(sc);
607 break;
608 }
609
610 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
611 BUS_DMASYNC_PREWRITE);
612
613 desc->sd_crp->crp_etype = desc->sd_error;
614 crypto_done(desc->sd_crp);
615
616 SEC_DESC_FREE_POINTERS(desc);
617 SEC_DESC_FREE_LT(sc, desc);
618 SEC_DESC_QUEUED2FREE(sc);
619 }
620
621 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
622
623 if (!sc->sc_shutdown) {
624 wakeup = sc->sc_blocked;
625 sc->sc_blocked = 0;
626 }
627
628 SEC_UNLOCK(sc, descriptors);
629
630 /* Enqueue ready descriptors in hardware */
631 sec_enqueue(sc);
632
633 if (wakeup)
634 crypto_unblock(sc->sc_cid, wakeup);
635}
636
637static void
638sec_secondary_intr(void *arg)
639{
640 struct sec_softc *sc = arg;
641
642 device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
643 sec_primary_intr(arg);
644}
645
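/*
 * Software-reset the whole SEC block and wait (up to SEC_TIMEOUT) for the
 * reset bit to self-clear.
 */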
646static int
647sec_controller_reset(struct sec_softc *sc)
648{
649 int timeout = SEC_TIMEOUT;
650
651 /* Reset Controller */
652 SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
653
654 while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
655 DELAY(1000);
656 timeout -= 1000;
657
658 if (timeout < 0) {
659 device_printf(sc->sc_dev, "timeout while waiting for "
660 "device reset!\n");
661 return (ETIMEDOUT);
662 }
663 }
664
665 return (0);
666}
667
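/*
 * Reset a single channel, either fully or partially. After a full reset
 * the channel configuration register is reprogrammed according to the
 * SEC version.
 */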
668static int
669sec_channel_reset(struct sec_softc *sc, int channel, int full)
670{
671 int timeout = SEC_TIMEOUT;
672 uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
673 uint64_t reg;
674
675 /* Reset Channel */
676 reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
677 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
678
679 while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
680 DELAY(1000);
681 timeout -= 1000;
682
683 if (timeout < 0) {
684 device_printf(sc->sc_dev, "timeout while waiting for "
685 "channel reset!\n");
686 return (ETIMEDOUT);
687 }
688 }
689
690 if (full) {
691 reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
692
693 switch(sc->sc_version) {
694 case 2:
695 reg |= SEC_CHAN_CCR_CDWE;
696 break;
697 case 3:
698 reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
699 break;
700 }
701
702 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
703 }
704
705 return (0);
706}
707
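/*
 * Bring the engine into a known state: reset the controller twice to
 * clear pending interrupts, fully reset every channel and enable the
 * per-channel done and error interrupts (plus SEC_INT_ITO).
 */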
708static int
709sec_init(struct sec_softc *sc)
710{
711 uint64_t reg;
712 int error, i;
713
714 /* Reset controller twice to clear all pending interrupts */
715 error = sec_controller_reset(sc);
716 if (error)
717 return (error);
718
719 error = sec_controller_reset(sc);
720 if (error)
721 return (error);
722
723 /* Reset channels */
724 for (i = 0; i < SEC_CHANNELS; i++) {
725 error = sec_channel_reset(sc, i, 1);
726 if (error)
727 return (error);
728 }
729
730 /* Enable Interrupts */
731 reg = SEC_INT_ITO;
732 for (i = 0; i < SEC_CHANNELS; i++)
733 reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
734
735 SEC_WRITE(sc, SEC_IER, reg);
736
737 return (error);
738}
739
740static void
741sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
742{
743 struct sec_dma_mem *dma_mem = arg;
744
745 if (error)
746 return;
747
748 KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
749 dma_mem->dma_paddr = segs->ds_addr;
750}
751
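/*
 * Busdma callback for crypto data mappings: walk the returned segments,
 * skip the first sdmi_offset bytes and build a chain of link table
 * entries covering at most sdmi_size bytes.
 */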
752static void
753sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
754 int error)
755{
756 struct sec_desc_map_info *sdmi = arg;
757 struct sec_softc *sc = sdmi->sdmi_sc;
758 struct sec_lt *lt = NULL;
759 bus_addr_t addr;
760 bus_size_t size;
761 int i;
762
763 SEC_LOCK_ASSERT(sc, descriptors);
764
765 if (error)
766 return;
767
768 for (i = 0; i < nseg; i++) {
769 addr = segs[i].ds_addr;
770 size = segs[i].ds_len;
771
772 /* Skip requested offset */
773 if (sdmi->sdmi_offset >= size) {
774 sdmi->sdmi_offset -= size;
775 continue;
776 }
777
778 addr += sdmi->sdmi_offset;
779 size -= sdmi->sdmi_offset;
780 sdmi->sdmi_offset = 0;
781
782 /* Do not link more than requested */
783 if (sdmi->sdmi_size < size)
784 size = sdmi->sdmi_size;
785
786 lt = SEC_ALLOC_LT_ENTRY(sc);
787 lt->sl_lt->shl_length = size;
788 lt->sl_lt->shl_r = 0;
789 lt->sl_lt->shl_n = 0;
790 lt->sl_lt->shl_ptr = addr;
791
792 if (sdmi->sdmi_lt_first == NULL)
793 sdmi->sdmi_lt_first = lt;
794
795 sdmi->sdmi_lt_used += 1;
796
797 if ((sdmi->sdmi_size -= size) == 0)
798 break;
799 }
800
801 sdmi->sdmi_lt_last = lt;
802}
803
804static void
805sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
806 bus_size_t size, int error)
807{
808
809 sec_dma_map_desc_cb(arg, segs, nseg, error);
810}
811
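/*
 * Allocate a DMA-safe buffer of the given size as a single segment and
 * record its bus address; dma_is_map == 0 marks it as driver-allocated
 * memory rather than a mapping of external data.
 */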
812static int
813sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
814 bus_size_t size)
815{
816 int error;
817
818 if (dma_mem->dma_vaddr != NULL)
819 return (EBUSY);
820
821 error = bus_dma_tag_create(NULL, /* parent */
822 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
823 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
824 BUS_SPACE_MAXADDR, /* highaddr */
825 NULL, NULL, /* filtfunc, filtfuncarg */
826 size, 1, /* maxsize, nsegments */
827 size, 0, /* maxsegsz, flags */
828 NULL, NULL, /* lockfunc, lockfuncarg */
829 &(dma_mem->dma_tag)); /* dmat */
830
831 if (error) {
832 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
833 " %i!\n", error);
834 goto err1;
835 }
836
837 error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
838 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
839
840 if (error) {
841 device_printf(sc->sc_dev, "failed to allocate DMA safe"
842 " memory, error %i!\n", error);
843 goto err2;
844 }
845
846 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
847 dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
848 BUS_DMA_NOWAIT);
849
850 if (error) {
851 device_printf(sc->sc_dev, "cannot get address of the DMA"
852 " memory, error %i\n", error);
853 goto err3;
854 }
855
856 dma_mem->dma_is_map = 0;
857 return (0);
858
859err3:
860 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
861err2:
862 bus_dma_tag_destroy(dma_mem->dma_tag);
863err1:
864 dma_mem->dma_vaddr = NULL;
865 	return (error);
866}
867
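/*
 * Map external data (flat memory, uio or mbuf chain) for DMA and describe
 * it with link table entries via sec_dma_map_desc_cb(); dma_is_map == 1
 * marks the result as a mapping of external data.
 */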
868static int
869sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
870 bus_size_t size, int type, struct sec_desc_map_info *sdmi)
871{
872 int error;
873
874 if (dma_mem->dma_vaddr != NULL)
875 return (EBUSY);
876
877 switch (type) {
878 case SEC_MEMORY:
879 break;
880 case SEC_UIO:
881 size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
882 break;
883 case SEC_MBUF:
884 size = m_length((struct mbuf*)mem, NULL);
885 break;
886 default:
887 return (EINVAL);
888 }
889
890 error = bus_dma_tag_create(NULL, /* parent */
891 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
892 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
893 BUS_SPACE_MAXADDR, /* highaddr */
894 NULL, NULL, /* filtfunc, filtfuncarg */
895 size, /* maxsize */
896 SEC_FREE_LT_CNT(sc), /* nsegments */
897 SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */
898 NULL, NULL, /* lockfunc, lockfuncarg */
899 &(dma_mem->dma_tag)); /* dmat */
900
901 if (error) {
902 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
903 " %i!\n", error);
904 dma_mem->dma_vaddr = NULL;
905 return (error);
906 }
907
908 error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
909
910 if (error) {
911 device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
912 "\n", error);
913 bus_dma_tag_destroy(dma_mem->dma_tag);
914 return (error);
915 }
916
917 switch (type) {
918 case SEC_MEMORY:
919 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
920 mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
921 break;
922 case SEC_UIO:
923 error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
924 mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
925 break;
926 case SEC_MBUF:
927 error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
928 mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
929 break;
930 }
931
932 if (error) {
933 device_printf(sc->sc_dev, "cannot get address of the DMA"
934 " memory, error %i!\n", error);
935 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
936 bus_dma_tag_destroy(dma_mem->dma_tag);
937 return (error);
938 }
939
940 dma_mem->dma_is_map = 1;
941 dma_mem->dma_vaddr = mem;
942
943 return (0);
944}
945
946static void
947sec_free_dma_mem(struct sec_dma_mem *dma_mem)
948{
949
950 /* Check for double free */
951 if (dma_mem->dma_vaddr == NULL)
952 return;
953
954 bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
955
956 if (dma_mem->dma_is_map)
957 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
958 else
959 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
960 dma_mem->dma_map);
961
962 bus_dma_tag_destroy(dma_mem->dma_tag);
963 dma_mem->dma_vaddr = NULL;
964}
965
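/*
 * Return the channel to which the given execution unit (EU) is currently
 * assigned (based on SEC_EUASR), or -1 if it is not assigned to any
 * channel.
 */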
966static int
967sec_eu_channel(struct sec_softc *sc, int eu)
968{
969 uint64_t reg;
970 int channel = 0;
971
972 SEC_LOCK_ASSERT(sc, controller);
973
974 reg = SEC_READ(sc, SEC_EUASR);
975
976 switch (eu) {
977 case SEC_EU_AFEU:
978 channel = SEC_EUASR_AFEU(reg);
979 break;
980 case SEC_EU_DEU:
981 channel = SEC_EUASR_DEU(reg);
982 break;
983 case SEC_EU_MDEU_A:
984 case SEC_EU_MDEU_B:
985 channel = SEC_EUASR_MDEU(reg);
986 break;
987 case SEC_EU_RNGU:
988 channel = SEC_EUASR_RNGU(reg);
989 break;
990 case SEC_EU_PKEU:
991 channel = SEC_EUASR_PKEU(reg);
992 break;
993 case SEC_EU_AESU:
994 channel = SEC_EUASR_AESU(reg);
995 break;
996 case SEC_EU_KEU:
997 channel = SEC_EUASR_KEU(reg);
998 break;
999 case SEC_EU_CRCU:
1000 channel = SEC_EUASR_CRCU(reg);
1001 break;
1002 }
1003
1004 return (channel - 1);
1005}
1006
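/*
 * Hand a descriptor to a channel by writing its physical address into the
 * channel FIFO. If channel is negative, pick a free channel first; return
 * the channel used, or -1 if no channel can accept the descriptor (none
 * free or FIFO full).
 */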
1007static int
1008sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1009{
1010 u_int fflvl = SEC_MAX_FIFO_LEVEL;
1011 uint64_t reg;
1012 int i;
1013
1014 SEC_LOCK_ASSERT(sc, controller);
1015
1016 	/* Find a free channel if we have not been given one */
1017 	if (channel < 0) {
1018 		for (i = 0; i < SEC_CHANNELS; i++) {
1019 			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
1020
1021 if ((reg & sc->sc_channel_idle_mask) == 0) {
1022 channel = i;
1023 break;
1024 }
1025 }
1026 }
1027
1028 /* There is no free channel */
1029 if (channel < 0)
1030 return (-1);
1031
1032 /* Check FIFO level on selected channel */
1033 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1034
1035 switch(sc->sc_version) {
1036 case 2:
1037 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1038 break;
1039 case 3:
1040 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1041 break;
1042 }
1043
1044 if (fflvl >= SEC_MAX_FIFO_LEVEL)
1045 return (-1);
1046
1047 /* Enqueue descriptor in channel */
1048 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1049
1050 return (channel);
1051}
1052
1053static void
1054sec_enqueue(struct sec_softc *sc)
1055{
1056 struct sec_desc *desc;
1057 int ch0, ch1;
1058
1059 SEC_LOCK(sc, descriptors);
1060 SEC_LOCK(sc, controller);
1061
1062 while (SEC_READY_DESC_CNT(sc) > 0) {
1063 desc = SEC_GET_READY_DESC(sc);
1064
1065 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1066 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1067
1068 /*
1069 		 * Both EUs are used by the same channel.
1070 * Enqueue descriptor in channel used by busy EUs.
1071 */
1072 if (ch0 >= 0 && ch0 == ch1) {
1073 if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1074 SEC_DESC_READY2QUEUED(sc);
1075 continue;
1076 }
1077 }
1078
1079 /*
1080 * Only one EU is free.
1081 * Enqueue descriptor in channel used by busy EU.
1082 */
1083 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1084 if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1085 >= 0) {
1086 SEC_DESC_READY2QUEUED(sc);
1087 continue;
1088 }
1089 }
1090
1091 /*
1092 		 * Both EUs are free.
1093 * Enqueue descriptor in first free channel.
1094 */
1095 if (ch0 < 0 && ch1 < 0) {
1096 if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1097 SEC_DESC_READY2QUEUED(sc);
1098 continue;
1099 }
1100 }
1101
1102 		/* Current descriptor cannot be queued at the moment */
1103 SEC_PUT_BACK_READY_DESC(sc);
1104 break;
1105 }
1106
1107 SEC_UNLOCK(sc, controller);
1108 SEC_UNLOCK(sc, descriptors);
1109}
1110
1111static struct sec_desc *
1112sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1113{
1114 struct sec_desc *desc = NULL;
1115 int i;
1116
1117 SEC_LOCK_ASSERT(sc, descriptors);
1118
1119 for (i = 0; i < SEC_CHANNELS; i++) {
1120 if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1121 desc = &(sc->sc_desc[i]);
1122 break;
1123 }
1124 }
1125
1126 return (desc);
1127}
1128
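/*
 * Fill descriptor pointer number n with a direct physical address and
 * length (no link table is used).
 */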
1129static int
1130sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1131 bus_addr_t data, bus_size_t dsize)
1132{
1133 struct sec_hw_desc_ptr *ptr;
1134
1135 SEC_LOCK_ASSERT(sc, descriptors);
1136
1137 ptr = &(desc->sd_desc->shd_pointer[n]);
1138 ptr->shdp_length = dsize;
1139 ptr->shdp_extent = 0;
1140 ptr->shdp_j = 0;
1141 ptr->shdp_ptr = data;
1142
1143 return (0);
1144}
1145
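/*
 * Fill descriptor pointer number n with crypto request data: the data is
 * DMA-mapped and described by a chain of link table entries, and the
 * pointer is set to reference that chain.
 */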
1146static int
1147sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1148 u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1149{
1150 struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1151 struct sec_hw_desc_ptr *ptr;
1152 int error;
1153
1154 SEC_LOCK_ASSERT(sc, descriptors);
1155
1156 	/* For flat memory, map only the requested region */
1157 if (dtype == SEC_MEMORY) {
1158 data = (uint8_t*)(data) + doffset;
1159 sdmi.sdmi_offset = 0;
1160 }
1161
1162 error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1163 dtype, &sdmi);
1164
1165 if (error)
1166 return (error);
1167
1168 sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1169 desc->sd_lt_used += sdmi.sdmi_lt_used;
1170
1171 ptr = &(desc->sd_desc->shd_pointer[n]);
1172 ptr->shdp_length = dsize;
1173 ptr->shdp_extent = 0;
1174 ptr->shdp_j = 1;
1175 ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1176
1177 return (0);
1178}
1179
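/*
 * Split a session initialization request into at most one cipher and one
 * digest operation; algorithms the MDEU can handle are treated as the
 * digest part.
 */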
1180static int
1181sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1182 struct cryptoini **mac)
1183{
1184 struct cryptoini *e, *m;
1185
1186 e = cri;
1187 m = cri->cri_next;
1188
1189 	/* We can handle only two operations */
1190 if (m && m->cri_next)
1191 return (EINVAL);
1192
1193 if (sec_mdeu_can_handle(e->cri_alg)) {
1194 cri = m;
1195 m = e;
1196 e = cri;
1197 }
1198
1199 if (m && !sec_mdeu_can_handle(m->cri_alg))
1200 return (EINVAL);
1201
1202 *enc = e;
1203 *mac = m;
1204
1205 return (0);
1206}
1207
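/* Same as sec_split_cri(), but for the descriptors of a crypto request. */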
1208static int
1209sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1210 struct cryptodesc **mac)
1211{
1212 struct cryptodesc *e, *m, *t;
1213
1214 e = crp->crp_desc;
1215 m = e->crd_next;
1216
1217 	/* We can handle only two operations */
1218 if (m && m->crd_next)
1219 return (EINVAL);
1220
1221 if (sec_mdeu_can_handle(e->crd_alg)) {
1222 t = m;
1223 m = e;
1224 e = t;
1225 }
1226
1227 if (m && !sec_mdeu_can_handle(m->crd_alg))
1228 return (EINVAL);
1229
1230 *enc = e;
1231 *mac = m;
1232
1233 return (0);
1234}
1235
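/*
 * Claim the first unused session slot and return its index, or -1 if all
 * SEC_MAX_SESSIONS slots are in use.
 */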
1236static int
1237sec_alloc_session(struct sec_softc *sc)
1238{
1239 struct sec_session *ses = NULL;
1240 int sid = -1;
1241 u_int i;
1242
1243 SEC_LOCK(sc, sessions);
1244
1245 for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1246 if (sc->sc_sessions[i].ss_used == 0) {
1247 ses = &(sc->sc_sessions[i]);
1248 ses->ss_used = 1;
1249 ses->ss_ivlen = 0;
1250 ses->ss_klen = 0;
1251 ses->ss_mklen = 0;
1252 sid = i;
1253 break;
1254 }
1255 }
1256
1257 SEC_UNLOCK(sc, sessions);
1258
1259 return (sid);
1260}
1261
1262static struct sec_session *
1263sec_get_session(struct sec_softc *sc, u_int sid)
1264{
1265 struct sec_session *ses;
1266
1267 if (sid >= SEC_MAX_SESSIONS)
1268 return (NULL);
1269
1270 SEC_LOCK(sc, sessions);
1271
1272 ses = &(sc->sc_sessions[sid]);
1273
1274 if (ses->ss_used == 0)
1275 ses = NULL;
1276
1277 SEC_UNLOCK(sc, sessions);
1278
1279 return (ses);
1280}
1281
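/*
 * crypto(9) newsession method: validate key lengths, allocate a session,
 * bind it to the first execution unit whose sem_newsession() accepts the
 * requested algorithms and cache the cipher and MAC keys.
 */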
1282static int
1283sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
1284{
1285 struct sec_softc *sc = device_get_softc(dev);
1286 struct sec_eu_methods *eu = sec_eus;
1287 struct cryptoini *enc = NULL;
1288 struct cryptoini *mac = NULL;
1289 struct sec_session *ses;
1290 int error = -1;
1291 int sid;
1292
1293 error = sec_split_cri(cri, &enc, &mac);
1294 if (error)
1295 return (error);
1296
1297 /* Check key lengths */
1298 if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1299 return (E2BIG);
1300
1301 if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1302 return (E2BIG);
1303
1304 /* Only SEC 3.0 supports digests larger than 256 bits */
1305 if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1306 return (E2BIG);
1307
1308 sid = sec_alloc_session(sc);
1309 if (sid < 0)
1310 return (ENOMEM);
1311
1312 ses = sec_get_session(sc, sid);
1313
1314 /* Find EU for this session */
1315 while (eu->sem_make_desc != NULL) {
1316 error = eu->sem_newsession(sc, ses, enc, mac);
1317 if (error >= 0)
1318 break;
1319
1320 eu++;
1321 }
1322
1323 /* If not found, return EINVAL */
1324 if (error < 0) {
1325 sec_free_session(sc, ses);
1326 return (EINVAL);
1327 }
1328
1329 /* Save cipher key */
1330 if (enc && enc->cri_key) {
1331 ses->ss_klen = enc->cri_klen / 8;
1332 memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1333 }
1334
1335 /* Save digest key */
1336 if (mac && mac->cri_key) {
1337 ses->ss_mklen = mac->cri_klen / 8;
1338 memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
1339 }
1340
1341 ses->ss_eu = eu;
1342 *sidp = sid;
1343
1344 return (0);
1345}
1346
1347static int
1348sec_freesession(device_t dev, uint64_t tid)
1349{
1350 struct sec_softc *sc = device_get_softc(dev);
1351 struct sec_session *ses;
1352 int error = 0;
1353
1354 ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1355 if (ses == NULL)
1356 return (EINVAL);
1357
1358 sec_free_session(sc, ses);
1359
1360 return (error);
1361}
1362
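/*
 * crypto(9) process method: build a hardware descriptor for the request
 * (IV and key handling plus EU-specific descriptor setup), mark it ready
 * and call sec_enqueue(). Returns ERESTART when no free descriptor is
 * available, so the crypto layer will queue the request.
 */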
1363static int
1364sec_process(device_t dev, struct cryptop *crp, int hint)
1365{
1366 struct sec_softc *sc = device_get_softc(dev);
1367 struct sec_desc *desc = NULL;
1368 struct cryptodesc *mac, *enc;
1369 struct sec_session *ses;
1370 int buftype, error = 0;
1371
1372 /* Check Session ID */
1373 ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1374 if (ses == NULL) {
1375 crp->crp_etype = EINVAL;
1376 crypto_done(crp);
1377 return (0);
1378 }
1379
1380 /* Check for input length */
1381 if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1382 crp->crp_etype = E2BIG;
1383 crypto_done(crp);
1384 return (0);
1385 }
1386
1387 /* Get descriptors */
1388 if (sec_split_crp(crp, &enc, &mac)) {
1389 crp->crp_etype = EINVAL;
1390 crypto_done(crp);
1391 return (0);
1392 }
1393
1394 SEC_LOCK(sc, descriptors);
1395 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1396
1397 	/* Block driver if there are no free descriptors or we are going down */
1398 if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1399 sc->sc_blocked |= CRYPTO_SYMQ;
1400 SEC_UNLOCK(sc, descriptors);
1401 return (ERESTART);
1402 }
1403
1404 /* Prepare descriptor */
1405 desc = SEC_GET_FREE_DESC(sc);
1406 desc->sd_lt_used = 0;
1407 desc->sd_error = 0;
1408 desc->sd_crp = crp;
1409
1410 if (crp->crp_flags & CRYPTO_F_IOV)
1411 buftype = SEC_UIO;
1412 else if (crp->crp_flags & CRYPTO_F_IMBUF)
1413 buftype = SEC_MBUF;
1414 else
1415 buftype = SEC_MEMORY;
1416
1417 if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1418 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1419 memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1420 ses->ss_ivlen);
1421 else
1422 arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1423
1424 if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1425 crypto_copyback(crp->crp_flags, crp->crp_buf,
1426 enc->crd_inject, ses->ss_ivlen,
1427 desc->sd_desc->shd_iv);
1428 } else if (enc) {
1429 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1430 memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1431 ses->ss_ivlen);
1432 else
1433 crypto_copydata(crp->crp_flags, crp->crp_buf,
1434 enc->crd_inject, ses->ss_ivlen,
1435 desc->sd_desc->shd_iv);
1436 }
1437
1438 if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1439 if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1440 ses->ss_klen = enc->crd_klen / 8;
1441 memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1442 } else
1443 error = E2BIG;
1444 }
1445
1446 if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1447 if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1448 ses->ss_mklen = mac->crd_klen / 8;
1449 memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1450 } else
1451 error = E2BIG;
1452 }
1453
1454 if (!error) {
1455 memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1456 memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1457
1458 error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
1459 }
1460
1461 if (error) {
1462 SEC_DESC_FREE_POINTERS(desc);
1463 SEC_DESC_PUT_BACK_LT(sc, desc);
1464 SEC_PUT_BACK_FREE_DESC(sc);
1465 SEC_UNLOCK(sc, descriptors);
1466 crp->crp_etype = error;
1467 crypto_done(crp);
1468 return (0);
1469 }
1470
1471 /*
1472 * Skip DONE interrupt if this is not last request in burst, but only
1473 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
1474 * signaling on each descriptor.
1475 */
1476 if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1477 desc->sd_desc->shd_dn = 0;
1478 else
1479 desc->sd_desc->shd_dn = 1;
1480
1481 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1482 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1483 BUS_DMASYNC_POSTWRITE);
1484 SEC_DESC_FREE2READY(sc);
1485 SEC_UNLOCK(sc, descriptors);
1486
1487 /* Enqueue ready descriptors in hardware */
1488 sec_enqueue(sc);
1489
1490 return (0);
1491}
1492
1493static int
1494sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1495 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1496 int buftype)
1497{
1498 struct sec_hw_desc *hd = desc->sd_desc;
1499 int error;
1500
1501 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1502 hd->shd_eu_sel1 = SEC_EU_NONE;
1503 hd->shd_mode1 = 0;
1504
1505 /* Pointer 0: NULL */
1506 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1507 if (error)
1508 return (error);
1509
1510 /* Pointer 1: IV IN */
1511 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1512 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1513 if (error)
1514 return (error);
1515
1516 /* Pointer 2: Cipher Key */
1517 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1518 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1519 if (error)
1520 return (error);
1521
1522 /* Pointer 3: Data IN */
1523 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1524 enc->crd_len, buftype);
1525 if (error)
1526 return (error);
1527
1528 /* Pointer 4: Data OUT */
1529 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1530 enc->crd_len, buftype);
1531 if (error)
1532 return (error);
1533
1534 /* Pointer 5: IV OUT (Not used: NULL) */
1535 error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1536 if (error)
1537 return (error);
1538
1539 /* Pointer 6: NULL */
1540 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1541
1542 return (error);
1543}
1544
1545static int
1546sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1547 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1548 struct cryptodesc *mac, int buftype)
1549{
1550 struct sec_hw_desc *hd = desc->sd_desc;
1551 u_int eu, mode, hashlen;
1552 int error;
1553
1554 if (mac->crd_len < enc->crd_len)
1555 return (EINVAL);
1556
1557 if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1558 return (EINVAL);
1559
1560 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1561 if (error)
1562 return (error);
1563
1564 hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1565 hd->shd_eu_sel1 = eu;
1566 hd->shd_mode1 = mode;
1567
1568 /* Pointer 0: HMAC Key */
1569 error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1570 offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1571 if (error)
1572 return (error);
1573
1574 /* Pointer 1: HMAC-Only Data IN */
1575 error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1576 mac->crd_len - enc->crd_len, buftype);
1577 if (error)
1578 return (error);
1579
1580 /* Pointer 2: Cipher Key */
1581 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1582 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1583 if (error)
1584 return (error);
1585
1586 /* Pointer 3: IV IN */
1587 error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1588 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1589 if (error)
1590 return (error);
1591
1592 /* Pointer 4: Data IN */
1593 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1594 enc->crd_len, buftype);
1595 if (error)
1596 return (error);
1597
1598 /* Pointer 5: Data OUT */
1599 error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1600 enc->crd_len, buftype);
1601 if (error)
1602 return (error);
1603
1604 /* Pointer 6: HMAC OUT */
1605 error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1606 hashlen, buftype);
1607
1608 return (error);
1609}
1610
1611/* AESU */
1612
1613static int
1614sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1615 struct cryptoini *enc, struct cryptoini *mac)
1616{
1617
1618 if (enc == NULL)
1619 return (-1);
1620
1621 if (enc->cri_alg != CRYPTO_AES_CBC)
1622 return (-1);
1623
1624 ses->ss_ivlen = AES_BLOCK_LEN;
1625
1626 return (0);
1627}
1628
1629static int
1630sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1631 struct sec_desc *desc, struct cryptop *crp, int buftype)
1632{
1633 struct sec_hw_desc *hd = desc->sd_desc;
1634 struct cryptodesc *enc, *mac;
1635 int error;
1636
1637 error = sec_split_crp(crp, &enc, &mac);
1638 if (error)
1639 return (error);
1640
1641 if (!enc)
1642 return (EINVAL);
1643
1644 hd->shd_eu_sel0 = SEC_EU_AESU;
1645 hd->shd_mode0 = SEC_AESU_MODE_CBC;
1646
1647 if (enc->crd_alg != CRYPTO_AES_CBC)
1648 return (EINVAL);
1649
1650 if (enc->crd_flags & CRD_F_ENCRYPT) {
1651 hd->shd_mode0 |= SEC_AESU_MODE_ED;
1652 hd->shd_dir = 0;
1653 } else
1654 hd->shd_dir = 1;
1655
1656 if (mac)
1657 error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1658 buftype);
1659 else
1660 error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1661 buftype);
1662
1663 return (error);
1664}
1665
1666/* DEU */
1667
1668static int
1669sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1670 struct cryptoini *enc, struct cryptoini *mac)
1671{
1672
1673 if (enc == NULL)
1674 return (-1);
1675
1676 switch (enc->cri_alg) {
1677 case CRYPTO_DES_CBC:
1678 case CRYPTO_3DES_CBC:
1679 break;
1680 default:
1681 return (-1);
1682 }
1683
1684 ses->ss_ivlen = DES_BLOCK_LEN;
1685
1686 return (0);
1687}
1688
1689static int
1690sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1691 struct sec_desc *desc, struct cryptop *crp, int buftype)
1692{
1693 struct sec_hw_desc *hd = desc->sd_desc;
1694 struct cryptodesc *enc, *mac;
1695 int error;
1696
1697 error = sec_split_crp(crp, &enc, &mac);
1698 if (error)
1699 return (error);
1700
1701 if (!enc)
1702 return (EINVAL);
1703
1704 hd->shd_eu_sel0 = SEC_EU_DEU;
1705 hd->shd_mode0 = SEC_DEU_MODE_CBC;
1706
1707 switch (enc->crd_alg) {
1708 case CRYPTO_3DES_CBC:
1709 hd->shd_mode0 |= SEC_DEU_MODE_TS;
1710 break;
1711 case CRYPTO_DES_CBC:
1712 break;
1713 default:
1714 return (EINVAL);
1715 }
1716
1717 if (enc->crd_flags & CRD_F_ENCRYPT) {
1718 hd->shd_mode0 |= SEC_DEU_MODE_ED;
1719 hd->shd_dir = 0;
1720 } else
1721 hd->shd_dir = 1;
1722
1723 if (mac)
1724 error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1725 buftype);
1726 else
1727 error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1728 buftype);
1729
1730 return (error);
1731}
1732
1733/* MDEU */
1734
1735static int
1736sec_mdeu_can_handle(u_int alg)
1737{
1738 switch (alg) {
1739 case CRYPTO_MD5:
1740 case CRYPTO_SHA1:
1741 case CRYPTO_MD5_HMAC:
1742 case CRYPTO_SHA1_HMAC:
1743 case CRYPTO_SHA2_256_HMAC:
1744 case CRYPTO_SHA2_384_HMAC:
1745 case CRYPTO_SHA2_512_HMAC:
1746 return (1);
1747 default:
1748 return (0);
1749 }
1750}
1751
1752static int
1753sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1754{
1755
1756 *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1757 *eu = SEC_EU_NONE;
1758
1759 switch (crd->crd_alg) {
1760 case CRYPTO_MD5_HMAC:
1761 *mode |= SEC_MDEU_MODE_HMAC;
1762 /* FALLTHROUGH */
1763 case CRYPTO_MD5:
1764 *eu = SEC_EU_MDEU_A;
1765 *mode |= SEC_MDEU_MODE_MD5;
1766 *hashlen = MD5_HASH_LEN;
1767 break;
1768 case CRYPTO_SHA1_HMAC:
1769 *mode |= SEC_MDEU_MODE_HMAC;
1770 /* FALLTHROUGH */
1771 case CRYPTO_SHA1:
1772 *eu = SEC_EU_MDEU_A;
1773 *mode |= SEC_MDEU_MODE_SHA1;
1774 *hashlen = SHA1_HASH_LEN;
1775 break;
1776 case CRYPTO_SHA2_256_HMAC:
1777 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1778 *eu = SEC_EU_MDEU_A;
1779 break;
1780 case CRYPTO_SHA2_384_HMAC:
1781 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1782 *eu = SEC_EU_MDEU_B;
1783 break;
1784 case CRYPTO_SHA2_512_HMAC:
1785 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1786 *eu = SEC_EU_MDEU_B;
1787 break;
1788 default:
1789 return (EINVAL);
1790 }
1791
1792 if (*mode & SEC_MDEU_MODE_HMAC)
1793 *hashlen = SEC_HMAC_HASH_LEN;
1794
1795 return (0);
1796}
1797
1798static int
1799sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1800 struct cryptoini *enc, struct cryptoini *mac)
1801{
1802
1803 if (mac && sec_mdeu_can_handle(mac->cri_alg))
1804 return (0);
1805
1806 return (-1);
1807}
1808
1809static int
1810sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1811 struct sec_desc *desc, struct cryptop *crp, int buftype)
1812{
1813 struct cryptodesc *enc, *mac;
1814 struct sec_hw_desc *hd = desc->sd_desc;
1815 u_int eu, mode, hashlen;
1816 int error;
1817
1818 error = sec_split_crp(crp, &enc, &mac);
1819 if (error)
1820 return (error);
1821
1822 if (enc)
1823 return (EINVAL);
1824
1825 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1826 if (error)
1827 return (error);
1828
1829 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1830 hd->shd_eu_sel0 = eu;
1831 hd->shd_mode0 = mode;
1832 hd->shd_eu_sel1 = SEC_EU_NONE;
1833 hd->shd_mode1 = 0;
1834
1835 /* Pointer 0: NULL */
1836 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1837 if (error)
1838 return (error);
1839
1840 /* Pointer 1: Context In (Not used: NULL) */
1841 error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1842 if (error)
1843 return (error);
1844
1845 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1846 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1847 error = sec_make_pointer_direct(sc, desc, 2,
1848 desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1849 shd_mkey), ses->ss_mklen);
1850 else
1851 error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1852
1853 if (error)
1854 return (error);
1855
1856 /* Pointer 3: Input Data */
1857 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1858 mac->crd_len, buftype);
1859 if (error)
1860 return (error);
1861
1862 /* Pointer 4: NULL */
1863 error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1864 if (error)
1865 return (error);
1866
1867 /* Pointer 5: Hash out */
1868 error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1869 mac->crd_inject, hashlen, buftype);
1870 if (error)
1871 return (error);
1872
1873 /* Pointer 6: NULL */
1874 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1875
1876 	return (error);
1877}