Deleted Added
full compact
g_eli.c (214163) g_eli.c (214225)
1/*-
2 * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2005-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 214163 2010-10-21 19:44:28Z pjd $");
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 214225 2010-10-22 22:13:11Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/linker.h>
34#include <sys/module.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/eventhandler.h>
41#include <sys/kthread.h>
42#include <sys/proc.h>
43#include <sys/sched.h>
44#include <sys/smp.h>
45#include <sys/uio.h>
46#include <sys/vnode.h>
47
48#include <vm/uma.h>
49
50#include <geom/geom.h>
51#include <geom/eli/g_eli.h>
52#include <geom/eli/pkcs5v2.h>
53
54
55MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
56
57SYSCTL_DECL(_kern_geom);
58SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
59int g_eli_debug = 0;
60TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
61SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
62 "Debug level");
63static u_int g_eli_tries = 3;
64TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
65SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
66 "Number of tries for entering the passphrase");
67static u_int g_eli_visible_passphrase = 0;
68TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
69SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
70 &g_eli_visible_passphrase, 0,
71 "Turn on echo when entering the passphrase (for debug purposes only!!)");
72u_int g_eli_overwrites = G_ELI_OVERWRITES;
73TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
74SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
75 0, "Number of times on-disk keys should be overwritten when destroying them");
76static u_int g_eli_threads = 0;
77TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
78SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
79 "Number of threads doing crypto work");
80u_int g_eli_batch = 0;
81TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
82SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
83 "Use crypto operations batching");
84
85static eventhandler_tag g_eli_pre_sync = NULL;
86
87static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
88 struct g_geom *gp);
89static void g_eli_init(struct g_class *mp);
90static void g_eli_fini(struct g_class *mp);
91
92static g_taste_t g_eli_taste;
93static g_dumpconf_t g_eli_dumpconf;
94
95struct g_class g_eli_class = {
96 .name = G_ELI_CLASS_NAME,
97 .version = G_VERSION,
98 .ctlreq = g_eli_config,
99 .taste = g_eli_taste,
100 .destroy_geom = g_eli_destroy_geom,
101 .init = g_eli_init,
102 .fini = g_eli_fini
103};
104
105
106/*
107 * Code paths:
108 * BIO_READ:
109 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
110 * BIO_WRITE:
111 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
112 */
113
114
115/*
116 * EAGAIN from crypto(9) means, that we were probably balanced to another crypto
117 * accelerator or something like this.
118 * The function updates the SID and reruns the operation.
119 */
120int
121g_eli_crypto_rerun(struct cryptop *crp)
122{
123 struct g_eli_softc *sc;
124 struct g_eli_worker *wr;
125 struct bio *bp;
126 int error;
127
128 bp = (struct bio *)crp->crp_opaque;
129 sc = bp->bio_to->geom->softc;
130 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
131 if (wr->w_number == bp->bio_pflags)
132 break;
133 }
134 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
135 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
136 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
137 (uintmax_t)crp->crp_sid);
138 wr->w_sid = crp->crp_sid;
139 crp->crp_etype = 0;
140 error = crypto_dispatch(crp);
141 if (error == 0)
142 return (0);
143 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
144 crp->crp_etype = error;
145 return (error);
146}
147
148/*
149 * The function is called after reading encrypted data from the provider.
150 *
151 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
152 */
153void
154g_eli_read_done(struct bio *bp)
155{
156 struct g_eli_softc *sc;
157 struct bio *pbp;
158
159 G_ELI_LOGREQ(2, bp, "Request done.");
160 pbp = bp->bio_parent;
161 if (pbp->bio_error == 0)
162 pbp->bio_error = bp->bio_error;
163 /*
164 * Do we have all sectors already?
165 */
166 pbp->bio_inbed++;
167 if (pbp->bio_inbed < pbp->bio_children)
168 return;
169 g_destroy_bio(bp);
170 sc = pbp->bio_to->geom->softc;
171 if (pbp->bio_error != 0) {
172 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
173 pbp->bio_completed = 0;
174 if (pbp->bio_driver2 != NULL) {
175 free(pbp->bio_driver2, M_ELI);
176 pbp->bio_driver2 = NULL;
177 }
178 g_io_deliver(pbp, pbp->bio_error);
179 atomic_subtract_int(&sc->sc_inflight, 1);
180 return;
181 }
182 mtx_lock(&sc->sc_queue_mtx);
183 bioq_insert_tail(&sc->sc_queue, pbp);
184 mtx_unlock(&sc->sc_queue_mtx);
185 wakeup(sc);
186}
187
188/*
189 * The function is called after we encrypt and write data.
190 *
191 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
192 */
193void
194g_eli_write_done(struct bio *bp)
195{
196 struct g_eli_softc *sc;
197 struct bio *pbp;
198
199 G_ELI_LOGREQ(2, bp, "Request done.");
200 pbp = bp->bio_parent;
201 if (pbp->bio_error == 0) {
202 if (bp->bio_error != 0)
203 pbp->bio_error = bp->bio_error;
204 }
205 /*
206 * Do we have all sectors already?
207 */
208 pbp->bio_inbed++;
209 if (pbp->bio_inbed < pbp->bio_children)
210 return;
211 free(pbp->bio_driver2, M_ELI);
212 pbp->bio_driver2 = NULL;
213 if (pbp->bio_error != 0) {
214 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
215 pbp->bio_error);
216 pbp->bio_completed = 0;
217 }
218 g_destroy_bio(bp);
219 /*
220 * Write is finished, send it up.
221 */
222 pbp->bio_completed = pbp->bio_length;
223 sc = pbp->bio_to->geom->softc;
224 g_io_deliver(pbp, pbp->bio_error);
225 atomic_subtract_int(&sc->sc_inflight, 1);
226}
227
228/*
229 * This function should never be called, but GEOM requires an ->orphan()
230 * method to be set for every geom.
231 */
232static void
233g_eli_orphan_spoil_assert(struct g_consumer *cp)
234{
235
236 panic("Function %s() called for %s.", __func__, cp->geom->name);
237}
238
239static void
240g_eli_orphan(struct g_consumer *cp)
241{
242 struct g_eli_softc *sc;
243
244 g_topology_assert();
245 sc = cp->geom->softc;
246 if (sc == NULL)
247 return;
248 g_eli_destroy(sc, TRUE);
249}
250
251/*
252 * BIO_READ:
253 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
254 * BIO_WRITE:
255 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
256 */
257static void
258g_eli_start(struct bio *bp)
259{
260 struct g_eli_softc *sc;
261 struct g_consumer *cp;
262 struct bio *cbp;
263
264 sc = bp->bio_to->geom->softc;
265 KASSERT(sc != NULL,
266 ("Provider's error should be set (error=%d)(device=%s).",
267 bp->bio_to->error, bp->bio_to->name));
268 G_ELI_LOGREQ(2, bp, "Request received.");
269
270 switch (bp->bio_cmd) {
271 case BIO_READ:
272 case BIO_WRITE:
273 case BIO_GETATTR:
274 case BIO_FLUSH:
275 break;
276 case BIO_DELETE:
277 /*
278 * We could eventually support BIO_DELETE request.
279 * It could be done by overwritting requested sector with
280 * random data g_eli_overwrites number of times.
281 */
282 default:
283 g_io_deliver(bp, EOPNOTSUPP);
284 return;
285 }
286 cbp = g_clone_bio(bp);
287 if (cbp == NULL) {
288 g_io_deliver(bp, ENOMEM);
289 return;
290 }
291 bp->bio_driver1 = cbp;
292 bp->bio_pflags = G_ELI_NEW_BIO;
293 switch (bp->bio_cmd) {
294 case BIO_READ:
295 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
296 g_eli_crypto_read(sc, bp, 0);
297 break;
298 }
299 /* FALLTHROUGH */
300 case BIO_WRITE:
301 mtx_lock(&sc->sc_queue_mtx);
302 bioq_insert_tail(&sc->sc_queue, bp);
303 mtx_unlock(&sc->sc_queue_mtx);
304 wakeup(sc);
305 break;
306 case BIO_GETATTR:
307 case BIO_FLUSH:
308 cbp->bio_done = g_std_done;
309 cp = LIST_FIRST(&sc->sc_geom->consumer);
310 cbp->bio_to = cp->provider;
311 G_ELI_LOGREQ(2, cbp, "Sending request.");
312 g_io_request(cbp, cp);
313 break;
314 }
315}
316
317static int
318g_eli_newsession(struct g_eli_worker *wr)
319{
320 struct g_eli_softc *sc;
321 struct cryptoini crie, cria;
322 int error;
323
324 sc = wr->w_softc;
325
326 bzero(&crie, sizeof(crie));
327 crie.cri_alg = sc->sc_ealgo;
328 crie.cri_klen = sc->sc_ekeylen;
329 if (sc->sc_ealgo == CRYPTO_AES_XTS)
330 crie.cri_klen <<= 1;
331 crie.cri_key = sc->sc_ekeys[0];
332 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
333 bzero(&cria, sizeof(cria));
334 cria.cri_alg = sc->sc_aalgo;
335 cria.cri_klen = sc->sc_akeylen;
336 cria.cri_key = sc->sc_akey;
337 crie.cri_next = &cria;
338 }
339
340 switch (sc->sc_crypto) {
341 case G_ELI_CRYPTO_SW:
342 error = crypto_newsession(&wr->w_sid, &crie,
343 CRYPTOCAP_F_SOFTWARE);
344 break;
345 case G_ELI_CRYPTO_HW:
346 error = crypto_newsession(&wr->w_sid, &crie,
347 CRYPTOCAP_F_HARDWARE);
348 break;
349 case G_ELI_CRYPTO_UNKNOWN:
350 error = crypto_newsession(&wr->w_sid, &crie,
351 CRYPTOCAP_F_HARDWARE);
352 if (error == 0) {
353 mtx_lock(&sc->sc_queue_mtx);
354 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
355 sc->sc_crypto = G_ELI_CRYPTO_HW;
356 mtx_unlock(&sc->sc_queue_mtx);
357 } else {
358 error = crypto_newsession(&wr->w_sid, &crie,
359 CRYPTOCAP_F_SOFTWARE);
360 mtx_lock(&sc->sc_queue_mtx);
361 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
362 sc->sc_crypto = G_ELI_CRYPTO_SW;
363 mtx_unlock(&sc->sc_queue_mtx);
364 }
365 break;
366 default:
367 panic("%s: invalid condition", __func__);
368 }
369
370 return (error);
371}
372
373static void
374g_eli_freesession(struct g_eli_worker *wr)
375{
376
377 crypto_freesession(wr->w_sid);
378}
379
380static void
381g_eli_cancel(struct g_eli_softc *sc)
382{
383 struct bio *bp;
384
385 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
386
387 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
388 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
389 ("Not new bio when canceling (bp=%p).", bp));
390 g_io_deliver(bp, ENXIO);
391 }
392}
393
394static struct bio *
395g_eli_takefirst(struct g_eli_softc *sc)
396{
397 struct bio *bp;
398
399 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
400
401 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
402 return (bioq_takefirst(&sc->sc_queue));
403 /*
404 * Device suspended, so we skip new I/O requests.
405 */
406 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
407 if (bp->bio_pflags != G_ELI_NEW_BIO)
408 break;
409 }
410 if (bp != NULL)
411 bioq_remove(&sc->sc_queue, bp);
412 return (bp);
413}
414
415/*
416 * This is the main function for kernel worker thread when we don't have
417 * hardware acceleration and we have to do cryptography in software.
418 * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM
419 * threads with crypto work.
420 */
421static void
422g_eli_worker(void *arg)
423{
424 struct g_eli_softc *sc;
425 struct g_eli_worker *wr;
426 struct bio *bp;
427 int error;
428
429 wr = arg;
430 sc = wr->w_softc;
431#ifdef SMP
432 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
433 if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
434 g_eli_threads == 0) {
435 while (!smp_started)
436 tsleep(wr, 0, "geli:smp", hz / 4);
437 }
438#endif
439 thread_lock(curthread);
440 sched_prio(curthread, PUSER);
441 if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
442 sched_bind(curthread, wr->w_number);
443 thread_unlock(curthread);
444
445 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
446
447 for (;;) {
448 mtx_lock(&sc->sc_queue_mtx);
449again:
450 bp = g_eli_takefirst(sc);
451 if (bp == NULL) {
452 if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
453 g_eli_cancel(sc);
454 LIST_REMOVE(wr, w_next);
455 g_eli_freesession(wr);
456 free(wr, M_ELI);
457 G_ELI_DEBUG(1, "Thread %s exiting.",
458 curthread->td_proc->p_comm);
459 wakeup(&sc->sc_workers);
460 mtx_unlock(&sc->sc_queue_mtx);
461 kproc_exit(0);
462 }
463 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
464 if (sc->sc_inflight > 0) {
465 G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight);
466 /*
467 * We still have inflight BIOs, so
468 * sleep and retry.
469 */
470 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
471 "geli:inf", hz / 5);
472 goto again;
473 }
474 /*
475 * Suspend requested, mark the worker as
476 * suspended and go to sleep.
477 */
478 if (wr->w_active) {
479 g_eli_freesession(wr);
480 wr->w_active = FALSE;
481 }
482 wakeup(&sc->sc_workers);
483 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
484 "geli:suspend", 0);
485 if (!wr->w_active &&
486 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
487 error = g_eli_newsession(wr);
488 KASSERT(error == 0,
489 ("g_eli_newsession() failed on resume (error=%d)",
490 error));
491 wr->w_active = TRUE;
492 }
493 goto again;
494 }
495 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
496 continue;
497 }
498 if (bp->bio_pflags == G_ELI_NEW_BIO)
499 atomic_add_int(&sc->sc_inflight, 1);
500 mtx_unlock(&sc->sc_queue_mtx);
501 if (bp->bio_pflags == G_ELI_NEW_BIO) {
502 bp->bio_pflags = 0;
503 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
504 if (bp->bio_cmd == BIO_READ)
505 g_eli_auth_read(sc, bp);
506 else
507 g_eli_auth_run(wr, bp);
508 } else {
509 if (bp->bio_cmd == BIO_READ)
510 g_eli_crypto_read(sc, bp, 1);
511 else
512 g_eli_crypto_run(wr, bp);
513 }
514 } else {
515 if (sc->sc_flags & G_ELI_FLAG_AUTH)
516 g_eli_auth_run(wr, bp);
517 else
518 g_eli_crypto_run(wr, bp);
519 }
520 }
521}
522
523/*
524 * Select encryption key. If G_ELI_FLAG_SINGLE_KEY is present we only have one
525 * key available for all the data. If the flag is not present select the key
526 * based on data offset.
527 */
528uint8_t *
529g_eli_crypto_key(struct g_eli_softc *sc, off_t offset, size_t blocksize)
530{
531 u_int nkey;
532
533 if (sc->sc_nekeys == 1)
534 return (sc->sc_ekeys[0]);
535
536 KASSERT(sc->sc_nekeys > 1, ("%s: sc_nekeys=%u", __func__,
537 sc->sc_nekeys));
538 KASSERT((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0,
539 ("%s: SINGLE_KEY flag set, but sc_nekeys=%u", __func__,
540 sc->sc_nekeys));
541
542 /* We switch key every 2^G_ELI_KEY_SHIFT blocks. */
543 nkey = (offset >> G_ELI_KEY_SHIFT) / blocksize;
544
545 KASSERT(nkey < sc->sc_nekeys, ("%s: nkey=%u >= sc_nekeys=%u", __func__,
546 nkey, sc->sc_nekeys));
547
548 return (sc->sc_ekeys[nkey]);
549}
550
551/*
552 * Here we generate IV. It is unique for every sector.
553 */
554void
555g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
556 size_t size)
557{
558 uint8_t off[8];
559
560 if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
561 bcopy(&offset, off, sizeof(off));
562 else
563 le64enc(off, (uint64_t)offset);
564
565 switch (sc->sc_ealgo) {
566 case CRYPTO_AES_XTS:
567 bcopy(off, iv, sizeof(off));
568 bzero(iv + sizeof(off), size - sizeof(off));
569 break;
570 default:
571 {
572 u_char hash[SHA256_DIGEST_LENGTH];
573 SHA256_CTX ctx;
574
575 /* Copy precalculated SHA256 context for IV-Key. */
576 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
577 SHA256_Update(&ctx, off, sizeof(off));
578 SHA256_Final(hash, &ctx);
579 bcopy(hash, iv, MIN(sizeof(hash), size));
580 break;
581 }
582 }
583}
584
585int
586g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
587 struct g_eli_metadata *md)
588{
589 struct g_geom *gp;
590 struct g_consumer *cp;
591 u_char *buf = NULL;
592 int error;
593
594 g_topology_assert();
595
596 gp = g_new_geomf(mp, "eli:taste");
597 gp->start = g_eli_start;
598 gp->access = g_std_access;
599 /*
600 * g_eli_read_metadata() is always called from the event thread.
601 * Our geom is created and destroyed in the same event, so there
602 * could be no orphan nor spoil event in the meantime.
603 */
604 gp->orphan = g_eli_orphan_spoil_assert;
605 gp->spoiled = g_eli_orphan_spoil_assert;
606 cp = g_new_consumer(gp);
607 error = g_attach(cp, pp);
608 if (error != 0)
609 goto end;
610 error = g_access(cp, 1, 0, 0);
611 if (error != 0)
612 goto end;
613 g_topology_unlock();
614 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
615 &error);
616 g_topology_lock();
617 if (buf == NULL)
618 goto end;
619 eli_metadata_decode(buf, md);
620end:
621 if (buf != NULL)
622 g_free(buf);
623 if (cp->provider != NULL) {
624 if (cp->acr == 1)
625 g_access(cp, -1, 0, 0);
626 g_detach(cp);
627 }
628 g_destroy_consumer(cp);
629 g_destroy_geom(gp);
630 return (error);
631}
632
633/*
634 * The function is called when we had last close on provider and user requested
635 * to close it when this situation occur.
636 */
637static void
638g_eli_last_close(struct g_eli_softc *sc)
639{
640 struct g_geom *gp;
641 struct g_provider *pp;
642 char ppname[64];
643 int error;
644
645 g_topology_assert();
646 gp = sc->sc_geom;
647 pp = LIST_FIRST(&gp->provider);
648 strlcpy(ppname, pp->name, sizeof(ppname));
649 error = g_eli_destroy(sc, TRUE);
650 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
651 ppname, error));
652 G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
653}
654
655int
656g_eli_access(struct g_provider *pp, int dr, int dw, int de)
657{
658 struct g_eli_softc *sc;
659 struct g_geom *gp;
660
661 gp = pp->geom;
662 sc = gp->softc;
663
664 if (dw > 0) {
665 if (sc->sc_flags & G_ELI_FLAG_RO) {
666 /* Deny write attempts. */
667 return (EROFS);
668 }
669 /* Someone is opening us for write, we need to remember that. */
670 sc->sc_flags |= G_ELI_FLAG_WOPEN;
671 return (0);
672 }
673 /* Is this the last close? */
674 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
675 return (0);
676
677 /*
678 * Automatically detach on last close if requested.
679 */
680 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
681 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
682 g_eli_last_close(sc);
683 }
684 return (0);
685}
686
/*
 * Report whether the given CPU is halted (and so should not get a bound
 * worker thread).  Always false on non-SMP kernels.
 *
 * Bug fix: the mask was built as `1 << cpu`, a signed shift that is
 * undefined behavior once cpu reaches the sign bit (cpu == 31 on ILP32
 * masks).  Use an unsigned constant so the shift is well defined.
 */
static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return ((hlt_cpus_mask & (1u << cpu)) != 0);
#else
	return (0);
#endif
}
696
697struct g_geom *
698g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
699 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
700{
701 struct g_eli_softc *sc;
702 struct g_eli_worker *wr;
703 struct g_geom *gp;
704 struct g_provider *pp;
705 struct g_consumer *cp;
706 u_int i, threads;
707 int error;
708
709 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
710
711 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
712 gp->softc = NULL; /* for a moment */
713
714 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
715 gp->start = g_eli_start;
716 /*
717 * Spoiling cannot happen actually, because we keep provider open for
718 * writing all the time or provider is read-only.
719 */
720 gp->spoiled = g_eli_orphan_spoil_assert;
721 gp->orphan = g_eli_orphan;
722 gp->dumpconf = g_eli_dumpconf;
723 /*
724 * If detach-on-last-close feature is not enabled and we don't operate
725 * on read-only provider, we can simply use g_std_access().
726 */
727 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
728 gp->access = g_eli_access;
729 else
730 gp->access = g_std_access;
731
732 sc->sc_inflight = 0;
733 sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
734 sc->sc_flags = md->md_flags;
735 /* Backward compatibility. */
736 if (md->md_version < 4)
737 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
738 if (md->md_version < 5)
739 sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
740 sc->sc_ealgo = md->md_ealgo;
741 sc->sc_nkey = nkey;
742
743 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
744 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
745 sc->sc_aalgo = md->md_aalgo;
746 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
747
748 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
749 /*
750 * Some hash functions (like SHA1 and RIPEMD160) generates hash
751 * which length is not multiple of 128 bits, but we want data
752 * length to be multiple of 128, so we can encrypt without
753 * padding. The line below rounds down data length to multiple
754 * of 128 bits.
755 */
756 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
757
758 sc->sc_bytes_per_sector =
759 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
760 sc->sc_bytes_per_sector *= bpp->sectorsize;
761 }
762
763 gp->softc = sc;
764 sc->sc_geom = gp;
765
766 bioq_init(&sc->sc_queue);
767 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
768
769 pp = NULL;
770 cp = g_new_consumer(gp);
771 error = g_attach(cp, bpp);
772 if (error != 0) {
773 if (req != NULL) {
774 gctl_error(req, "Cannot attach to %s (error=%d).",
775 bpp->name, error);
776 } else {
777 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
778 bpp->name, error);
779 }
780 goto failed;
781 }
782 /*
783 * Keep provider open all the time, so we can run critical tasks,
784 * like Master Keys deletion, without wondering if we can open
785 * provider or not.
786 * We don't open provider for writing only when user requested read-only
787 * access.
788 */
789 if (sc->sc_flags & G_ELI_FLAG_RO)
790 error = g_access(cp, 1, 0, 1);
791 else
792 error = g_access(cp, 1, 1, 1);
793 if (error != 0) {
794 if (req != NULL) {
795 gctl_error(req, "Cannot access %s (error=%d).",
796 bpp->name, error);
797 } else {
798 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
799 bpp->name, error);
800 }
801 goto failed;
802 }
803
804 sc->sc_sectorsize = md->md_sectorsize;
805 sc->sc_mediasize = bpp->mediasize;
806 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
807 sc->sc_mediasize -= bpp->sectorsize;
808 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
809 sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
810 else {
811 sc->sc_mediasize /= sc->sc_bytes_per_sector;
812 sc->sc_mediasize *= sc->sc_sectorsize;
813 }
814
815 /*
816 * Remember the keys in our softc structure.
817 */
818 g_eli_mkey_propagate(sc, mkey);
819 sc->sc_ekeylen = md->md_keylen;
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/linker.h>
34#include <sys/module.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/eventhandler.h>
41#include <sys/kthread.h>
42#include <sys/proc.h>
43#include <sys/sched.h>
44#include <sys/smp.h>
45#include <sys/uio.h>
46#include <sys/vnode.h>
47
48#include <vm/uma.h>
49
50#include <geom/geom.h>
51#include <geom/eli/g_eli.h>
52#include <geom/eli/pkcs5v2.h>
53
54
55MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
56
57SYSCTL_DECL(_kern_geom);
58SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
59int g_eli_debug = 0;
60TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
61SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
62 "Debug level");
63static u_int g_eli_tries = 3;
64TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
65SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
66 "Number of tries for entering the passphrase");
67static u_int g_eli_visible_passphrase = 0;
68TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
69SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
70 &g_eli_visible_passphrase, 0,
71 "Turn on echo when entering the passphrase (for debug purposes only!!)");
72u_int g_eli_overwrites = G_ELI_OVERWRITES;
73TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
74SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
75 0, "Number of times on-disk keys should be overwritten when destroying them");
76static u_int g_eli_threads = 0;
77TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
78SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
79 "Number of threads doing crypto work");
80u_int g_eli_batch = 0;
81TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
82SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
83 "Use crypto operations batching");
84
85static eventhandler_tag g_eli_pre_sync = NULL;
86
87static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
88 struct g_geom *gp);
89static void g_eli_init(struct g_class *mp);
90static void g_eli_fini(struct g_class *mp);
91
92static g_taste_t g_eli_taste;
93static g_dumpconf_t g_eli_dumpconf;
94
95struct g_class g_eli_class = {
96 .name = G_ELI_CLASS_NAME,
97 .version = G_VERSION,
98 .ctlreq = g_eli_config,
99 .taste = g_eli_taste,
100 .destroy_geom = g_eli_destroy_geom,
101 .init = g_eli_init,
102 .fini = g_eli_fini
103};
104
105
106/*
107 * Code paths:
108 * BIO_READ:
109 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
110 * BIO_WRITE:
111 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
112 */
113
114
115/*
116 * EAGAIN from crypto(9) means, that we were probably balanced to another crypto
117 * accelerator or something like this.
118 * The function updates the SID and reruns the operation.
119 */
120int
121g_eli_crypto_rerun(struct cryptop *crp)
122{
123 struct g_eli_softc *sc;
124 struct g_eli_worker *wr;
125 struct bio *bp;
126 int error;
127
128 bp = (struct bio *)crp->crp_opaque;
129 sc = bp->bio_to->geom->softc;
130 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
131 if (wr->w_number == bp->bio_pflags)
132 break;
133 }
134 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
135 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
136 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
137 (uintmax_t)crp->crp_sid);
138 wr->w_sid = crp->crp_sid;
139 crp->crp_etype = 0;
140 error = crypto_dispatch(crp);
141 if (error == 0)
142 return (0);
143 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
144 crp->crp_etype = error;
145 return (error);
146}
147
148/*
149 * The function is called after reading encrypted data from the provider.
150 *
151 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
152 */
153void
154g_eli_read_done(struct bio *bp)
155{
156 struct g_eli_softc *sc;
157 struct bio *pbp;
158
159 G_ELI_LOGREQ(2, bp, "Request done.");
160 pbp = bp->bio_parent;
161 if (pbp->bio_error == 0)
162 pbp->bio_error = bp->bio_error;
163 /*
164 * Do we have all sectors already?
165 */
166 pbp->bio_inbed++;
167 if (pbp->bio_inbed < pbp->bio_children)
168 return;
169 g_destroy_bio(bp);
170 sc = pbp->bio_to->geom->softc;
171 if (pbp->bio_error != 0) {
172 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
173 pbp->bio_completed = 0;
174 if (pbp->bio_driver2 != NULL) {
175 free(pbp->bio_driver2, M_ELI);
176 pbp->bio_driver2 = NULL;
177 }
178 g_io_deliver(pbp, pbp->bio_error);
179 atomic_subtract_int(&sc->sc_inflight, 1);
180 return;
181 }
182 mtx_lock(&sc->sc_queue_mtx);
183 bioq_insert_tail(&sc->sc_queue, pbp);
184 mtx_unlock(&sc->sc_queue_mtx);
185 wakeup(sc);
186}
187
188/*
189 * The function is called after we encrypt and write data.
190 *
191 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
192 */
193void
194g_eli_write_done(struct bio *bp)
195{
196 struct g_eli_softc *sc;
197 struct bio *pbp;
198
199 G_ELI_LOGREQ(2, bp, "Request done.");
200 pbp = bp->bio_parent;
201 if (pbp->bio_error == 0) {
202 if (bp->bio_error != 0)
203 pbp->bio_error = bp->bio_error;
204 }
205 /*
206 * Do we have all sectors already?
207 */
208 pbp->bio_inbed++;
209 if (pbp->bio_inbed < pbp->bio_children)
210 return;
211 free(pbp->bio_driver2, M_ELI);
212 pbp->bio_driver2 = NULL;
213 if (pbp->bio_error != 0) {
214 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
215 pbp->bio_error);
216 pbp->bio_completed = 0;
217 }
218 g_destroy_bio(bp);
219 /*
220 * Write is finished, send it up.
221 */
222 pbp->bio_completed = pbp->bio_length;
223 sc = pbp->bio_to->geom->softc;
224 g_io_deliver(pbp, pbp->bio_error);
225 atomic_subtract_int(&sc->sc_inflight, 1);
226}
227
228/*
229 * This function should never be called, but GEOM requires an ->orphan()
230 * method to be set for every geom.
231 */
232static void
233g_eli_orphan_spoil_assert(struct g_consumer *cp)
234{
235
236 panic("Function %s() called for %s.", __func__, cp->geom->name);
237}
238
239static void
240g_eli_orphan(struct g_consumer *cp)
241{
242 struct g_eli_softc *sc;
243
244 g_topology_assert();
245 sc = cp->geom->softc;
246 if (sc == NULL)
247 return;
248 g_eli_destroy(sc, TRUE);
249}
250
251/*
252 * BIO_READ:
253 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
254 * BIO_WRITE:
255 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
256 */
/* GEOM start routine: validate, clone and route incoming bio requests. */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
		break;
	case BIO_DELETE:
		/*
		 * We could eventually support BIO_DELETE request.
		 * It could be done by overwritting requested sector with
		 * random data g_eli_overwrites number of times.
		 */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp->bio_driver1 = cbp;
	/* Mark the bio as not-yet-started so workers know its state. */
	bp->bio_pflags = G_ELI_NEW_BIO;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			/* Plain encryption: issue the read immediately. */
			g_eli_crypto_read(sc, bp, 0);
			break;
		}
		/* FALLTHROUGH */
	case BIO_WRITE:
		/* Queue the bio for a worker thread to do the crypto. */
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
		/* Pass these through to the backing provider unchanged. */
		cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}
316
/*
 * Allocate an opencrypto session for the given worker thread.
 * When the backend is still unknown, probe hardware first and fall back
 * to software, recording the outcome in sc_crypto for later workers.
 * Returns 0 on success or an error from crypto_newsession().
 */
static int
g_eli_newsession(struct g_eli_worker *wr)
{
	struct g_eli_softc *sc;
	struct cryptoini crie, cria;
	int error;

	sc = wr->w_softc;

	/* Describe the encryption transform. */
	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	if (sc->sc_ealgo == CRYPTO_AES_XTS)
		crie.cri_klen <<= 1;	/* XTS sessions take a double-length key. */
	crie.cri_key = sc->sc_ekeys[0];
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		/* Chain the authentication transform after encryption. */
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}

	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_SW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_SOFTWARE);
		break;
	case G_ELI_CRYPTO_HW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		break;
	case G_ELI_CRYPTO_UNKNOWN:
		/* First session for this device: probe hardware first. */
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		if (error == 0) {
			/* Only the first worker records the decision. */
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			/* No hardware; fall back to software crypto. */
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_SW;
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	default:
		panic("%s: invalid condition", __func__);
	}

	return (error);
}
372
/* Release the opencrypto session owned by the given worker. */
static void
g_eli_freesession(struct g_eli_worker *wr)
{

	crypto_freesession(wr->w_sid);
}
379
380static void
381g_eli_cancel(struct g_eli_softc *sc)
382{
383 struct bio *bp;
384
385 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
386
387 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
388 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
389 ("Not new bio when canceling (bp=%p).", bp));
390 g_io_deliver(bp, ENXIO);
391 }
392}
393
394static struct bio *
395g_eli_takefirst(struct g_eli_softc *sc)
396{
397 struct bio *bp;
398
399 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
400
401 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
402 return (bioq_takefirst(&sc->sc_queue));
403 /*
404 * Device suspended, so we skip new I/O requests.
405 */
406 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
407 if (bp->bio_pflags != G_ELI_NEW_BIO)
408 break;
409 }
410 if (bp != NULL)
411 bioq_remove(&sc->sc_queue, bp);
412 return (bp);
413}
414
415/*
416 * This is the main function for kernel worker thread when we don't have
417 * hardware acceleration and we have to do cryptography in software.
418 * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM
419 * threads with crypto work.
420 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	wr = arg;
	sc = wr->w_softc;
#ifdef SMP
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
	    g_eli_threads == 0) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	/* With one worker per CPU (default), pin each worker to its CPU. */
	if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
		sched_bind(curthread, wr->w_number);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
again:
		bp = g_eli_takefirst(sc);
		if (bp == NULL) {
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				/* Device is going away: clean up and exit. */
				g_eli_cancel(sc);
				LIST_REMOVE(wr, w_next);
				g_eli_freesession(wr);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				/* Let g_eli_destroy() notice one less worker. */
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
				if (sc->sc_inflight > 0) {
					G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight);
					/*
					 * We still have inflight BIOs, so
					 * sleep and retry.
					 */
					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
					    "geli:inf", hz / 5);
					goto again;
				}
				/*
				 * Suspend requested, mark the worker as
				 * suspended and go to sleep.
				 */
				if (wr->w_active) {
					/* Drop the crypto session (keys). */
					g_eli_freesession(wr);
					wr->w_active = FALSE;
				}
				wakeup(&sc->sc_workers);
				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
				    "geli:suspend", 0);
				/* Woken up: re-create session on resume. */
				if (!wr->w_active &&
				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
					error = g_eli_newsession(wr);
					KASSERT(error == 0,
					    ("g_eli_newsession() failed on resume (error=%d)",
					    error));
					wr->w_active = TRUE;
				}
				goto again;
			}
			/* Queue empty: wait for work (PDROP releases mtx). */
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
		/* First dequeue of a new bio makes it inflight. */
		if (bp->bio_pflags == G_ELI_NEW_BIO)
			atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_pflags == G_ELI_NEW_BIO) {
			bp->bio_pflags = 0;
			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
				if (bp->bio_cmd == BIO_READ)
					g_eli_auth_read(sc, bp);
				else
					g_eli_auth_run(wr, bp);
			} else {
				if (bp->bio_cmd == BIO_READ)
					g_eli_crypto_read(sc, bp, 1);
				else
					g_eli_crypto_run(wr, bp);
			}
		} else {
			/* Already-started bio: continue its crypto stage. */
			if (sc->sc_flags & G_ELI_FLAG_AUTH)
				g_eli_auth_run(wr, bp);
			else
				g_eli_crypto_run(wr, bp);
		}
	}
}
522
523/*
524 * Select encryption key. If G_ELI_FLAG_SINGLE_KEY is present we only have one
525 * key available for all the data. If the flag is not present select the key
526 * based on data offset.
527 */
528uint8_t *
529g_eli_crypto_key(struct g_eli_softc *sc, off_t offset, size_t blocksize)
530{
531 u_int nkey;
532
533 if (sc->sc_nekeys == 1)
534 return (sc->sc_ekeys[0]);
535
536 KASSERT(sc->sc_nekeys > 1, ("%s: sc_nekeys=%u", __func__,
537 sc->sc_nekeys));
538 KASSERT((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0,
539 ("%s: SINGLE_KEY flag set, but sc_nekeys=%u", __func__,
540 sc->sc_nekeys));
541
542 /* We switch key every 2^G_ELI_KEY_SHIFT blocks. */
543 nkey = (offset >> G_ELI_KEY_SHIFT) / blocksize;
544
545 KASSERT(nkey < sc->sc_nekeys, ("%s: nkey=%u >= sc_nekeys=%u", __func__,
546 nkey, sc->sc_nekeys));
547
548 return (sc->sc_ekeys[nkey]);
549}
550
551/*
552 * Here we generate IV. It is unique for every sector.
553 */
void
g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
    size_t size)
{
	uint8_t off[8];

	/*
	 * Serialize the sector offset.  Pre-version-4 metadata (see the
	 * NATIVE_BYTE_ORDER backward-compatibility flag) used host byte
	 * order; newer metadata is always little-endian.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
		bcopy(&offset, off, sizeof(off));
	else
		le64enc(off, (uint64_t)offset);

	switch (sc->sc_ealgo) {
	case CRYPTO_AES_XTS:
		/* XTS: the raw offset, zero-padded to the IV size. */
		bcopy(off, iv, sizeof(off));
		bzero(iv + sizeof(off), size - sizeof(off));
		break;
	default:
	    {
		u_char hash[SHA256_DIGEST_LENGTH];
		SHA256_CTX ctx;

		/* Copy precalculated SHA256 context for IV-Key. */
		bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
		SHA256_Update(&ctx, off, sizeof(off));
		SHA256_Final(hash, &ctx);
		/* IV is the hash of IV-Key and offset, truncated to size. */
		bcopy(hash, iv, MIN(sizeof(hash), size));
		break;
	    }
	}
}
584
/*
 * Read the last sector of provider pp and decode the ELI metadata stored
 * there into md.  A throw-away geom/consumer pair is used so the read can
 * be done from the event thread.  Returns 0 on success or an errno value.
 */
int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so there
	 * could be no orphan nor spoil event in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	/* I/O requires dropping the topology lock. */
	g_topology_unlock();
	/* The metadata lives in the provider's last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	eli_metadata_decode(buf, md);
end:
	/* Common cleanup for both success and every failure path. */
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}
632
633/*
634 * The function is called when we had last close on provider and user requested
635 * to close it when this situation occur.
636 */
static void
g_eli_last_close(struct g_eli_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char ppname[64];
	int error;

	g_topology_assert();
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	/* Save the name: the provider is gone after g_eli_destroy(). */
	strlcpy(ppname, pp->name, sizeof(ppname));
	error = g_eli_destroy(sc, TRUE);
	/* Forced destroy on last close must not fail. */
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    ppname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
}
654
655int
656g_eli_access(struct g_provider *pp, int dr, int dw, int de)
657{
658 struct g_eli_softc *sc;
659 struct g_geom *gp;
660
661 gp = pp->geom;
662 sc = gp->softc;
663
664 if (dw > 0) {
665 if (sc->sc_flags & G_ELI_FLAG_RO) {
666 /* Deny write attempts. */
667 return (EROFS);
668 }
669 /* Someone is opening us for write, we need to remember that. */
670 sc->sc_flags |= G_ELI_FLAG_WOPEN;
671 return (0);
672 }
673 /* Is this the last close? */
674 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
675 return (0);
676
677 /*
678 * Automatically detach on last close if requested.
679 */
680 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
681 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
682 g_eli_last_close(sc);
683 }
684 return (0);
685}
686
/*
 * Check whether the administrator has halted the given CPU.
 * NOTE(review): `1 << cpu` is undefined for cpu >= the width of int;
 * assumes mp_ncpus never exceeds the width of hlt_cpus_mask -- confirm
 * against the platform's cpumask_t definition.
 */
static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return ((hlt_cpus_mask & (1 << cpu)) != 0);
#else
	return (0);
#endif
}
696
697struct g_geom *
698g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
699 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
700{
701 struct g_eli_softc *sc;
702 struct g_eli_worker *wr;
703 struct g_geom *gp;
704 struct g_provider *pp;
705 struct g_consumer *cp;
706 u_int i, threads;
707 int error;
708
709 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
710
711 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
712 gp->softc = NULL; /* for a moment */
713
714 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
715 gp->start = g_eli_start;
716 /*
717 * Spoiling cannot happen actually, because we keep provider open for
718 * writing all the time or provider is read-only.
719 */
720 gp->spoiled = g_eli_orphan_spoil_assert;
721 gp->orphan = g_eli_orphan;
722 gp->dumpconf = g_eli_dumpconf;
723 /*
724 * If detach-on-last-close feature is not enabled and we don't operate
725 * on read-only provider, we can simply use g_std_access().
726 */
727 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
728 gp->access = g_eli_access;
729 else
730 gp->access = g_std_access;
731
732 sc->sc_inflight = 0;
733 sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
734 sc->sc_flags = md->md_flags;
735 /* Backward compatibility. */
736 if (md->md_version < 4)
737 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
738 if (md->md_version < 5)
739 sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
740 sc->sc_ealgo = md->md_ealgo;
741 sc->sc_nkey = nkey;
742
743 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
744 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
745 sc->sc_aalgo = md->md_aalgo;
746 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
747
748 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
749 /*
750 * Some hash functions (like SHA1 and RIPEMD160) generates hash
751 * which length is not multiple of 128 bits, but we want data
752 * length to be multiple of 128, so we can encrypt without
753 * padding. The line below rounds down data length to multiple
754 * of 128 bits.
755 */
756 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
757
758 sc->sc_bytes_per_sector =
759 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
760 sc->sc_bytes_per_sector *= bpp->sectorsize;
761 }
762
763 gp->softc = sc;
764 sc->sc_geom = gp;
765
766 bioq_init(&sc->sc_queue);
767 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
768
769 pp = NULL;
770 cp = g_new_consumer(gp);
771 error = g_attach(cp, bpp);
772 if (error != 0) {
773 if (req != NULL) {
774 gctl_error(req, "Cannot attach to %s (error=%d).",
775 bpp->name, error);
776 } else {
777 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
778 bpp->name, error);
779 }
780 goto failed;
781 }
782 /*
783 * Keep provider open all the time, so we can run critical tasks,
784 * like Master Keys deletion, without wondering if we can open
785 * provider or not.
786 * We don't open provider for writing only when user requested read-only
787 * access.
788 */
789 if (sc->sc_flags & G_ELI_FLAG_RO)
790 error = g_access(cp, 1, 0, 1);
791 else
792 error = g_access(cp, 1, 1, 1);
793 if (error != 0) {
794 if (req != NULL) {
795 gctl_error(req, "Cannot access %s (error=%d).",
796 bpp->name, error);
797 } else {
798 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
799 bpp->name, error);
800 }
801 goto failed;
802 }
803
804 sc->sc_sectorsize = md->md_sectorsize;
805 sc->sc_mediasize = bpp->mediasize;
806 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
807 sc->sc_mediasize -= bpp->sectorsize;
808 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
809 sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
810 else {
811 sc->sc_mediasize /= sc->sc_bytes_per_sector;
812 sc->sc_mediasize *= sc->sc_sectorsize;
813 }
814
815 /*
816 * Remember the keys in our softc structure.
817 */
818 g_eli_mkey_propagate(sc, mkey);
819 sc->sc_ekeylen = md->md_keylen;
820 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
821 /*
822 * Precalculate SHA256 for HMAC key generation.
823 * This is expensive operation and we can do it only once now or
824 * for every access to sector, so now will be much better.
825 */
826 SHA256_Init(&sc->sc_akeyctx);
827 SHA256_Update(&sc->sc_akeyctx, sc->sc_akey,
828 sizeof(sc->sc_akey));
829 }
830 /*
831 * Precalculate SHA256 for IV generation.
832 * This is expensive operation and we can do it only once now or for
833 * every access to sector, so now will be much better.
834 */
835 switch (sc->sc_ealgo) {
836 case CRYPTO_AES_XTS:
837 break;
838 default:
839 SHA256_Init(&sc->sc_ivctx);
840 SHA256_Update(&sc->sc_ivctx, sc->sc_ivkey,
841 sizeof(sc->sc_ivkey));
842 break;
843 }
844
845 LIST_INIT(&sc->sc_workers);
846
847 threads = g_eli_threads;
848 if (threads == 0)
849 threads = mp_ncpus;
850 else if (threads > mp_ncpus) {
851 /* There is really no need for too many worker threads. */
852 threads = mp_ncpus;
853 G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
854 }
855 for (i = 0; i < threads; i++) {
856 if (g_eli_cpu_is_disabled(i)) {
857 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
858 bpp->name, i);
859 continue;
860 }
861 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
862 wr->w_softc = sc;
863 wr->w_number = i;
864 wr->w_active = TRUE;
865
866 error = g_eli_newsession(wr);
867 if (error != 0) {
868 free(wr, M_ELI);
869 if (req != NULL) {
870 gctl_error(req, "Cannot set up crypto session "
871 "for %s (error=%d).", bpp->name, error);
872 } else {
873 G_ELI_DEBUG(1, "Cannot set up crypto session "
874 "for %s (error=%d).", bpp->name, error);
875 }
876 goto failed;
877 }
878
879 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
880 "g_eli[%u] %s", i, bpp->name);
881 if (error != 0) {
882 g_eli_freesession(wr);
883 free(wr, M_ELI);
884 if (req != NULL) {
885 gctl_error(req, "Cannot create kernel thread "
886 "for %s (error=%d).", bpp->name, error);
887 } else {
888 G_ELI_DEBUG(1, "Cannot create kernel thread "
889 "for %s (error=%d).", bpp->name, error);
890 }
891 goto failed;
892 }
893 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
894 /* If we have hardware support, one thread is enough. */
895 if (sc->sc_crypto == G_ELI_CRYPTO_HW)
896 break;
897 }
898
899 /*
900 * Create decrypted provider.
901 */
902 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
903 pp->mediasize = sc->sc_mediasize;
904 pp->sectorsize = sc->sc_sectorsize;
905
906 g_error_provider(pp, 0);
907
908 G_ELI_DEBUG(0, "Device %s created.", pp->name);
909 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
910 sc->sc_ekeylen);
911 if (sc->sc_flags & G_ELI_FLAG_AUTH)
912 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
913 G_ELI_DEBUG(0, " Crypto: %s",
914 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
915 return (gp);
916failed:
917 mtx_lock(&sc->sc_queue_mtx);
918 sc->sc_flags |= G_ELI_FLAG_DESTROY;
919 wakeup(sc);
920 /*
921 * Wait for kernel threads self destruction.
922 */
923 while (!LIST_EMPTY(&sc->sc_workers)) {
924 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
925 "geli:destroy", 0);
926 }
927 mtx_destroy(&sc->sc_queue_mtx);
928 if (cp->provider != NULL) {
929 if (cp->acr == 1)
930 g_access(cp, -1, -1, -1);
931 g_detach(cp);
932 }
933 g_destroy_consumer(cp);
934 g_destroy_geom(gp);
935 if (sc->sc_ekeys != NULL) {
936 bzero(sc->sc_ekeys,
937 sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
938 free(sc->sc_ekeys, M_ELI);
939 }
940 bzero(sc, sizeof(*sc));
941 free(sc, M_ELI);
942 return (NULL);
943}
944
/*
 * Tear down a geli device: stop the worker threads, scrub the key
 * material and wither the geom.  With force == FALSE the destruction is
 * refused (EBUSY) while the provider is still open.
 */
int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Tell the workers to exit and wait until the last one is gone. */
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	/* Scrub the encryption keys before freeing them. */
	bzero(sc->sc_ekeys,
	    sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
	free(sc->sc_ekeys, M_ELI);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}
991
992static int
993g_eli_destroy_geom(struct gctl_req *req __unused,
994 struct g_class *mp __unused, struct g_geom *gp)
995{
996 struct g_eli_softc *sc;
997
998 sc = gp->softc;
999 return (g_eli_destroy(sc, FALSE));
1000}
1001
/*
 * Feed all key files preloaded by the loader for the given provider into
 * the HMAC context.  Returns the number of key files found, or 0 when a
 * key file entry is present but its metadata is incomplete.
 */
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data, *size;
	char *file, name[64];
	int i;

	for (i = 0; ; i++) {
		/* Key files are preloaded as "<provider>:geli_keyfile<N>". */
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_search_info(keyfile, MODINFO_ADDR);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		/* MODINFO_ADDR holds a pointer to the file contents. */
		data = *(void **)data;
		size = preload_search_info(keyfile, MODINFO_SIZE);
		if (size == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
	}
}
1038
1039static void
1040g_eli_keyfiles_clear(const char *provider)
1041{
1042 u_char *keyfile, *data, *size;
1043 char name[64];
1044 int i;
1045
1046 for (i = 0; ; i++) {
1047 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1048 keyfile = preload_search_by_type(name);
1049 if (keyfile == NULL)
1050 return;
1051 data = preload_search_info(keyfile, MODINFO_ADDR);
1052 size = preload_search_info(keyfile, MODINFO_SIZE);
1053 if (data == NULL || size == NULL)
1054 continue;
1055 data = *(void **)data;
1056 bzero(data, *(size_t *)size);
1057 }
1058}
1059
1060/*
1061 * Tasting is only made on boot.
1062 * We detect providers which should be attached before root is mounted.
1063 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Taste only before root is mounted, and only if tries are allowed. */
	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	for (i = 0; i < tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such situation, so assume
			 * that there was really no passphrase and in that case
			 * key files are no properly defined in loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			printf("Enter passphrase for %s: ", pp->name);
			/* Kernel gets(): size-bounded console input. */
			gets(passphrase, sizeof(passphrase),
			    g_eli_visible_passphrase);
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			/* No key strengthening: HMAC salt and passphrase. */
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			/* Strengthen the passphrase with PKCS#5v2. */
			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			/* -1 means the user key matched no key slot. */
			if (i == tries - 1) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
			    pp->name, tries - i - 1);
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}

	/*
	 * We have correct key, let's attach provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	/* Scrub the Master Key and metadata copies from the stack. */
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}
1202
/* GEOM dumpconf: emit device state as XML (geom-level node only). */
static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

/* Append the flag's name, comma-separated after the first one. */
#define ADD_FLAG(flag, name)	do {					\
	if (sc->sc_flags & (flag)) {					\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
	    g_eli_algo2str(sc->sc_ealgo));
}
1272
1273static void
1274g_eli_shutdown_pre_sync(void *arg, int howto)
1275{
1276 struct g_class *mp;
1277 struct g_geom *gp, *gp2;
1278 struct g_provider *pp;
1279 struct g_eli_softc *sc;
1280 int error;
1281
1282 mp = arg;
1283 DROP_GIANT();
1284 g_topology_lock();
1285 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1286 sc = gp->softc;
1287 if (sc == NULL)
1288 continue;
1289 pp = LIST_FIRST(&gp->provider);
1290 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1291 if (pp->acr + pp->acw + pp->ace == 0)
1292 error = g_eli_destroy(sc, TRUE);
1293 else {
1294 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1295 gp->access = g_eli_access;
1296 }
1297 }
1298 g_topology_unlock();
1299 PICKUP_GIANT();
1300}
1301
/* Class init: register for the shutdown_pre_sync event. */
static void
g_eli_init(struct g_class *mp)
{

	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_eli_pre_sync == NULL)
		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
}
1311
/* Class fini: deregister the shutdown event handler, if registered. */
static void
g_eli_fini(struct g_class *mp)
{

	if (g_eli_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
}
1319
/* Register the ELI GEOM class and record its dependency on opencrypto. */
DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
820
821 LIST_INIT(&sc->sc_workers);
822
823 threads = g_eli_threads;
824 if (threads == 0)
825 threads = mp_ncpus;
826 else if (threads > mp_ncpus) {
827 /* There is really no need for too many worker threads. */
828 threads = mp_ncpus;
829 G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
830 }
831 for (i = 0; i < threads; i++) {
832 if (g_eli_cpu_is_disabled(i)) {
833 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
834 bpp->name, i);
835 continue;
836 }
837 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
838 wr->w_softc = sc;
839 wr->w_number = i;
840 wr->w_active = TRUE;
841
842 error = g_eli_newsession(wr);
843 if (error != 0) {
844 free(wr, M_ELI);
845 if (req != NULL) {
846 gctl_error(req, "Cannot set up crypto session "
847 "for %s (error=%d).", bpp->name, error);
848 } else {
849 G_ELI_DEBUG(1, "Cannot set up crypto session "
850 "for %s (error=%d).", bpp->name, error);
851 }
852 goto failed;
853 }
854
855 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
856 "g_eli[%u] %s", i, bpp->name);
857 if (error != 0) {
858 g_eli_freesession(wr);
859 free(wr, M_ELI);
860 if (req != NULL) {
861 gctl_error(req, "Cannot create kernel thread "
862 "for %s (error=%d).", bpp->name, error);
863 } else {
864 G_ELI_DEBUG(1, "Cannot create kernel thread "
865 "for %s (error=%d).", bpp->name, error);
866 }
867 goto failed;
868 }
869 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
870 /* If we have hardware support, one thread is enough. */
871 if (sc->sc_crypto == G_ELI_CRYPTO_HW)
872 break;
873 }
874
875 /*
876 * Create decrypted provider.
877 */
878 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
879 pp->mediasize = sc->sc_mediasize;
880 pp->sectorsize = sc->sc_sectorsize;
881
882 g_error_provider(pp, 0);
883
884 G_ELI_DEBUG(0, "Device %s created.", pp->name);
885 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
886 sc->sc_ekeylen);
887 if (sc->sc_flags & G_ELI_FLAG_AUTH)
888 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
889 G_ELI_DEBUG(0, " Crypto: %s",
890 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
891 return (gp);
892failed:
893 mtx_lock(&sc->sc_queue_mtx);
894 sc->sc_flags |= G_ELI_FLAG_DESTROY;
895 wakeup(sc);
896 /*
897 * Wait for kernel threads self destruction.
898 */
899 while (!LIST_EMPTY(&sc->sc_workers)) {
900 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
901 "geli:destroy", 0);
902 }
903 mtx_destroy(&sc->sc_queue_mtx);
904 if (cp->provider != NULL) {
905 if (cp->acr == 1)
906 g_access(cp, -1, -1, -1);
907 g_detach(cp);
908 }
909 g_destroy_consumer(cp);
910 g_destroy_geom(gp);
911 if (sc->sc_ekeys != NULL) {
912 bzero(sc->sc_ekeys,
913 sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
914 free(sc->sc_ekeys, M_ELI);
915 }
916 bzero(sc, sizeof(*sc));
917 free(sc, M_ELI);
918 return (NULL);
919}
920
/*
 * Tear down an ELI device: stop the worker threads, wipe all key material
 * from memory and destroy the underlying GEOM geom.
 *
 * Returns 0 on success, ENXIO if sc is NULL, or EBUSY when the encrypted
 * provider is still open and 'force' is not set.  With 'force' set an open
 * provider is only reported; destruction proceeds anyway.
 */
int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	/* Refuse (or just warn, if forced) when the device is still open. */
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Ask the worker threads to self-destruct and wait for all of them. */
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	/* Wipe the per-sector encryption keys before releasing the memory. */
	bzero(sc->sc_ekeys,
	    sc->sc_nekeys * (sizeof(uint8_t *) + G_ELI_DATAKEYLEN));
	free(sc->sc_ekeys, M_ELI);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}
967
968static int
969g_eli_destroy_geom(struct gctl_req *req __unused,
970 struct g_class *mp __unused, struct g_geom *gp)
971{
972 struct g_eli_softc *sc;
973
974 sc = gp->softc;
975 return (g_eli_destroy(sc, FALSE));
976}
977
978static int
979g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
980{
981 u_char *keyfile, *data, *size;
982 char *file, name[64];
983 int i;
984
985 for (i = 0; ; i++) {
986 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
987 keyfile = preload_search_by_type(name);
988 if (keyfile == NULL)
989 return (i); /* Return number of loaded keyfiles. */
990 data = preload_search_info(keyfile, MODINFO_ADDR);
991 if (data == NULL) {
992 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
993 name);
994 return (0);
995 }
996 data = *(void **)data;
997 size = preload_search_info(keyfile, MODINFO_SIZE);
998 if (size == NULL) {
999 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
1000 name);
1001 return (0);
1002 }
1003 file = preload_search_info(keyfile, MODINFO_NAME);
1004 if (file == NULL) {
1005 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
1006 name);
1007 return (0);
1008 }
1009 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
1010 provider, name);
1011 g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
1012 }
1013}
1014
1015static void
1016g_eli_keyfiles_clear(const char *provider)
1017{
1018 u_char *keyfile, *data, *size;
1019 char name[64];
1020 int i;
1021
1022 for (i = 0; ; i++) {
1023 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1024 keyfile = preload_search_by_type(name);
1025 if (keyfile == NULL)
1026 return;
1027 data = preload_search_info(keyfile, MODINFO_ADDR);
1028 size = preload_search_info(keyfile, MODINFO_SIZE);
1029 if (data == NULL || size == NULL)
1030 continue;
1031 data = *(void **)data;
1032 bzero(data, *(size_t *)size);
1033 }
1034}
1035
/*
 * Tasting is only made on boot.
 * We detect providers which should be attached before root is mounted.
 *
 * Reads the ELI metadata from the provider, derives the user key from
 * preloaded key files and/or an interactively entered passphrase, decrypts
 * the Master Key and, on success, creates the decrypted provider.
 * Returns the new geom or NULL when the provider is not ours, not marked
 * for boot-time attach, or the key cannot be recovered.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Only taste before root is mounted, and only if tries are allowed. */
	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	/* Metadata must describe exactly this provider. */
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	for (i = 0; i < tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such situation, so assume
			 * that there was really no passphrase and in that case
			 * key files are no properly defined in loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			printf("Enter passphrase for %s: ", pp->name);
			gets(passphrase, sizeof(passphrase),
			    g_eli_visible_passphrase);
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			/* No key strengthening: HMAC over salt + passphrase. */
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			/* Wipe the passphrase as soon as it is consumed. */
			bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			/* PKCS#5v2 key strengthening with md_iterations. */
			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			/* Wipe the derived key from the stack. */
			bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			/* Wrong key: retry, unless this was the last try. */
			if (i == tries - 1) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
			    pp->name, tries - i - 1);
			/* Try again. */
			continue;
		} else if (error > 0) {
			/* Hard failure (not just a wrong key); give up. */
			G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}

	/*
	 * We have correct key, let's attach provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	/* Wipe the Master Key and the metadata copy from the stack. */
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}
1178
1179static void
1180g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1181 struct g_consumer *cp, struct g_provider *pp)
1182{
1183 struct g_eli_softc *sc;
1184
1185 g_topology_assert();
1186 sc = gp->softc;
1187 if (sc == NULL)
1188 return;
1189 if (pp != NULL || cp != NULL)
1190 return; /* Nothing here. */
1191 sbuf_printf(sb, "%s<Flags>", indent);
1192 if (sc->sc_flags == 0)
1193 sbuf_printf(sb, "NONE");
1194 else {
1195 int first = 1;
1196
1197#define ADD_FLAG(flag, name) do { \
1198 if (sc->sc_flags & (flag)) { \
1199 if (!first) \
1200 sbuf_printf(sb, ", "); \
1201 else \
1202 first = 0; \
1203 sbuf_printf(sb, name); \
1204 } \
1205} while (0)
1206 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1207 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1208 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1209 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1210 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1211 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1212 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1213 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1214 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1215 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1216 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1217#undef ADD_FLAG
1218 }
1219 sbuf_printf(sb, "</Flags>\n");
1220
1221 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1222 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1223 sc->sc_nkey);
1224 }
1225 sbuf_printf(sb, "%s<Crypto>", indent);
1226 switch (sc->sc_crypto) {
1227 case G_ELI_CRYPTO_HW:
1228 sbuf_printf(sb, "hardware");
1229 break;
1230 case G_ELI_CRYPTO_SW:
1231 sbuf_printf(sb, "software");
1232 break;
1233 default:
1234 sbuf_printf(sb, "UNKNOWN");
1235 break;
1236 }
1237 sbuf_printf(sb, "</Crypto>\n");
1238 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1239 sbuf_printf(sb,
1240 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1241 indent, g_eli_algo2str(sc->sc_aalgo));
1242 }
1243 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1244 sc->sc_ekeylen);
1245 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
1246 g_eli_algo2str(sc->sc_ealgo));
1247}
1248
1249static void
1250g_eli_shutdown_pre_sync(void *arg, int howto)
1251{
1252 struct g_class *mp;
1253 struct g_geom *gp, *gp2;
1254 struct g_provider *pp;
1255 struct g_eli_softc *sc;
1256 int error;
1257
1258 mp = arg;
1259 DROP_GIANT();
1260 g_topology_lock();
1261 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1262 sc = gp->softc;
1263 if (sc == NULL)
1264 continue;
1265 pp = LIST_FIRST(&gp->provider);
1266 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1267 if (pp->acr + pp->acw + pp->ace == 0)
1268 error = g_eli_destroy(sc, TRUE);
1269 else {
1270 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1271 gp->access = g_eli_access;
1272 }
1273 }
1274 g_topology_unlock();
1275 PICKUP_GIANT();
1276}
1277
1278static void
1279g_eli_init(struct g_class *mp)
1280{
1281
1282 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1283 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1284 if (g_eli_pre_sync == NULL)
1285 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1286}
1287
1288static void
1289g_eli_fini(struct g_class *mp)
1290{
1291
1292 if (g_eli_pre_sync != NULL)
1293 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1294}
1295
/* Register the ELI GEOM class and declare its dependency on crypto(9). */
DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);