g_eli.c: diff of revision 290406 (deleted lines) against revision 293306 (added lines)
1/*-
2 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 290406 2015-11-05 17:37:35Z smh $");
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 293306 2016-01-07 05:47:34Z allanjude $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/cons.h>
33#include <sys/kernel.h>
34#include <sys/linker.h>
35#include <sys/module.h>
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/bio.h>
39#include <sys/sbuf.h>
40#include <sys/sysctl.h>
41#include <sys/malloc.h>
42#include <sys/eventhandler.h>
43#include <sys/kthread.h>
44#include <sys/proc.h>
45#include <sys/sched.h>
46#include <sys/smp.h>
47#include <sys/uio.h>
48#include <sys/vnode.h>
49
50#include <vm/uma.h>
51
52#include <geom/geom.h>
53#include <geom/eli/g_eli.h>
54#include <geom/eli/pkcs5v2.h>
55
56FEATURE(geom_eli, "GEOM crypto module");
57
58MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
59
60SYSCTL_DECL(_kern_geom);
61SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
62static int g_eli_version = G_ELI_VERSION;
63SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
64 "GELI version");
65int g_eli_debug = 0;
66SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
67 "Debug level");
68static u_int g_eli_tries = 3;
69SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
70 "Number of tries for entering the passphrase");
71static u_int g_eli_visible_passphrase = GETS_NOECHO;
72SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
73 &g_eli_visible_passphrase, 0,
74 "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
75u_int g_eli_overwrites = G_ELI_OVERWRITES;
76SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
77 0, "Number of times on-disk keys should be overwritten when destroying them");
78static u_int g_eli_threads = 0;
79SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
80 "Number of threads doing crypto work");
81u_int g_eli_batch = 0;
82SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
83 "Use crypto operations batching");
84
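/*
 * Illustrative usage of the tunables above (example values only): they can
 * be preset from loader.conf, e.g. kern.geom.eli.threads=4 or
 * kern.geom.eli.batch=1, and the CTLFLAG_RWTUN ones can also be changed at
 * runtime with sysctl(8).
 */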
85/*
86 * Passphrase cached during boot, in order to be more user-friendly if
87 * there are multiple providers using the same passphrase.
88 */
89static char cached_passphrase[256];
90static u_int g_eli_boot_passcache = 1;
91TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
92SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
93 &g_eli_boot_passcache, 0,
94 "Passphrases are cached during boot process for possible reuse");
95static void
96fetch_loader_passphrase(void * dummy)
97{
98 char * env_passphrase;
99
100 KASSERT(dynamic_kenv, ("need dynamic kenv"));
101
102 if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
103 /* Extract passphrase from the environment. */
104 strlcpy(cached_passphrase, env_passphrase,
105 sizeof(cached_passphrase));
106 freeenv(env_passphrase);
107
108 /* Wipe the passphrase from the environment. */
109 kern_unsetenv("kern.geom.eli.passphrase");
110 }
111}
112SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
113 fetch_loader_passphrase, NULL);
114static void
115zero_boot_passcache(void * dummy)
116{
117
118 memset(cached_passphrase, 0, sizeof(cached_passphrase));
119}
120EVENTHANDLER_DEFINE(mountroot, zero_boot_passcache, NULL, 0);
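/*
 * Example of the mechanism above (assumed typical setup): the loader passes
 * the boot passphrase to the kernel in the kern.geom.eli.passphrase
 * environment variable (e.g. set from loader.conf or by the boot code).
 * fetch_loader_passphrase() copies it into cached_passphrase and wipes it
 * from the kernel environment; zero_boot_passcache() clears the cache once
 * the root file system has been mounted.
 */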
121
122static eventhandler_tag g_eli_pre_sync = NULL;
123
124static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
125 struct g_geom *gp);
126static void g_eli_init(struct g_class *mp);
127static void g_eli_fini(struct g_class *mp);
128
129static g_taste_t g_eli_taste;
130static g_dumpconf_t g_eli_dumpconf;
131
132struct g_class g_eli_class = {
133 .name = G_ELI_CLASS_NAME,
134 .version = G_VERSION,
135 .ctlreq = g_eli_config,
136 .taste = g_eli_taste,
137 .destroy_geom = g_eli_destroy_geom,
138 .init = g_eli_init,
139 .fini = g_eli_fini
140};
141
142
143/*
144 * Code paths:
145 * BIO_READ:
146 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
147 * BIO_WRITE:
148 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
149 */
150
151
152/*
 153 * EAGAIN from crypto(9) means that we were probably rebalanced to another
 154 * crypto accelerator or a similar event occurred.
 155 * The function updates the SID and reruns the operation.
156 */
157int
158g_eli_crypto_rerun(struct cryptop *crp)
159{
160 struct g_eli_softc *sc;
161 struct g_eli_worker *wr;
162 struct bio *bp;
163 int error;
164
165 bp = (struct bio *)crp->crp_opaque;
166 sc = bp->bio_to->geom->softc;
167 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
168 if (wr->w_number == bp->bio_pflags)
169 break;
170 }
171 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
172 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
173 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
174 (uintmax_t)crp->crp_sid);
175 wr->w_sid = crp->crp_sid;
176 crp->crp_etype = 0;
177 error = crypto_dispatch(crp);
178 if (error == 0)
179 return (0);
180 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
181 crp->crp_etype = error;
182 return (error);
183}
184
185/*
 186 * The function is called after reading encrypted data from the provider.
187 *
188 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
189 */
190void
191g_eli_read_done(struct bio *bp)
192{
193 struct g_eli_softc *sc;
194 struct bio *pbp;
195
196 G_ELI_LOGREQ(2, bp, "Request done.");
197 pbp = bp->bio_parent;
198 if (pbp->bio_error == 0 && bp->bio_error != 0)
199 pbp->bio_error = bp->bio_error;
200 g_destroy_bio(bp);
201 /*
202 * Do we have all sectors already?
203 */
204 pbp->bio_inbed++;
205 if (pbp->bio_inbed < pbp->bio_children)
206 return;
207 sc = pbp->bio_to->geom->softc;
208 if (pbp->bio_error != 0) {
209 G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
210 pbp->bio_error);
211 pbp->bio_completed = 0;
212 if (pbp->bio_driver2 != NULL) {
213 free(pbp->bio_driver2, M_ELI);
214 pbp->bio_driver2 = NULL;
215 }
216 g_io_deliver(pbp, pbp->bio_error);
217 atomic_subtract_int(&sc->sc_inflight, 1);
218 return;
219 }
220 mtx_lock(&sc->sc_queue_mtx);
221 bioq_insert_tail(&sc->sc_queue, pbp);
222 mtx_unlock(&sc->sc_queue_mtx);
223 wakeup(sc);
224}
225
226/*
227 * The function is called after we encrypt and write data.
228 *
229 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
230 */
231void
232g_eli_write_done(struct bio *bp)
233{
234 struct g_eli_softc *sc;
235 struct bio *pbp;
236
237 G_ELI_LOGREQ(2, bp, "Request done.");
238 pbp = bp->bio_parent;
239 if (pbp->bio_error == 0 && bp->bio_error != 0)
240 pbp->bio_error = bp->bio_error;
241 g_destroy_bio(bp);
242 /*
243 * Do we have all sectors already?
244 */
245 pbp->bio_inbed++;
246 if (pbp->bio_inbed < pbp->bio_children)
247 return;
248 free(pbp->bio_driver2, M_ELI);
249 pbp->bio_driver2 = NULL;
250 if (pbp->bio_error != 0) {
251 G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
252 pbp->bio_error);
253 pbp->bio_completed = 0;
254 } else
255 pbp->bio_completed = pbp->bio_length;
256
257 /*
258 * Write is finished, send it up.
259 */
260 sc = pbp->bio_to->geom->softc;
261 g_io_deliver(pbp, pbp->bio_error);
262 atomic_subtract_int(&sc->sc_inflight, 1);
263}
264
265/*
 266 * This function should never be called, but GEOM requires that the ->orphan()
 267 * method be set for every geom.
268 */
269static void
270g_eli_orphan_spoil_assert(struct g_consumer *cp)
271{
272
273 panic("Function %s() called for %s.", __func__, cp->geom->name);
274}
275
276static void
277g_eli_orphan(struct g_consumer *cp)
278{
279 struct g_eli_softc *sc;
280
281 g_topology_assert();
282 sc = cp->geom->softc;
283 if (sc == NULL)
284 return;
285 g_eli_destroy(sc, TRUE);
286}
287
288/*
289 * BIO_READ:
290 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
291 * BIO_WRITE:
292 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
293 */
294static void
295g_eli_start(struct bio *bp)
296{
297 struct g_eli_softc *sc;
298 struct g_consumer *cp;
299 struct bio *cbp;
300
301 sc = bp->bio_to->geom->softc;
302 KASSERT(sc != NULL,
303 ("Provider's error should be set (error=%d)(device=%s).",
304 bp->bio_to->error, bp->bio_to->name));
305 G_ELI_LOGREQ(2, bp, "Request received.");
306
307 switch (bp->bio_cmd) {
308 case BIO_READ:
309 case BIO_WRITE:
310 case BIO_GETATTR:
311 case BIO_FLUSH:
312 break;
313 case BIO_DELETE:
314 /*
315 * If the user hasn't set the NODELETE flag, we just pass
316 * it down the stack and let the layers beneath us do (or
317 * not) whatever they do with it. If they have, we
318 * reject it. A possible extension would be an
319 * additional flag to take it as a hint to shred the data
320 * with [multiple?] overwrites.
321 */
322 if (!(sc->sc_flags & G_ELI_FLAG_NODELETE))
323 break;
324 default:
325 g_io_deliver(bp, EOPNOTSUPP);
326 return;
327 }
328 cbp = g_clone_bio(bp);
329 if (cbp == NULL) {
330 g_io_deliver(bp, ENOMEM);
331 return;
332 }
333 bp->bio_driver1 = cbp;
334 bp->bio_pflags = G_ELI_NEW_BIO;
335 switch (bp->bio_cmd) {
336 case BIO_READ:
337 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
338 g_eli_crypto_read(sc, bp, 0);
339 break;
340 }
341 /* FALLTHROUGH */
342 case BIO_WRITE:
343 mtx_lock(&sc->sc_queue_mtx);
344 bioq_insert_tail(&sc->sc_queue, bp);
345 mtx_unlock(&sc->sc_queue_mtx);
346 wakeup(sc);
347 break;
348 case BIO_GETATTR:
349 case BIO_FLUSH:
350 case BIO_DELETE:
351 cbp->bio_done = g_std_done;
352 cp = LIST_FIRST(&sc->sc_geom->consumer);
353 cbp->bio_to = cp->provider;
354 G_ELI_LOGREQ(2, cbp, "Sending request.");
355 g_io_request(cbp, cp);
356 break;
357 }
358}
359
360static int
361g_eli_newsession(struct g_eli_worker *wr)
362{
363 struct g_eli_softc *sc;
364 struct cryptoini crie, cria;
365 int error;
366
367 sc = wr->w_softc;
368
369 bzero(&crie, sizeof(crie));
370 crie.cri_alg = sc->sc_ealgo;
371 crie.cri_klen = sc->sc_ekeylen;
372 if (sc->sc_ealgo == CRYPTO_AES_XTS)
373 crie.cri_klen <<= 1;
374 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
375 crie.cri_key = g_eli_key_hold(sc, 0,
376 LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
377 } else {
378 crie.cri_key = sc->sc_ekey;
379 }
380 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
381 bzero(&cria, sizeof(cria));
382 cria.cri_alg = sc->sc_aalgo;
383 cria.cri_klen = sc->sc_akeylen;
384 cria.cri_key = sc->sc_akey;
385 crie.cri_next = &cria;
386 }
387
388 switch (sc->sc_crypto) {
389 case G_ELI_CRYPTO_SW:
390 error = crypto_newsession(&wr->w_sid, &crie,
391 CRYPTOCAP_F_SOFTWARE);
392 break;
393 case G_ELI_CRYPTO_HW:
394 error = crypto_newsession(&wr->w_sid, &crie,
395 CRYPTOCAP_F_HARDWARE);
396 break;
397 case G_ELI_CRYPTO_UNKNOWN:
398 error = crypto_newsession(&wr->w_sid, &crie,
399 CRYPTOCAP_F_HARDWARE);
400 if (error == 0) {
401 mtx_lock(&sc->sc_queue_mtx);
402 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
403 sc->sc_crypto = G_ELI_CRYPTO_HW;
404 mtx_unlock(&sc->sc_queue_mtx);
405 } else {
406 error = crypto_newsession(&wr->w_sid, &crie,
407 CRYPTOCAP_F_SOFTWARE);
408 mtx_lock(&sc->sc_queue_mtx);
409 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
410 sc->sc_crypto = G_ELI_CRYPTO_SW;
411 mtx_unlock(&sc->sc_queue_mtx);
412 }
413 break;
414 default:
415 panic("%s: invalid condition", __func__);
416 }
417
418 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
419 g_eli_key_drop(sc, crie.cri_key);
420
421 return (error);
422}
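/*
 * Note on the session setup above: while sc_crypto is G_ELI_CRYPTO_UNKNOWN,
 * a hardware session is tried first and, if that fails, the code falls back
 * to a software session, latching the outcome in sc_crypto so that later
 * workers reuse the same backend.
 */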
423
424static void
425g_eli_freesession(struct g_eli_worker *wr)
426{
427
428 crypto_freesession(wr->w_sid);
429}
430
431static void
432g_eli_cancel(struct g_eli_softc *sc)
433{
434 struct bio *bp;
435
436 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
437
438 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
439 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
440 ("Not new bio when canceling (bp=%p).", bp));
441 g_io_deliver(bp, ENXIO);
442 }
443}
444
445static struct bio *
446g_eli_takefirst(struct g_eli_softc *sc)
447{
448 struct bio *bp;
449
450 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
451
452 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
453 return (bioq_takefirst(&sc->sc_queue));
454 /*
455 * Device suspended, so we skip new I/O requests.
456 */
457 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
458 if (bp->bio_pflags != G_ELI_NEW_BIO)
459 break;
460 }
461 if (bp != NULL)
462 bioq_remove(&sc->sc_queue, bp);
463 return (bp);
464}
465
466/*
 467 * This is the main function of the kernel worker thread when we don't have
 468 * hardware acceleration and have to do cryptography in software.
 469 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
 470 * threads with crypto work.
471 */
472static void
473g_eli_worker(void *arg)
474{
475 struct g_eli_softc *sc;
476 struct g_eli_worker *wr;
477 struct bio *bp;
478 int error;
479
480 wr = arg;
481 sc = wr->w_softc;
482#ifdef SMP
483 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
484 if (sc->sc_cpubind) {
485 while (!smp_started)
486 tsleep(wr, 0, "geli:smp", hz / 4);
487 }
488#endif
489 thread_lock(curthread);
490 sched_prio(curthread, PUSER);
491 if (sc->sc_cpubind)
492 sched_bind(curthread, wr->w_number % mp_ncpus);
493 thread_unlock(curthread);
494
495 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
496
497 for (;;) {
498 mtx_lock(&sc->sc_queue_mtx);
499again:
500 bp = g_eli_takefirst(sc);
501 if (bp == NULL) {
502 if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
503 g_eli_cancel(sc);
504 LIST_REMOVE(wr, w_next);
505 g_eli_freesession(wr);
506 free(wr, M_ELI);
507 G_ELI_DEBUG(1, "Thread %s exiting.",
508 curthread->td_proc->p_comm);
509 wakeup(&sc->sc_workers);
510 mtx_unlock(&sc->sc_queue_mtx);
511 kproc_exit(0);
512 }
513 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
514 if (sc->sc_inflight > 0) {
515 G_ELI_DEBUG(0, "inflight=%d",
516 sc->sc_inflight);
517 /*
518 * We still have inflight BIOs, so
519 * sleep and retry.
520 */
521 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
522 "geli:inf", hz / 5);
523 goto again;
524 }
525 /*
526 * Suspend requested, mark the worker as
527 * suspended and go to sleep.
528 */
529 if (wr->w_active) {
530 g_eli_freesession(wr);
531 wr->w_active = FALSE;
532 }
533 wakeup(&sc->sc_workers);
534 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
535 "geli:suspend", 0);
536 if (!wr->w_active &&
537 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
538 error = g_eli_newsession(wr);
539 KASSERT(error == 0,
540 ("g_eli_newsession() failed on resume (error=%d)",
541 error));
542 wr->w_active = TRUE;
543 }
544 goto again;
545 }
546 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
547 continue;
548 }
549 if (bp->bio_pflags == G_ELI_NEW_BIO)
550 atomic_add_int(&sc->sc_inflight, 1);
551 mtx_unlock(&sc->sc_queue_mtx);
552 if (bp->bio_pflags == G_ELI_NEW_BIO) {
553 bp->bio_pflags = 0;
554 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
555 if (bp->bio_cmd == BIO_READ)
556 g_eli_auth_read(sc, bp);
557 else
558 g_eli_auth_run(wr, bp);
559 } else {
560 if (bp->bio_cmd == BIO_READ)
561 g_eli_crypto_read(sc, bp, 1);
562 else
563 g_eli_crypto_run(wr, bp);
564 }
565 } else {
566 if (sc->sc_flags & G_ELI_FLAG_AUTH)
567 g_eli_auth_run(wr, bp);
568 else
569 g_eli_crypto_run(wr, bp);
570 }
571 }
572}
573
574/*
 575 * Here we generate the IV. It is unique for every sector.
576 */
577void
578g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
579 size_t size)
580{
581 uint8_t off[8];
582
583 if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
584 bcopy(&offset, off, sizeof(off));
585 else
586 le64enc(off, (uint64_t)offset);
587
588 switch (sc->sc_ealgo) {
589 case CRYPTO_AES_XTS:
590 bcopy(off, iv, sizeof(off));
591 bzero(iv + sizeof(off), size - sizeof(off));
592 break;
593 default:
594 {
595 u_char hash[SHA256_DIGEST_LENGTH];
596 SHA256_CTX ctx;
597
598 /* Copy precalculated SHA256 context for IV-Key. */
599 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
600 SHA256_Update(&ctx, off, sizeof(off));
601 SHA256_Final(hash, &ctx);
602 bcopy(hash, iv, MIN(sizeof(hash), size));
603 break;
604 }
605 }
606}
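/*
 * Worked example of the IV construction above (illustrative offset): for a
 * sector at byte offset 0x1000 with AES-XTS, the IV is the 8-byte encoded
 * offset (little-endian unless G_ELI_FLAG_NATIVE_BYTE_ORDER is set) padded
 * with zeros to the requested size; for the other algorithms it is
 * SHA256(precomputed IV-Key context || encoded offset), copied into the IV
 * up to the requested size.
 */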
607
608int
609g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
610 struct g_eli_metadata *md)
611{
612 struct g_geom *gp;
613 struct g_consumer *cp;
614 u_char *buf = NULL;
615 int error;
616
617 g_topology_assert();
618
619 gp = g_new_geomf(mp, "eli:taste");
620 gp->start = g_eli_start;
621 gp->access = g_std_access;
622 /*
623 * g_eli_read_metadata() is always called from the event thread.
624 * Our geom is created and destroyed in the same event, so there
 625 * can be no orphan or spoil event in the meantime.
626 */
627 gp->orphan = g_eli_orphan_spoil_assert;
628 gp->spoiled = g_eli_orphan_spoil_assert;
629 cp = g_new_consumer(gp);
630 error = g_attach(cp, pp);
631 if (error != 0)
632 goto end;
633 error = g_access(cp, 1, 0, 0);
634 if (error != 0)
635 goto end;
636 g_topology_unlock();
637 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
638 &error);
639 g_topology_lock();
640 if (buf == NULL)
641 goto end;
642 error = eli_metadata_decode(buf, md);
643 if (error != 0)
644 goto end;
645 /* Metadata was read and decoded successfully. */
646end:
647 if (buf != NULL)
648 g_free(buf);
649 if (cp->provider != NULL) {
650 if (cp->acr == 1)
651 g_access(cp, -1, 0, 0);
652 g_detach(cp);
653 }
654 g_destroy_consumer(cp);
655 g_destroy_geom(gp);
656 return (error);
657}
658
659/*
 660 * The function is called on the last close of the provider when the user has
 661 * requested that it be detached in this situation.
662 */
663static void
664g_eli_last_close(void *arg, int flags __unused)
665{
666 struct g_geom *gp;
667 char gpname[64];
668 int error;
669
670 g_topology_assert();
671 gp = arg;
672 strlcpy(gpname, gp->name, sizeof(gpname));
673 error = g_eli_destroy(gp->softc, TRUE);
674 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
675 gpname, error));
676 G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
677}
678
679int
680g_eli_access(struct g_provider *pp, int dr, int dw, int de)
681{
682 struct g_eli_softc *sc;
683 struct g_geom *gp;
684
685 gp = pp->geom;
686 sc = gp->softc;
687
688 if (dw > 0) {
689 if (sc->sc_flags & G_ELI_FLAG_RO) {
690 /* Deny write attempts. */
691 return (EROFS);
692 }
693 /* Someone is opening us for write, we need to remember that. */
694 sc->sc_flags |= G_ELI_FLAG_WOPEN;
695 return (0);
696 }
697 /* Is this the last close? */
698 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
699 return (0);
700
701 /*
702 * Automatically detach on last close if requested.
703 */
704 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
705 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
706 g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
707 }
708 return (0);
709}
710
711static int
712g_eli_cpu_is_disabled(int cpu)
713{
714#ifdef SMP
715 return (CPU_ISSET(cpu, &hlt_cpus_mask));
716#else
717 return (0);
718#endif
719}
720
721struct g_geom *
722g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
723 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
724{
725 struct g_eli_softc *sc;
726 struct g_eli_worker *wr;
727 struct g_geom *gp;
728 struct g_provider *pp;
729 struct g_consumer *cp;
730 u_int i, threads;
731 int error;
732
733 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
734
735 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
736 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
737 gp->start = g_eli_start;
738 /*
739 * Spoiling can happen even though we have the provider open
740 * exclusively, e.g. through media change events.
741 */
742 gp->spoiled = g_eli_orphan;
743 gp->orphan = g_eli_orphan;
744 gp->dumpconf = g_eli_dumpconf;
745 /*
746 * If detach-on-last-close feature is not enabled and we don't operate
747 * on read-only provider, we can simply use g_std_access().
748 */
749 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
750 gp->access = g_eli_access;
751 else
752 gp->access = g_std_access;
753
754 sc->sc_version = md->md_version;
755 sc->sc_inflight = 0;
756 sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
757 sc->sc_flags = md->md_flags;
758 /* Backward compatibility. */
759 if (md->md_version < G_ELI_VERSION_04)
760 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
761 if (md->md_version < G_ELI_VERSION_05)
762 sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
763 if (md->md_version < G_ELI_VERSION_06 &&
764 (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
765 sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
766 }
767 if (md->md_version < G_ELI_VERSION_07)
768 sc->sc_flags |= G_ELI_FLAG_ENC_IVKEY;
769 sc->sc_ealgo = md->md_ealgo;
720 eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize);
770 sc->sc_nkey = nkey;
771
772 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
773 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
774 sc->sc_aalgo = md->md_aalgo;
775 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
776
777 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
778 /*
 779 * Some hash functions (like SHA1 and RIPEMD160) generate hashes
 780 * whose length is not a multiple of 128 bits, but we want the data
 781 * length to be a multiple of 128 bits so we can encrypt without
 782 * padding. The line below rounds the data length down to a
 783 * multiple of 128 bits.
784 */
785 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
786
787 sc->sc_bytes_per_sector =
788 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
789 sc->sc_bytes_per_sector *= bpp->sectorsize;
790 }
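/*
 * Arithmetic example for the layout computed above (illustrative, assuming
 * a 512-byte provider sector, a 512-byte geli sector and a 32-byte
 * HMAC/SHA256 tag): data_per_sector = 512 - 32 = 480, which is already a
 * multiple of 16; bytes_per_sector = ((512 - 1) / 480 + 1) * 512 = 1024,
 * i.e. each 512-byte logical sector occupies two provider sectors.
 */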
791
792 gp->softc = sc;
793 sc->sc_geom = gp;
794
795 bioq_init(&sc->sc_queue);
796 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
797 mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
798
799 pp = NULL;
800 cp = g_new_consumer(gp);
801 error = g_attach(cp, bpp);
802 if (error != 0) {
803 if (req != NULL) {
804 gctl_error(req, "Cannot attach to %s (error=%d).",
805 bpp->name, error);
806 } else {
807 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
808 bpp->name, error);
809 }
810 goto failed;
811 }
812 /*
 813 * Keep the provider open all the time, so we can run critical tasks,
 814 * like Master Key deletion, without wondering whether we can open the
 815 * provider or not.
 816 * We only skip opening the provider for writing when the user requested
 817 * read-only access.
818 */
819 if (sc->sc_flags & G_ELI_FLAG_RO)
820 error = g_access(cp, 1, 0, 1);
821 else
822 error = g_access(cp, 1, 1, 1);
823 if (error != 0) {
824 if (req != NULL) {
825 gctl_error(req, "Cannot access %s (error=%d).",
826 bpp->name, error);
827 } else {
828 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
829 bpp->name, error);
830 }
831 goto failed;
832 }
833
834 sc->sc_sectorsize = md->md_sectorsize;
835 sc->sc_mediasize = bpp->mediasize;
836 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
837 sc->sc_mediasize -= bpp->sectorsize;
838 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
839 sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
840 else {
841 sc->sc_mediasize /= sc->sc_bytes_per_sector;
842 sc->sc_mediasize *= sc->sc_sectorsize;
843 }
844
845 /*
846 * Remember the keys in our softc structure.
847 */
848 g_eli_mkey_propagate(sc, mkey);
849 sc->sc_ekeylen = md->md_keylen;
850
851 LIST_INIT(&sc->sc_workers);
852
853 threads = g_eli_threads;
854 if (threads == 0)
855 threads = mp_ncpus;
856 sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
857 for (i = 0; i < threads; i++) {
858 if (g_eli_cpu_is_disabled(i)) {
859 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
860 bpp->name, i);
861 continue;
862 }
863 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
864 wr->w_softc = sc;
865 wr->w_number = i;
866 wr->w_active = TRUE;
867
868 error = g_eli_newsession(wr);
869 if (error != 0) {
870 free(wr, M_ELI);
871 if (req != NULL) {
872 gctl_error(req, "Cannot set up crypto session "
873 "for %s (error=%d).", bpp->name, error);
874 } else {
875 G_ELI_DEBUG(1, "Cannot set up crypto session "
876 "for %s (error=%d).", bpp->name, error);
877 }
878 goto failed;
879 }
880
881 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
882 "g_eli[%u] %s", i, bpp->name);
883 if (error != 0) {
884 g_eli_freesession(wr);
885 free(wr, M_ELI);
886 if (req != NULL) {
887 gctl_error(req, "Cannot create kernel thread "
888 "for %s (error=%d).", bpp->name, error);
889 } else {
890 G_ELI_DEBUG(1, "Cannot create kernel thread "
891 "for %s (error=%d).", bpp->name, error);
892 }
893 goto failed;
894 }
895 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
896 }
897
898 /*
899 * Create decrypted provider.
900 */
901 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
902 pp->mediasize = sc->sc_mediasize;
903 pp->sectorsize = sc->sc_sectorsize;
904
905 g_error_provider(pp, 0);
906
907 G_ELI_DEBUG(0, "Device %s created.", pp->name);
908 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
909 sc->sc_ekeylen);
910 if (sc->sc_flags & G_ELI_FLAG_AUTH)
911 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
912 G_ELI_DEBUG(0, " Crypto: %s",
913 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
914 return (gp);
915failed:
916 mtx_lock(&sc->sc_queue_mtx);
917 sc->sc_flags |= G_ELI_FLAG_DESTROY;
918 wakeup(sc);
919 /*
920 * Wait for kernel threads self destruction.
921 */
922 while (!LIST_EMPTY(&sc->sc_workers)) {
923 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
924 "geli:destroy", 0);
925 }
926 mtx_destroy(&sc->sc_queue_mtx);
927 if (cp->provider != NULL) {
928 if (cp->acr == 1)
929 g_access(cp, -1, -1, -1);
930 g_detach(cp);
931 }
932 g_destroy_consumer(cp);
933 g_destroy_geom(gp);
934 g_eli_key_destroy(sc);
935 bzero(sc, sizeof(*sc));
936 free(sc, M_ELI);
937 return (NULL);
938}
939
940int
941g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
942{
943 struct g_geom *gp;
944 struct g_provider *pp;
945
946 g_topology_assert();
947
948 if (sc == NULL)
949 return (ENXIO);
950
951 gp = sc->sc_geom;
952 pp = LIST_FIRST(&gp->provider);
953 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
954 if (force) {
955 G_ELI_DEBUG(1, "Device %s is still open, so it "
956 "cannot be definitely removed.", pp->name);
957 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
958 gp->access = g_eli_access;
959 g_wither_provider(pp, ENXIO);
960 return (EBUSY);
961 } else {
962 G_ELI_DEBUG(1,
963 "Device %s is still open (r%dw%de%d).", pp->name,
964 pp->acr, pp->acw, pp->ace);
965 return (EBUSY);
966 }
967 }
968
969 mtx_lock(&sc->sc_queue_mtx);
970 sc->sc_flags |= G_ELI_FLAG_DESTROY;
971 wakeup(sc);
972 while (!LIST_EMPTY(&sc->sc_workers)) {
973 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
974 "geli:destroy", 0);
975 }
976 mtx_destroy(&sc->sc_queue_mtx);
977 gp->softc = NULL;
978 g_eli_key_destroy(sc);
979 bzero(sc, sizeof(*sc));
980 free(sc, M_ELI);
981
982 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
983 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
984 g_wither_geom_close(gp, ENXIO);
985
986 return (0);
987}
988
989static int
990g_eli_destroy_geom(struct gctl_req *req __unused,
991 struct g_class *mp __unused, struct g_geom *gp)
992{
993 struct g_eli_softc *sc;
994
995 sc = gp->softc;
996 return (g_eli_destroy(sc, FALSE));
997}
998
999static int
1000g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
1001{
1002 u_char *keyfile, *data;
1003 char *file, name[64];
1004 size_t size;
1005 int i;
1006
1007 for (i = 0; ; i++) {
1008 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1009 keyfile = preload_search_by_type(name);
1010 if (keyfile == NULL && i == 0) {
1011 /*
1012 * If there is only one keyfile, allow simpler name.
1013 */
1014 snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
1015 keyfile = preload_search_by_type(name);
1016 }
1017 if (keyfile == NULL)
1018 return (i); /* Return number of loaded keyfiles. */
1019 data = preload_fetch_addr(keyfile);
1020 if (data == NULL) {
1021 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
1022 name);
1023 return (0);
1024 }
1025 size = preload_fetch_size(keyfile);
1026 if (size == 0) {
1027 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
1028 name);
1029 return (0);
1030 }
1031 file = preload_search_info(keyfile, MODINFO_NAME);
1032 if (file == NULL) {
1033 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
1034 name);
1035 return (0);
1036 }
1037 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
1038 provider, name);
1039 g_eli_crypto_hmac_update(ctx, data, size);
1040 }
1041}
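/*
 * Example loader.conf entries that preload a key file of the type searched
 * for above (device and path names are illustrative):
 *	geli_da0_keyfile0_load="YES"
 *	geli_da0_keyfile0_type="da0:geli_keyfile0"
 *	geli_da0_keyfile0_name="/boot/keys/da0.key"
 */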
1042
1043static void
1044g_eli_keyfiles_clear(const char *provider)
1045{
1046 u_char *keyfile, *data;
1047 char name[64];
1048 size_t size;
1049 int i;
1050
1051 for (i = 0; ; i++) {
1052 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1053 keyfile = preload_search_by_type(name);
1054 if (keyfile == NULL)
1055 return;
1056 data = preload_fetch_addr(keyfile);
1057 size = preload_fetch_size(keyfile);
1058 if (data != NULL && size != 0)
1059 bzero(data, size);
1060 }
1061}
1062
1063/*
 1064 * Tasting is only done at boot.
1065 * We detect providers which should be attached before root is mounted.
1066 */
1067static struct g_geom *
1068g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1069{
1070 struct g_eli_metadata md;
1071 struct g_geom *gp;
1072 struct hmac_ctx ctx;
1073 char passphrase[256];
1074 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1075 u_int i, nkey, nkeyfiles, tries;
1076 int error;
1077
1078 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1079 g_topology_assert();
1080
1081 if (root_mounted() || g_eli_tries == 0)
1082 return (NULL);
1083
1084 G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1085
1086 error = g_eli_read_metadata(mp, pp, &md);
1087 if (error != 0)
1088 return (NULL);
1089 gp = NULL;
1090
1091 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1092 return (NULL);
1093 if (md.md_version > G_ELI_VERSION) {
1094 printf("geom_eli.ko module is too old to handle %s.\n",
1095 pp->name);
1096 return (NULL);
1097 }
1098 if (md.md_provsize != pp->mediasize)
1099 return (NULL);
1100 /* Should we attach it on boot? */
1101 if (!(md.md_flags & G_ELI_FLAG_BOOT))
1102 return (NULL);
1103 if (md.md_keys == 0x00) {
1104 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1105 return (NULL);
1106 }
1107 if (md.md_iterations == -1) {
1108 /* If there is no passphrase, we try only once. */
1109 tries = 1;
1110 } else {
1111 /* Ask for the passphrase no more than g_eli_tries times. */
1112 tries = g_eli_tries;
1113 }
1114
1115 for (i = 0; i <= tries; i++) {
1116 g_eli_crypto_hmac_init(&ctx, NULL, 0);
1117
1118 /*
1119 * Load all key files.
1120 */
1121 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1122
1123 if (nkeyfiles == 0 && md.md_iterations == -1) {
1124 /*
 1125 * No key files and no passphrase: something is
 1126 * definitely wrong here.
 1127 * geli(8) doesn't allow for such a situation, so assume
 1128 * that there really was no passphrase and that the
 1129 * key files are not properly defined in loader.conf.
1130 */
1131 G_ELI_DEBUG(0,
1132 "Found no key files in loader.conf for %s.",
1133 pp->name);
1134 return (NULL);
1135 }
1136
1137 /* Ask for the passphrase if defined. */
1138 if (md.md_iterations >= 0) {
1139 /* Try first with cached passphrase. */
1140 if (i == 0) {
1141 if (!g_eli_boot_passcache)
1142 continue;
1143 memcpy(passphrase, cached_passphrase,
1144 sizeof(passphrase));
1145 } else {
1146 printf("Enter passphrase for %s: ", pp->name);
1147 cngets(passphrase, sizeof(passphrase),
1148 g_eli_visible_passphrase);
1149 memcpy(cached_passphrase, passphrase,
1150 sizeof(passphrase));
1151 }
1152 }
1153
1154 /*
1155 * Prepare Derived-Key from the user passphrase.
1156 */
1157 if (md.md_iterations == 0) {
1158 g_eli_crypto_hmac_update(&ctx, md.md_salt,
1159 sizeof(md.md_salt));
1160 g_eli_crypto_hmac_update(&ctx, passphrase,
1161 strlen(passphrase));
1162 bzero(passphrase, sizeof(passphrase));
1163 } else if (md.md_iterations > 0) {
1164 u_char dkey[G_ELI_USERKEYLEN];
1165
1166 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1167 sizeof(md.md_salt), passphrase, md.md_iterations);
1168 bzero(passphrase, sizeof(passphrase));
1169 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1170 bzero(dkey, sizeof(dkey));
1171 }
1172
1173 g_eli_crypto_hmac_final(&ctx, key, 0);
1174
1175 /*
1176 * Decrypt Master-Key.
1177 */
1178 error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1179 bzero(key, sizeof(key));
1180 if (error == -1) {
1181 if (i == tries) {
1182 G_ELI_DEBUG(0,
1183 "Wrong key for %s. No tries left.",
1184 pp->name);
1185 g_eli_keyfiles_clear(pp->name);
1186 return (NULL);
1187 }
1188 if (i > 0) {
1189 G_ELI_DEBUG(0,
1190 "Wrong key for %s. Tries left: %u.",
1191 pp->name, tries - i);
1192 }
1193 /* Try again. */
1194 continue;
1195 } else if (error > 0) {
1196 G_ELI_DEBUG(0,
1197 "Cannot decrypt Master Key for %s (error=%d).",
1198 pp->name, error);
1199 g_eli_keyfiles_clear(pp->name);
1200 return (NULL);
1201 }
1202 g_eli_keyfiles_clear(pp->name);
1203 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1204 break;
1205 }
1206
1207 /*
1208 * We have correct key, let's attach provider.
1209 */
1210 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1211 bzero(mkey, sizeof(mkey));
1212 bzero(&md, sizeof(md));
1213 if (gp == NULL) {
1214 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1215 G_ELI_SUFFIX);
1216 return (NULL);
1217 }
1218 return (gp);
1219}
1220
1221static void
1222g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1223 struct g_consumer *cp, struct g_provider *pp)
1224{
1225 struct g_eli_softc *sc;
1226
1227 g_topology_assert();
1228 sc = gp->softc;
1229 if (sc == NULL)
1230 return;
1231 if (pp != NULL || cp != NULL)
1232 return; /* Nothing here. */
1233
1234 sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
1235 (uintmax_t)sc->sc_ekeys_total);
1236 sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
1237 (uintmax_t)sc->sc_ekeys_allocated);
1238 sbuf_printf(sb, "%s<Flags>", indent);
1239 if (sc->sc_flags == 0)
1240 sbuf_printf(sb, "NONE");
1241 else {
1242 int first = 1;
1243
1244#define ADD_FLAG(flag, name) do { \
1245 if (sc->sc_flags & (flag)) { \
1246 if (!first) \
1247 sbuf_printf(sb, ", "); \
1248 else \
1249 first = 0; \
1250 sbuf_printf(sb, name); \
1251 } \
1252} while (0)
1253 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1254 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1255 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1256 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1257 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1258 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1259 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1260 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1261 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1262 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1263 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1264 ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
1265#undef ADD_FLAG
1266 }
1267 sbuf_printf(sb, "</Flags>\n");
1268
1269 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1270 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1271 sc->sc_nkey);
1272 }
1273 sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1274 sbuf_printf(sb, "%s<Crypto>", indent);
1275 switch (sc->sc_crypto) {
1276 case G_ELI_CRYPTO_HW:
1277 sbuf_printf(sb, "hardware");
1278 break;
1279 case G_ELI_CRYPTO_SW:
1280 sbuf_printf(sb, "software");
1281 break;
1282 default:
1283 sbuf_printf(sb, "UNKNOWN");
1284 break;
1285 }
1286 sbuf_printf(sb, "</Crypto>\n");
1287 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1288 sbuf_printf(sb,
1289 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1290 indent, g_eli_algo2str(sc->sc_aalgo));
1291 }
1292 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1293 sc->sc_ekeylen);
1294 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1295 indent, g_eli_algo2str(sc->sc_ealgo));
1296 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1297 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1298}
1299
1300static void
1301g_eli_shutdown_pre_sync(void *arg, int howto)
1302{
1303 struct g_class *mp;
1304 struct g_geom *gp, *gp2;
1305 struct g_provider *pp;
1306 struct g_eli_softc *sc;
1307 int error;
1308
1309 mp = arg;
1310 DROP_GIANT();
1311 g_topology_lock();
1312 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1313 sc = gp->softc;
1314 if (sc == NULL)
1315 continue;
1316 pp = LIST_FIRST(&gp->provider);
1317 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1318 if (pp->acr + pp->acw + pp->ace == 0)
1319 error = g_eli_destroy(sc, TRUE);
1320 else {
1321 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1322 gp->access = g_eli_access;
1323 }
1324 }
1325 g_topology_unlock();
1326 PICKUP_GIANT();
1327}
1328
1329static void
1330g_eli_init(struct g_class *mp)
1331{
1332
1333 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1334 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1335 if (g_eli_pre_sync == NULL)
1336 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1337}
1338
1339static void
1340g_eli_fini(struct g_class *mp)
1341{
1342
1343 if (g_eli_pre_sync != NULL)
1344 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1345}
1346
1347DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1348MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
769
770 LIST_INIT(&sc->sc_workers);
771
772 threads = g_eli_threads;
773 if (threads == 0)
774 threads = mp_ncpus;
775 sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
776 for (i = 0; i < threads; i++) {
777 if (g_eli_cpu_is_disabled(i)) {
778 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
779 bpp->name, i);
780 continue;
781 }
782 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
783 wr->w_softc = sc;
784 wr->w_number = i;
785 wr->w_active = TRUE;
786
787 error = g_eli_newsession(wr);
788 if (error != 0) {
789 free(wr, M_ELI);
790 if (req != NULL) {
791 gctl_error(req, "Cannot set up crypto session "
792 "for %s (error=%d).", bpp->name, error);
793 } else {
794 G_ELI_DEBUG(1, "Cannot set up crypto session "
795 "for %s (error=%d).", bpp->name, error);
796 }
797 goto failed;
798 }
799
800 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
801 "g_eli[%u] %s", i, bpp->name);
802 if (error != 0) {
803 g_eli_freesession(wr);
804 free(wr, M_ELI);
805 if (req != NULL) {
806 gctl_error(req, "Cannot create kernel thread "
807 "for %s (error=%d).", bpp->name, error);
808 } else {
809 G_ELI_DEBUG(1, "Cannot create kernel thread "
810 "for %s (error=%d).", bpp->name, error);
811 }
812 goto failed;
813 }
814 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
815 }
816
817 /*
818 * Create decrypted provider.
819 */
820 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
821 pp->mediasize = sc->sc_mediasize;
822 pp->sectorsize = sc->sc_sectorsize;
823
824 g_error_provider(pp, 0);
825
826 G_ELI_DEBUG(0, "Device %s created.", pp->name);
827 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
828 sc->sc_ekeylen);
829 if (sc->sc_flags & G_ELI_FLAG_AUTH)
830 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
831 G_ELI_DEBUG(0, " Crypto: %s",
832 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
833 return (gp);
834failed:
835 mtx_lock(&sc->sc_queue_mtx);
836 sc->sc_flags |= G_ELI_FLAG_DESTROY;
837 wakeup(sc);
838 /*
839 * Wait for the kernel threads to self-destruct.
840 */
841 while (!LIST_EMPTY(&sc->sc_workers)) {
842 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
843 "geli:destroy", 0);
844 }
845 mtx_destroy(&sc->sc_queue_mtx);
846 if (cp->provider != NULL) {
847 if (cp->acr == 1)
848 g_access(cp, -1, -1, -1);
849 g_detach(cp);
850 }
851 g_destroy_consumer(cp);
852 g_destroy_geom(gp);
853 g_eli_key_destroy(sc);
854 bzero(sc, sizeof(*sc));
855 free(sc, M_ELI);
856 return (NULL);
857}
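/*
 * A brief illustrative note on the worker pool created above: g_eli_threads
 * defaults to one worker per CPU (each worker bound to its CPU when the
 * counts match) and is backed by the kern.geom.eli.threads tunable.  A sketch
 * of limiting every provider to two workers, e.g. in /boot/loader.conf
 * (the value 2 is only an example; 0 keeps the per-CPU default):
 *
 *	kern.geom.eli.threads=2
 */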
858
859int
860g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
861{
862 struct g_geom *gp;
863 struct g_provider *pp;
864
865 g_topology_assert();
866
867 if (sc == NULL)
868 return (ENXIO);
869
870 gp = sc->sc_geom;
871 pp = LIST_FIRST(&gp->provider);
872 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
873 if (force) {
874 G_ELI_DEBUG(1, "Device %s is still open, so it "
875 "cannot be definitively removed.", pp->name);
876 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
877 gp->access = g_eli_access;
878 g_wither_provider(pp, ENXIO);
879 return (EBUSY);
880 } else {
881 G_ELI_DEBUG(1,
882 "Device %s is still open (r%dw%de%d).", pp->name,
883 pp->acr, pp->acw, pp->ace);
884 return (EBUSY);
885 }
886 }
887
888 mtx_lock(&sc->sc_queue_mtx);
889 sc->sc_flags |= G_ELI_FLAG_DESTROY;
890 wakeup(sc);
891 while (!LIST_EMPTY(&sc->sc_workers)) {
892 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
893 "geli:destroy", 0);
894 }
895 mtx_destroy(&sc->sc_queue_mtx);
896 gp->softc = NULL;
897 g_eli_key_destroy(sc);
898 bzero(sc, sizeof(*sc));
899 free(sc, M_ELI);
900
901 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
902 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
903 g_wither_geom_close(gp, ENXIO);
904
905 return (0);
906}
907
908static int
909g_eli_destroy_geom(struct gctl_req *req __unused,
910 struct g_class *mp __unused, struct g_geom *gp)
911{
912 struct g_eli_softc *sc;
913
914 sc = gp->softc;
915 return (g_eli_destroy(sc, FALSE));
916}
917
918static int
919g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
920{
921 u_char *keyfile, *data;
922 char *file, name[64];
923 size_t size;
924 int i;
925
926 for (i = 0; ; i++) {
927 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
928 keyfile = preload_search_by_type(name);
929 if (keyfile == NULL && i == 0) {
930 /*
931 * If there is only one keyfile, allow a simpler name.
932 */
933 snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
934 keyfile = preload_search_by_type(name);
935 }
936 if (keyfile == NULL)
937 return (i); /* Return number of loaded keyfiles. */
938 data = preload_fetch_addr(keyfile);
939 if (data == NULL) {
940 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
941 name);
942 return (0);
943 }
944 size = preload_fetch_size(keyfile);
945 if (size == 0) {
946 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
947 name);
948 return (0);
949 }
950 file = preload_search_info(keyfile, MODINFO_NAME);
951 if (file == NULL) {
952 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
953 name);
954 return (0);
955 }
956 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
957 provider, name);
958 g_eli_crypto_hmac_update(ctx, data, size);
959 }
960}
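/*
 * For context, g_eli_keyfiles_load() above consumes key files that were
 * preloaded by the loader with a module type of "<provider>:geli_keyfile<N>"
 * (or "<provider>:geli_keyfile" when there is a single file).  A sketch of
 * loader.conf entries that produce such preloaded data, with the device name
 * and path used here as placeholders (see geli(8) for the authoritative
 * syntax):
 *
 *	geli_da0_keyfile0_load="YES"
 *	geli_da0_keyfile0_type="da0:geli_keyfile0"
 *	geli_da0_keyfile0_name="/boot/keys/da0.key"
 */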
961
962static void
963g_eli_keyfiles_clear(const char *provider)
964{
965 u_char *keyfile, *data;
966 char name[64];
967 size_t size;
968 int i;
969
970 for (i = 0; ; i++) {
971 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
972 keyfile = preload_search_by_type(name);
973 if (keyfile == NULL)
974 return;
975 data = preload_fetch_addr(keyfile);
976 size = preload_fetch_size(keyfile);
977 if (data != NULL && size != 0)
978 bzero(data, size);
979 }
980}
981
982/*
983 * Tasting is only performed at boot.
984 * We detect providers that should be attached before root is mounted.
985 */
986static struct g_geom *
987g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
988{
989 struct g_eli_metadata md;
990 struct g_geom *gp;
991 struct hmac_ctx ctx;
992 char passphrase[256];
993 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
994 u_int i, nkey, nkeyfiles, tries;
995 int error;
996
997 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
998 g_topology_assert();
999
1000 if (root_mounted() || g_eli_tries == 0)
1001 return (NULL);
1002
1003 G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1004
1005 error = g_eli_read_metadata(mp, pp, &md);
1006 if (error != 0)
1007 return (NULL);
1008 gp = NULL;
1009
1010 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1011 return (NULL);
1012 if (md.md_version > G_ELI_VERSION) {
1013 printf("geom_eli.ko module is too old to handle %s.\n",
1014 pp->name);
1015 return (NULL);
1016 }
1017 if (md.md_provsize != pp->mediasize)
1018 return (NULL);
1019 /* Should we attach it on boot? */
1020 if (!(md.md_flags & G_ELI_FLAG_BOOT))
1021 return (NULL);
1022 if (md.md_keys == 0x00) {
1023 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1024 return (NULL);
1025 }
1026 if (md.md_iterations == -1) {
1027 /* If there is no passphrase, we try only once. */
1028 tries = 1;
1029 } else {
1030 /* Ask for the passphrase no more than g_eli_tries times. */
1031 tries = g_eli_tries;
1032 }
1033
1034 for (i = 0; i <= tries; i++) {
1035 g_eli_crypto_hmac_init(&ctx, NULL, 0);
1036
1037 /*
1038 * Load all key files.
1039 */
1040 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1041
1042 if (nkeyfiles == 0 && md.md_iterations == -1) {
1043 /*
1044 * No key files and no passphrase; something is
1045 * definitely wrong here.
1046 * geli(8) doesn't allow for such a situation, so assume
1047 * that there really was no passphrase and that the key
1048 * files are not properly defined in loader.conf.
1049 */
1050 G_ELI_DEBUG(0,
1051 "Found no key files in loader.conf for %s.",
1052 pp->name);
1053 return (NULL);
1054 }
1055
1056 /* Ask for the passphrase if defined. */
1057 if (md.md_iterations >= 0) {
1058 /* Try first with cached passphrase. */
1059 if (i == 0) {
1060 if (!g_eli_boot_passcache)
1061 continue;
1062 memcpy(passphrase, cached_passphrase,
1063 sizeof(passphrase));
1064 } else {
1065 printf("Enter passphrase for %s: ", pp->name);
1066 cngets(passphrase, sizeof(passphrase),
1067 g_eli_visible_passphrase);
1068 memcpy(cached_passphrase, passphrase,
1069 sizeof(passphrase));
1070 }
1071 }
1072
1073 /*
1074 * Prepare Derived-Key from the user passphrase.
1075 */
1076 if (md.md_iterations == 0) {
1077 g_eli_crypto_hmac_update(&ctx, md.md_salt,
1078 sizeof(md.md_salt));
1079 g_eli_crypto_hmac_update(&ctx, passphrase,
1080 strlen(passphrase));
1081 bzero(passphrase, sizeof(passphrase));
1082 } else if (md.md_iterations > 0) {
1083 u_char dkey[G_ELI_USERKEYLEN];
1084
1085 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1086 sizeof(md.md_salt), passphrase, md.md_iterations);
1087 bzero(passphrase, sizeof(passphrase));
1088 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1089 bzero(dkey, sizeof(dkey));
1090 }
1091
1092 g_eli_crypto_hmac_final(&ctx, key, 0);
1093
1094 /*
1095 * Decrypt Master-Key.
1096 */
1097 error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1098 bzero(key, sizeof(key));
1099 if (error == -1) {
1100 if (i == tries) {
1101 G_ELI_DEBUG(0,
1102 "Wrong key for %s. No tries left.",
1103 pp->name);
1104 g_eli_keyfiles_clear(pp->name);
1105 return (NULL);
1106 }
1107 if (i > 0) {
1108 G_ELI_DEBUG(0,
1109 "Wrong key for %s. Tries left: %u.",
1110 pp->name, tries - i);
1111 }
1112 /* Try again. */
1113 continue;
1114 } else if (error > 0) {
1115 G_ELI_DEBUG(0,
1116 "Cannot decrypt Master Key for %s (error=%d).",
1117 pp->name, error);
1118 g_eli_keyfiles_clear(pp->name);
1119 return (NULL);
1120 }
1121 g_eli_keyfiles_clear(pp->name);
1122 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1123 break;
1124 }
1125
1126 /*
1127 * We have the correct key; let's attach the provider.
1128 */
1129 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1130 bzero(mkey, sizeof(mkey));
1131 bzero(&md, sizeof(md));
1132 if (gp == NULL) {
1133 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1134 G_ELI_SUFFIX);
1135 return (NULL);
1136 }
1137 return (gp);
1138}
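/*
 * A rough sketch of the User-Key derivation performed by the loop above
 * (an informal summary of the code, not a normative description):
 *
 *	hmac_init(ctx)
 *	for each preloaded key file:	hmac_update(ctx, keyfile data)
 *	if md_iterations == 0:		hmac_update(ctx, salt); hmac_update(ctx, passphrase)
 *	if md_iterations > 0:		hmac_update(ctx, PKCS#5v2(passphrase, salt, iterations))
 *	key = hmac_final(ctx)
 *
 * The resulting key is handed to g_eli_mkey_decrypt() to unwrap one of the
 * encrypted Master-Key copies stored in the on-disk metadata; md_iterations
 * of -1 means no passphrase is used at all.
 */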
1139
1140static void
1141g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1142 struct g_consumer *cp, struct g_provider *pp)
1143{
1144 struct g_eli_softc *sc;
1145
1146 g_topology_assert();
1147 sc = gp->softc;
1148 if (sc == NULL)
1149 return;
1150 if (pp != NULL || cp != NULL)
1151 return; /* Nothing here. */
1152
1153 sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
1154 (uintmax_t)sc->sc_ekeys_total);
1155 sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
1156 (uintmax_t)sc->sc_ekeys_allocated);
1157 sbuf_printf(sb, "%s<Flags>", indent);
1158 if (sc->sc_flags == 0)
1159 sbuf_printf(sb, "NONE");
1160 else {
1161 int first = 1;
1162
1163#define ADD_FLAG(flag, name) do { \
1164 if (sc->sc_flags & (flag)) { \
1165 if (!first) \
1166 sbuf_printf(sb, ", "); \
1167 else \
1168 first = 0; \
1169 sbuf_printf(sb, name); \
1170 } \
1171} while (0)
1172 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1173 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1174 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1175 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1176 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1177 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1178 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1179 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1180 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1181 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1182 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1183 ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
1184#undef ADD_FLAG
1185 }
1186 sbuf_printf(sb, "</Flags>\n");
1187
1188 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1189 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1190 sc->sc_nkey);
1191 }
1192 sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1193 sbuf_printf(sb, "%s<Crypto>", indent);
1194 switch (sc->sc_crypto) {
1195 case G_ELI_CRYPTO_HW:
1196 sbuf_printf(sb, "hardware");
1197 break;
1198 case G_ELI_CRYPTO_SW:
1199 sbuf_printf(sb, "software");
1200 break;
1201 default:
1202 sbuf_printf(sb, "UNKNOWN");
1203 break;
1204 }
1205 sbuf_printf(sb, "</Crypto>\n");
1206 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1207 sbuf_printf(sb,
1208 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1209 indent, g_eli_algo2str(sc->sc_aalgo));
1210 }
1211 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1212 sc->sc_ekeylen);
1213 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1214 indent, g_eli_algo2str(sc->sc_ealgo));
1215 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1216 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1217}
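/*
 * The fragment written above ends up under the geom's node in the GEOM
 * configuration XML (e.g. sysctl kern.geom.confxml).  An illustrative
 * excerpt with made-up values, matching the element names emitted above:
 *
 *	<KeysTotal>4</KeysTotal>
 *	<KeysAllocated>4</KeysAllocated>
 *	<Flags>BOOT</Flags>
 *	<UsedKey>0</UsedKey>
 *	<Version>7</Version>
 *	<Crypto>software</Crypto>
 *	<KeyLength>128</KeyLength>
 *	<EncryptionAlgorithm>AES-XTS</EncryptionAlgorithm>
 *	<State>ACTIVE</State>
 */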
1218
1219static void
1220g_eli_shutdown_pre_sync(void *arg, int howto)
1221{
1222 struct g_class *mp;
1223 struct g_geom *gp, *gp2;
1224 struct g_provider *pp;
1225 struct g_eli_softc *sc;
1226 int error;
1227
1228 mp = arg;
1229 DROP_GIANT();
1230 g_topology_lock();
1231 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1232 sc = gp->softc;
1233 if (sc == NULL)
1234 continue;
1235 pp = LIST_FIRST(&gp->provider);
1236 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1237 if (pp->acr + pp->acw + pp->ace == 0)
1238 error = g_eli_destroy(sc, TRUE);
1239 else {
1240 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1241 gp->access = g_eli_access;
1242 }
1243 }
1244 g_topology_unlock();
1245 PICKUP_GIANT();
1246}
1247
1248static void
1249g_eli_init(struct g_class *mp)
1250{
1251
1252 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1253 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1254 if (g_eli_pre_sync == NULL)
1255 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1256}
1257
1258static void
1259g_eli_fini(struct g_class *mp)
1260{
1261
1262 if (g_eli_pre_sync != NULL)
1263 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1264}
1265
1266DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1267MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
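/*
 * As a usage sketch: the class registered by DECLARE_GEOM_CLASS() above is
 * typically loaded before root is mounted so that g_eli_taste() can attach
 * BOOT-flagged providers, e.g. via /boot/loader.conf:
 *
 *	geom_eli_load="YES"
 *
 * The MODULE_DEPEND() on "crypto" pulls in the in-kernel crypto framework
 * that g_eli_newsession() relies on.
 */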