1/*-
2 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 228634 2011-12-17 15:26:34Z avg $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/cons.h>
33#include <sys/kernel.h>
34#include <sys/linker.h>
35#include <sys/module.h>
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/bio.h>
39#include <sys/sbuf.h>
40#include <sys/sysctl.h>
41#include <sys/malloc.h>
42#include <sys/eventhandler.h>
43#include <sys/kthread.h>
44#include <sys/proc.h>
45#include <sys/sched.h>
46#include <sys/smp.h>
47#include <sys/uio.h>
48#include <sys/vnode.h>
49
50#include <vm/uma.h>
51
52#include <geom/geom.h>
53#include <geom/eli/g_eli.h>
54#include <geom/eli/pkcs5v2.h>
55
56FEATURE(geom_eli, "GEOM crypto module");
57
58MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
59
60SYSCTL_DECL(_kern_geom);
61SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
62static int g_eli_version = G_ELI_VERSION;
63SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
64 "GELI version");
65int g_eli_debug = 0;
66TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
67SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
68 "Debug level");
69static u_int g_eli_tries = 3;
70TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
71SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
72 "Number of tries for entering the passphrase");
73static u_int g_eli_visible_passphrase = GETS_NOECHO;
74TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
75SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
76 &g_eli_visible_passphrase, 0,
77 "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
78u_int g_eli_overwrites = G_ELI_OVERWRITES;
79TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
80SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
81 0, "Number of times on-disk keys should be overwritten when destroying them");
82static u_int g_eli_threads = 0;
83TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
84SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
85 "Number of threads doing crypto work");
86u_int g_eli_batch = 0;
87TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
88SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
89 "Use crypto operations batching");
90
91static eventhandler_tag g_eli_pre_sync = NULL;
92
93static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
94 struct g_geom *gp);
95static void g_eli_init(struct g_class *mp);
96static void g_eli_fini(struct g_class *mp);
97
98static g_taste_t g_eli_taste;
99static g_dumpconf_t g_eli_dumpconf;
100
101struct g_class g_eli_class = {
102 .name = G_ELI_CLASS_NAME,
103 .version = G_VERSION,
104 .ctlreq = g_eli_config,
105 .taste = g_eli_taste,
106 .destroy_geom = g_eli_destroy_geom,
107 .init = g_eli_init,
108 .fini = g_eli_fini
109};
110
111
112/*
113 * Code paths:
114 * BIO_READ:
115 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
116 * BIO_WRITE:
117 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
118 */
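119
120/*
121 * Note: the paths above describe plain encryption. When data authentication
122 * is enabled (G_ELI_FLAG_AUTH), g_eli_start() and g_eli_worker() below hand
123 * the requests to g_eli_auth_read() and g_eli_auth_run() instead.
124 */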
119
120
121/*
122 * EAGAIN from crypto(9) means that the request was probably migrated to
123 * another crypto accelerator or the like.
124 * The function updates the SID and reruns the operation.
125 */
126int
127g_eli_crypto_rerun(struct cryptop *crp)
128{
129 struct g_eli_softc *sc;
130 struct g_eli_worker *wr;
131 struct bio *bp;
132 int error;
133
134 bp = (struct bio *)crp->crp_opaque;
135 sc = bp->bio_to->geom->softc;
136 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
137 if (wr->w_number == bp->bio_pflags)
138 break;
139 }
140 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
141 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
142 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
143 (uintmax_t)crp->crp_sid);
144 wr->w_sid = crp->crp_sid;
145 crp->crp_etype = 0;
146 error = crypto_dispatch(crp);
147 if (error == 0)
148 return (0);
149 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
150 crp->crp_etype = error;
151 return (error);
152}
153
154/*
155 * The function is called after reading encrypted data from the provider.
156 *
157 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
158 */
159void
160g_eli_read_done(struct bio *bp)
161{
162 struct g_eli_softc *sc;
163 struct bio *pbp;
164
165 G_ELI_LOGREQ(2, bp, "Request done.");
166 pbp = bp->bio_parent;
167 if (pbp->bio_error == 0)
168 pbp->bio_error = bp->bio_error;
169 g_destroy_bio(bp);
170 /*
171 * Do we have all sectors already?
172 */
173 pbp->bio_inbed++;
174 if (pbp->bio_inbed < pbp->bio_children)
175 return;
176 sc = pbp->bio_to->geom->softc;
177 if (pbp->bio_error != 0) {
178 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
179 pbp->bio_completed = 0;
180 if (pbp->bio_driver2 != NULL) {
181 free(pbp->bio_driver2, M_ELI);
182 pbp->bio_driver2 = NULL;
183 }
184 g_io_deliver(pbp, pbp->bio_error);
185 atomic_subtract_int(&sc->sc_inflight, 1);
186 return;
187 }
188 mtx_lock(&sc->sc_queue_mtx);
189 bioq_insert_tail(&sc->sc_queue, pbp);
190 mtx_unlock(&sc->sc_queue_mtx);
191 wakeup(sc);
192}
193
194/*
195 * The function is called after we encrypt and write data.
196 *
197 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
198 */
199void
200g_eli_write_done(struct bio *bp)
201{
202 struct g_eli_softc *sc;
203 struct bio *pbp;
204
205 G_ELI_LOGREQ(2, bp, "Request done.");
206 pbp = bp->bio_parent;
207 if (pbp->bio_error == 0) {
208 if (bp->bio_error != 0)
209 pbp->bio_error = bp->bio_error;
210 }
211 g_destroy_bio(bp);
212 /*
213 * Do we have all sectors already?
214 */
215 pbp->bio_inbed++;
216 if (pbp->bio_inbed < pbp->bio_children)
217 return;
218 free(pbp->bio_driver2, M_ELI);
219 pbp->bio_driver2 = NULL;
220 if (pbp->bio_error != 0) {
221 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
222 pbp->bio_error);
223 pbp->bio_completed = 0;
224 }
225 /*
226 * Write is finished, send it up.
227 */
228 pbp->bio_completed = pbp->bio_length;
229 sc = pbp->bio_to->geom->softc;
230 g_io_deliver(pbp, pbp->bio_error);
231 atomic_subtract_int(&sc->sc_inflight, 1);
232}
233
234/*
235 * This function should never be called, but GEOM requires an ->orphan()
236 * method to be set for every geom.
237 */
238static void
239g_eli_orphan_spoil_assert(struct g_consumer *cp)
240{
241
242 panic("Function %s() called for %s.", __func__, cp->geom->name);
243}
244
245static void
246g_eli_orphan(struct g_consumer *cp)
247{
248 struct g_eli_softc *sc;
249
250 g_topology_assert();
251 sc = cp->geom->softc;
252 if (sc == NULL)
253 return;
254 g_eli_destroy(sc, TRUE);
255}
256
257/*
258 * BIO_READ:
259 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
260 * BIO_WRITE:
261 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
262 */
263static void
264g_eli_start(struct bio *bp)
265{
266 struct g_eli_softc *sc;
267 struct g_consumer *cp;
268 struct bio *cbp;
269
270 sc = bp->bio_to->geom->softc;
271 KASSERT(sc != NULL,
272 ("Provider's error should be set (error=%d)(device=%s).",
273 bp->bio_to->error, bp->bio_to->name));
274 G_ELI_LOGREQ(2, bp, "Request received.");
275
276 switch (bp->bio_cmd) {
277 case BIO_READ:
278 case BIO_WRITE:
279 case BIO_GETATTR:
280 case BIO_FLUSH:
281 break;
282 case BIO_DELETE:
283 /*
284 * We could eventually support BIO_DELETE requests.
285 * It could be done by overwriting the requested sectors with
286 * random data g_eli_overwrites times.
287 */
288 default:
289 g_io_deliver(bp, EOPNOTSUPP);
290 return;
291 }
292 cbp = g_clone_bio(bp);
293 if (cbp == NULL) {
294 g_io_deliver(bp, ENOMEM);
295 return;
296 }
297 bp->bio_driver1 = cbp;
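	/*
	 * G_ELI_NEW_BIO marks a request that no worker has picked up yet:
	 * the worker clears the flag and accounts the bio in sc_inflight,
	 * and g_eli_takefirst() skips such bios while the device is
	 * suspended.
	 */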
298 bp->bio_pflags = G_ELI_NEW_BIO;
299 switch (bp->bio_cmd) {
300 case BIO_READ:
301 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
302 g_eli_crypto_read(sc, bp, 0);
303 break;
304 }
305 /* FALLTHROUGH */
306 case BIO_WRITE:
307 mtx_lock(&sc->sc_queue_mtx);
308 bioq_insert_tail(&sc->sc_queue, bp);
309 mtx_unlock(&sc->sc_queue_mtx);
310 wakeup(sc);
311 break;
312 case BIO_GETATTR:
313 case BIO_FLUSH:
314 cbp->bio_done = g_std_done;
315 cp = LIST_FIRST(&sc->sc_geom->consumer);
316 cbp->bio_to = cp->provider;
317 G_ELI_LOGREQ(2, cbp, "Sending request.");
318 g_io_request(cbp, cp);
319 break;
320 }
321}
322
323static int
324g_eli_newsession(struct g_eli_worker *wr)
325{
326 struct g_eli_softc *sc;
327 struct cryptoini crie, cria;
328 int error;
329
330 sc = wr->w_softc;
331
332 bzero(&crie, sizeof(crie));
333 crie.cri_alg = sc->sc_ealgo;
334 crie.cri_klen = sc->sc_ekeylen;
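	/* AES-XTS uses two keys, so its session key is twice the nominal length. */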
335 if (sc->sc_ealgo == CRYPTO_AES_XTS)
336 crie.cri_klen <<= 1;
337 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
338 crie.cri_key = g_eli_key_hold(sc, 0,
339 LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
340 } else {
341 crie.cri_key = sc->sc_ekey;
342 }
343 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
344 bzero(&cria, sizeof(cria));
345 cria.cri_alg = sc->sc_aalgo;
346 cria.cri_klen = sc->sc_akeylen;
347 cria.cri_key = sc->sc_akey;
348 crie.cri_next = &cria;
349 }
350
351 switch (sc->sc_crypto) {
352 case G_ELI_CRYPTO_SW:
353 error = crypto_newsession(&wr->w_sid, &crie,
354 CRYPTOCAP_F_SOFTWARE);
355 break;
356 case G_ELI_CRYPTO_HW:
357 error = crypto_newsession(&wr->w_sid, &crie,
358 CRYPTOCAP_F_HARDWARE);
359 break;
360 case G_ELI_CRYPTO_UNKNOWN:
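		/*
		 * Autodetect: try hardware crypto first and fall back to
		 * software, remembering the result for subsequent sessions.
		 */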
361 error = crypto_newsession(&wr->w_sid, &crie,
362 CRYPTOCAP_F_HARDWARE);
363 if (error == 0) {
364 mtx_lock(&sc->sc_queue_mtx);
365 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
366 sc->sc_crypto = G_ELI_CRYPTO_HW;
367 mtx_unlock(&sc->sc_queue_mtx);
368 } else {
369 error = crypto_newsession(&wr->w_sid, &crie,
370 CRYPTOCAP_F_SOFTWARE);
371 mtx_lock(&sc->sc_queue_mtx);
372 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
373 sc->sc_crypto = G_ELI_CRYPTO_SW;
374 mtx_unlock(&sc->sc_queue_mtx);
375 }
376 break;
377 default:
378 panic("%s: invalid condition", __func__);
379 }
380
381 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
382 g_eli_key_drop(sc, crie.cri_key);
383
384 return (error);
385}
386
387static void
388g_eli_freesession(struct g_eli_worker *wr)
389{
390
391 crypto_freesession(wr->w_sid);
392}
393
394static void
395g_eli_cancel(struct g_eli_softc *sc)
396{
397 struct bio *bp;
398
399 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
400
401 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
402 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
403 ("Not new bio when canceling (bp=%p).", bp));
404 g_io_deliver(bp, ENXIO);
405 }
406}
407
408static struct bio *
409g_eli_takefirst(struct g_eli_softc *sc)
410{
411 struct bio *bp;
412
413 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
414
415 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
416 return (bioq_takefirst(&sc->sc_queue));
417 /*
418 * The device is suspended, so we skip new I/O requests.
419 */
420 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
421 if (bp->bio_pflags != G_ELI_NEW_BIO)
422 break;
423 }
424 if (bp != NULL)
425 bioq_remove(&sc->sc_queue, bp);
426 return (bp);
427}
428
429/*
430 * This is the main function of the kernel worker thread when we don't have
431 * hardware acceleration and have to do cryptography in software.
432 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
433 * threads with crypto work.
434 */
435static void
436g_eli_worker(void *arg)
437{
438 struct g_eli_softc *sc;
439 struct g_eli_worker *wr;
440 struct bio *bp;
441 int error;
442
443 wr = arg;
444 sc = wr->w_softc;
445#ifdef SMP
446 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
447 if (sc->sc_cpubind) {
448 while (!smp_started)
449 tsleep(wr, 0, "geli:smp", hz / 4);
450 }
451#endif
452 thread_lock(curthread);
453 sched_prio(curthread, PUSER);
454 if (sc->sc_cpubind)
455 sched_bind(curthread, wr->w_number % mp_ncpus);
456 thread_unlock(curthread);
457
458 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
459
460 for (;;) {
461 mtx_lock(&sc->sc_queue_mtx);
462again:
463 bp = g_eli_takefirst(sc);
464 if (bp == NULL) {
465 if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
466 g_eli_cancel(sc);
467 LIST_REMOVE(wr, w_next);
468 g_eli_freesession(wr);
469 free(wr, M_ELI);
470 G_ELI_DEBUG(1, "Thread %s exiting.",
471 curthread->td_proc->p_comm);
472 wakeup(&sc->sc_workers);
473 mtx_unlock(&sc->sc_queue_mtx);
474 kproc_exit(0);
475 }
476 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
477 if (sc->sc_inflight > 0) {
478 G_ELI_DEBUG(0, "inflight=%d",
479 sc->sc_inflight);
480 /*
481 * We still have inflight BIOs, so
482 * sleep and retry.
483 */
484 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
485 "geli:inf", hz / 5);
486 goto again;
487 }
488 /*
489 * Suspend requested, mark the worker as
490 * suspended and go to sleep.
491 */
492 if (wr->w_active) {
493 g_eli_freesession(wr);
494 wr->w_active = FALSE;
495 }
496 wakeup(&sc->sc_workers);
497 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
498 "geli:suspend", 0);
499 if (!wr->w_active &&
500 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
501 error = g_eli_newsession(wr);
502 KASSERT(error == 0,
503 ("g_eli_newsession() failed on resume (error=%d)",
504 error));
505 wr->w_active = TRUE;
506 }
507 goto again;
508 }
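			/*
			 * Nothing to do: sleep until a new request arrives.
			 * PDROP drops the queue mutex; it is reacquired at
			 * the top of the loop.
			 */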
509 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
510 continue;
511 }
512 if (bp->bio_pflags == G_ELI_NEW_BIO)
513 atomic_add_int(&sc->sc_inflight, 1);
514 mtx_unlock(&sc->sc_queue_mtx);
515 if (bp->bio_pflags == G_ELI_NEW_BIO) {
516 bp->bio_pflags = 0;
517 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
518 if (bp->bio_cmd == BIO_READ)
519 g_eli_auth_read(sc, bp);
520 else
521 g_eli_auth_run(wr, bp);
522 } else {
523 if (bp->bio_cmd == BIO_READ)
524 g_eli_crypto_read(sc, bp, 1);
525 else
526 g_eli_crypto_run(wr, bp);
527 }
528 } else {
529 if (sc->sc_flags & G_ELI_FLAG_AUTH)
530 g_eli_auth_run(wr, bp);
531 else
532 g_eli_crypto_run(wr, bp);
533 }
534 }
535}
536
537/*
538 * Here we generate the IV. It is unique for every sector.
539 */
540void
541g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
542 size_t size)
543{
544 uint8_t off[8];
545
546 if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
547 bcopy(&offset, off, sizeof(off));
548 else
549 le64enc(off, (uint64_t)offset);
550
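	/*
	 * For AES-XTS the zero-padded sector offset itself is used as the IV.
	 * For the other algorithms the IV is taken from SHA256(IV-Key || offset),
	 * truncated to fit the requested size.
	 */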
551 switch (sc->sc_ealgo) {
552 case CRYPTO_AES_XTS:
553 bcopy(off, iv, sizeof(off));
554 bzero(iv + sizeof(off), size - sizeof(off));
555 break;
556 default:
557 {
558 u_char hash[SHA256_DIGEST_LENGTH];
559 SHA256_CTX ctx;
560
561 /* Copy precalculated SHA256 context for IV-Key. */
562 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
563 SHA256_Update(&ctx, off, sizeof(off));
564 SHA256_Final(hash, &ctx);
565 bcopy(hash, iv, MIN(sizeof(hash), size));
566 break;
567 }
568 }
569}
570
571int
572g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
573 struct g_eli_metadata *md)
574{
575 struct g_geom *gp;
576 struct g_consumer *cp;
577 u_char *buf = NULL;
578 int error;
579
580 g_topology_assert();
581
582 gp = g_new_geomf(mp, "eli:taste");
583 gp->start = g_eli_start;
584 gp->access = g_std_access;
585 /*
586 * g_eli_read_metadata() is always called from the event thread.
587 * Our geom is created and destroyed within the same event, so no
588 * orphan or spoil event can occur in the meantime.
589 */
590 gp->orphan = g_eli_orphan_spoil_assert;
591 gp->spoiled = g_eli_orphan_spoil_assert;
592 cp = g_new_consumer(gp);
593 error = g_attach(cp, pp);
594 if (error != 0)
595 goto end;
596 error = g_access(cp, 1, 0, 0);
597 if (error != 0)
598 goto end;
599 g_topology_unlock();
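	/* The metadata lives in the provider's last sector. */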
600 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
601 &error);
602 g_topology_lock();
603 if (buf == NULL)
604 goto end;
605 eli_metadata_decode(buf, md);
606end:
607 if (buf != NULL)
608 g_free(buf);
609 if (cp->provider != NULL) {
610 if (cp->acr == 1)
611 g_access(cp, -1, 0, 0);
612 g_detach(cp);
613 }
614 g_destroy_consumer(cp);
615 g_destroy_geom(gp);
616 return (error);
617}
618
619/*
620 * The function is called on the last close of the provider if the user
621 * requested that it be detached in this situation.
622 */
623static void
624g_eli_last_close(struct g_eli_softc *sc)
625{
626 struct g_geom *gp;
627 struct g_provider *pp;
628 char ppname[64];
629 int error;
630
631 g_topology_assert();
632 gp = sc->sc_geom;
633 pp = LIST_FIRST(&gp->provider);
634 strlcpy(ppname, pp->name, sizeof(ppname));
635 error = g_eli_destroy(sc, TRUE);
636 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
637 ppname, error));
638 G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
639}
640
641int
642g_eli_access(struct g_provider *pp, int dr, int dw, int de)
643{
644 struct g_eli_softc *sc;
645 struct g_geom *gp;
646
647 gp = pp->geom;
648 sc = gp->softc;
649
650 if (dw > 0) {
651 if (sc->sc_flags & G_ELI_FLAG_RO) {
652 /* Deny write attempts. */
653 return (EROFS);
654 }
655 /* Someone is opening us for write, we need to remember that. */
656 sc->sc_flags |= G_ELI_FLAG_WOPEN;
657 return (0);
658 }
659 /* Is this the last close? */
660 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
661 return (0);
662
663 /*
664 * Automatically detach on last close if requested.
665 */
666 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
667 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
668 g_eli_last_close(sc);
669 }
670 return (0);
671}
672
673static int
674g_eli_cpu_is_disabled(int cpu)
675{
676#ifdef SMP
677 return (CPU_ISSET(cpu, &hlt_cpus_mask));
678#else
679 return (0);
680#endif
681}
682
683struct g_geom *
684g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
685 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
686{
687 struct g_eli_softc *sc;
688 struct g_eli_worker *wr;
689 struct g_geom *gp;
690 struct g_provider *pp;
691 struct g_consumer *cp;
692 u_int i, threads;
693 int error;
694
695 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
696
697 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
698 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
699 gp->start = g_eli_start;
700 /*
701 * Spoiling cannot actually happen, because we either keep the provider
702 * open for writing all the time or the provider is read-only.
703 */
704 gp->spoiled = g_eli_orphan_spoil_assert;
705 gp->orphan = g_eli_orphan;
706 gp->dumpconf = g_eli_dumpconf;
707 /*
708 * If the detach-on-last-close feature is not enabled and we don't
709 * operate on a read-only provider, we can simply use g_std_access().
710 */
711 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
712 gp->access = g_eli_access;
713 else
714 gp->access = g_std_access;
715
716 sc->sc_version = md->md_version;
717 sc->sc_inflight = 0;
718 sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
719 sc->sc_flags = md->md_flags;
720 /* Backward compatibility. */
721 if (md->md_version < G_ELI_VERSION_04)
722 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
723 if (md->md_version < G_ELI_VERSION_05)
724 sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
725 if (md->md_version < G_ELI_VERSION_06 &&
726 (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
727 sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
728 }
729 sc->sc_ealgo = md->md_ealgo;
730 sc->sc_nkey = nkey;
731
732 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
733 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
734 sc->sc_aalgo = md->md_aalgo;
735 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
736
737 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
738 /*
739 * Some hash functions (like SHA1 and RIPEMD160) generate a hash
740 * whose length is not a multiple of 128 bits, but we want the data
741 * length to be a multiple of 128 bits, so we can encrypt without
742 * padding. The line below rounds the data length down to a
743 * multiple of 128 bits.
744 */
745 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
746
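		/*
		 * sc_bytes_per_sector is the amount of backing-provider space
		 * consumed by one decrypted sector: enough native sectors to
		 * hold md_sectorsize bytes of payload, each native sector
		 * carrying its own HMAC, times the native sector size.
		 */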
747 sc->sc_bytes_per_sector =
748 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
749 sc->sc_bytes_per_sector *= bpp->sectorsize;
750 }
751
752 gp->softc = sc;
753 sc->sc_geom = gp;
754
755 bioq_init(&sc->sc_queue);
756 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
757 mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
758
759 pp = NULL;
760 cp = g_new_consumer(gp);
761 error = g_attach(cp, bpp);
762 if (error != 0) {
763 if (req != NULL) {
764 gctl_error(req, "Cannot attach to %s (error=%d).",
765 bpp->name, error);
766 } else {
767 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
768 bpp->name, error);
769 }
770 goto failed;
771 }
772 /*
773 * Keep the provider open all the time, so we can run critical tasks,
774 * like Master Key deletion, without wondering whether we can open
775 * the provider or not.
776 * We skip opening the provider for writing only when the user
777 * requested read-only access.
778 */
779 if (sc->sc_flags & G_ELI_FLAG_RO)
780 error = g_access(cp, 1, 0, 1);
781 else
782 error = g_access(cp, 1, 1, 1);
783 if (error != 0) {
784 if (req != NULL) {
785 gctl_error(req, "Cannot access %s (error=%d).",
786 bpp->name, error);
787 } else {
788 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
789 bpp->name, error);
790 }
791 goto failed;
792 }
793
794 sc->sc_sectorsize = md->md_sectorsize;
795 sc->sc_mediasize = bpp->mediasize;
796 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
797 sc->sc_mediasize -= bpp->sectorsize;
798 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
799 sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
800 else {
801 sc->sc_mediasize /= sc->sc_bytes_per_sector;
802 sc->sc_mediasize *= sc->sc_sectorsize;
803 }
804
805 /*
806 * Remember the keys in our softc structure.
807 */
808 g_eli_mkey_propagate(sc, mkey);
809 sc->sc_ekeylen = md->md_keylen;
810
811 LIST_INIT(&sc->sc_workers);
812
813 threads = g_eli_threads;
814 if (threads == 0)
815 threads = mp_ncpus;
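	/* Bind workers to CPUs only when there is exactly one worker per CPU. */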
816 sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
817 for (i = 0; i < threads; i++) {
818 if (g_eli_cpu_is_disabled(i)) {
819 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
820 bpp->name, i);
821 continue;
822 }
823 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
824 wr->w_softc = sc;
825 wr->w_number = i;
826 wr->w_active = TRUE;
827
828 error = g_eli_newsession(wr);
829 if (error != 0) {
830 free(wr, M_ELI);
831 if (req != NULL) {
832 gctl_error(req, "Cannot set up crypto session "
833 "for %s (error=%d).", bpp->name, error);
834 } else {
835 G_ELI_DEBUG(1, "Cannot set up crypto session "
836 "for %s (error=%d).", bpp->name, error);
837 }
838 goto failed;
839 }
840
841 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
842 "g_eli[%u] %s", i, bpp->name);
843 if (error != 0) {
844 g_eli_freesession(wr);
845 free(wr, M_ELI);
846 if (req != NULL) {
847 gctl_error(req, "Cannot create kernel thread "
848 "for %s (error=%d).", bpp->name, error);
849 } else {
850 G_ELI_DEBUG(1, "Cannot create kernel thread "
851 "for %s (error=%d).", bpp->name, error);
852 }
853 goto failed;
854 }
855 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
856 }
857
858 /*
859 * Create decrypted provider.
860 */
861 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
862 pp->mediasize = sc->sc_mediasize;
863 pp->sectorsize = sc->sc_sectorsize;
864
865 g_error_provider(pp, 0);
866
867 G_ELI_DEBUG(0, "Device %s created.", pp->name);
868 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
869 sc->sc_ekeylen);
870 if (sc->sc_flags & G_ELI_FLAG_AUTH)
871 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
872 G_ELI_DEBUG(0, " Crypto: %s",
873 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
874 return (gp);
875failed:
876 mtx_lock(&sc->sc_queue_mtx);
877 sc->sc_flags |= G_ELI_FLAG_DESTROY;
878 wakeup(sc);
879 /*
880 * Wait for the kernel threads to self-destruct.
881 */
882 while (!LIST_EMPTY(&sc->sc_workers)) {
883 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
884 "geli:destroy", 0);
885 }
886 mtx_destroy(&sc->sc_queue_mtx);
887 if (cp->provider != NULL) {
888 if (cp->acr == 1)
889 g_access(cp, -1, -1, -1);
890 g_detach(cp);
891 }
892 g_destroy_consumer(cp);
893 g_destroy_geom(gp);
894 g_eli_key_destroy(sc);
895 bzero(sc, sizeof(*sc));
896 free(sc, M_ELI);
897 return (NULL);
898}
899
900int
901g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
902{
903 struct g_geom *gp;
904 struct g_provider *pp;
905
906 g_topology_assert();
907
908 if (sc == NULL)
909 return (ENXIO);
910
911 gp = sc->sc_geom;
912 pp = LIST_FIRST(&gp->provider);
913 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
914 if (force) {
915 G_ELI_DEBUG(1, "Device %s is still open, so it "
916 "cannot be definitely removed.", pp->name);
917 } else {
918 G_ELI_DEBUG(1,
919 "Device %s is still open (r%dw%de%d).", pp->name,
920 pp->acr, pp->acw, pp->ace);
921 return (EBUSY);
922 }
923 }
924
925 mtx_lock(&sc->sc_queue_mtx);
926 sc->sc_flags |= G_ELI_FLAG_DESTROY;
927 wakeup(sc);
928 while (!LIST_EMPTY(&sc->sc_workers)) {
929 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
930 "geli:destroy", 0);
931 }
932 mtx_destroy(&sc->sc_queue_mtx);
933 gp->softc = NULL;
934 g_eli_key_destroy(sc);
935 bzero(sc, sizeof(*sc));
936 free(sc, M_ELI);
937
938 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
939 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
940 g_wither_geom_close(gp, ENXIO);
941
942 return (0);
943}
944
945static int
946g_eli_destroy_geom(struct gctl_req *req __unused,
947 struct g_class *mp __unused, struct g_geom *gp)
948{
949 struct g_eli_softc *sc;
950
951 sc = gp->softc;
952 return (g_eli_destroy(sc, FALSE));
953}
954
955static int
956g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
957{
958 u_char *keyfile, *data;
959 char *file, name[64];
960 size_t size;
961 int i;
962
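	/*
	 * Key files are preloaded by the loader and looked up by type
	 * "<provider>:geli_keyfile<N>"; iterate until no more are found.
	 */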
963 for (i = 0; ; i++) {
964 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
965 keyfile = preload_search_by_type(name);
966 if (keyfile == NULL)
967 return (i); /* Return number of loaded keyfiles. */
968 data = preload_fetch_addr(keyfile);
969 if (data == NULL) {
970 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
971 name);
972 return (0);
973 }
974 size = preload_fetch_size(keyfile);
975 if (size == 0) {
976 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
977 name);
978 return (0);
979 }
980 file = preload_search_info(keyfile, MODINFO_NAME);
981 if (file == NULL) {
982 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
983 name);
984 return (0);
985 }
986 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
987 provider, name);
988 g_eli_crypto_hmac_update(ctx, data, size);
989 }
990}
991
992static void
993g_eli_keyfiles_clear(const char *provider)
994{
995 u_char *keyfile, *data;
996 char name[64];
997 size_t size;
998 int i;
999
1000 for (i = 0; ; i++) {
1001 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1002 keyfile = preload_search_by_type(name);
1003 if (keyfile == NULL)
1004 return;
1005 data = preload_fetch_addr(keyfile);
1006 size = preload_fetch_size(keyfile);
1007 if (data != NULL && size != 0)
1008 bzero(data, size);
1009 }
1010}
1011
1012/*
1013 * Tasting is only done at boot.
1014 * We detect providers which should be attached before root is mounted.
1015 */
1016static struct g_geom *
1017g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1018{
1019 struct g_eli_metadata md;
1020 struct g_geom *gp;
1021 struct hmac_ctx ctx;
1022 char passphrase[256];
1023 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1024 u_int i, nkey, nkeyfiles, tries;
1025 int error;
1026
1027 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1028 g_topology_assert();
1029
1030 if (root_mounted() || g_eli_tries == 0)
1031 return (NULL);
1032
1033 G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1034
1035 error = g_eli_read_metadata(mp, pp, &md);
1036 if (error != 0)
1037 return (NULL);
1038 gp = NULL;
1039
1040 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1041 return (NULL);
1042 if (md.md_version > G_ELI_VERSION) {
1043 printf("geom_eli.ko module is too old to handle %s.\n",
1044 pp->name);
1045 return (NULL);
1046 }
1047 if (md.md_provsize != pp->mediasize)
1048 return (NULL);
1049 /* Should we attach it on boot? */
1050 if (!(md.md_flags & G_ELI_FLAG_BOOT))
1051 return (NULL);
1052 if (md.md_keys == 0x00) {
1053 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1054 return (NULL);
1055 }
1056 if (md.md_iterations == -1) {
1057 /* If there is no passphrase, we try only once. */
1058 tries = 1;
1059 } else {
1060 /* Ask for the passphrase no more than g_eli_tries times. */
1061 tries = g_eli_tries;
1062 }
1063
1064 for (i = 0; i < tries; i++) {
1065 g_eli_crypto_hmac_init(&ctx, NULL, 0);
1066
1067 /*
1068 * Load all key files.
1069 */
1070 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1071
1072 if (nkeyfiles == 0 && md.md_iterations == -1) {
1073 /*
1074 * No key files and no passphrase, something is
1075 * definitely wrong here.
1076 * geli(8) doesn't allow such a situation, so assume
1077 * that there really was no passphrase and, in that case,
1078 * the key files are not properly defined in loader.conf.
1079 */
1080 G_ELI_DEBUG(0,
1081 "Found no key files in loader.conf for %s.",
1082 pp->name);
1083 return (NULL);
1084 }
1085
1086 /* Ask for the passphrase if defined. */
1087 if (md.md_iterations >= 0) {
1088 printf("Enter passphrase for %s: ", pp->name);
1089 cngets(passphrase, sizeof(passphrase),
1090 g_eli_visible_passphrase);
1091 }
1092
1093 /*
1094 * Prepare Derived-Key from the user passphrase.
1095 */
1096 if (md.md_iterations == 0) {
1097 g_eli_crypto_hmac_update(&ctx, md.md_salt,
1098 sizeof(md.md_salt));
1099 g_eli_crypto_hmac_update(&ctx, passphrase,
1100 strlen(passphrase));
1101 bzero(passphrase, sizeof(passphrase));
1102 } else if (md.md_iterations > 0) {
1103 u_char dkey[G_ELI_USERKEYLEN];
1104
1105 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1106 sizeof(md.md_salt), passphrase, md.md_iterations);
1107 bzero(passphrase, sizeof(passphrase));
1108 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1109 bzero(dkey, sizeof(dkey));
1110 }
1111
1112 g_eli_crypto_hmac_final(&ctx, key, 0);
1113
1114 /*
1115 * Decrypt Master-Key.
1116 */
1117 error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1118 bzero(key, sizeof(key));
1119 if (error == -1) {
1120 if (i == tries - 1) {
1121 G_ELI_DEBUG(0,
1122 "Wrong key for %s. No tries left.",
1123 pp->name);
1124 g_eli_keyfiles_clear(pp->name);
1125 return (NULL);
1126 }
1127 G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
1128 pp->name, tries - i - 1);
1129 /* Try again. */
1130 continue;
1131 } else if (error > 0) {
1132 G_ELI_DEBUG(0,
1133 "Cannot decrypt Master Key for %s (error=%d).",
1134 pp->name, error);
1135 g_eli_keyfiles_clear(pp->name);
1136 return (NULL);
1137 }
1138 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1139 break;
1140 }
1141
1142 /*
1143 * We have the correct key, let's attach the provider.
1144 */
1145 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1146 bzero(mkey, sizeof(mkey));
1147 bzero(&md, sizeof(md));
1148 if (gp == NULL) {
1149 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1150 G_ELI_SUFFIX);
1151 return (NULL);
1152 }
1153 return (gp);
1154}
1155
1156static void
1157g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1158 struct g_consumer *cp, struct g_provider *pp)
1159{
1160 struct g_eli_softc *sc;
1161
1162 g_topology_assert();
1163 sc = gp->softc;
1164 if (sc == NULL)
1165 return;
1166 if (pp != NULL || cp != NULL)
1167 return; /* Nothing here. */
1168
1169 sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>", indent,
1170 (uintmax_t)sc->sc_ekeys_total);
1171 sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>", indent,
1172 (uintmax_t)sc->sc_ekeys_allocated);
1173 sbuf_printf(sb, "%s<Flags>", indent);
1174 if (sc->sc_flags == 0)
1175 sbuf_printf(sb, "NONE");
1176 else {
1177 int first = 1;
1178
1179#define ADD_FLAG(flag, name) do { \
1180 if (sc->sc_flags & (flag)) { \
1181 if (!first) \
1182 sbuf_printf(sb, ", "); \
1183 else \
1184 first = 0; \
1185 sbuf_printf(sb, name); \
1186 } \
1187} while (0)
1188 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1189 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1190 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1191 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1192 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1193 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1194 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1195 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1196 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1197 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1198 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1199#undef ADD_FLAG
1200 }
1201 sbuf_printf(sb, "</Flags>\n");
1202
1203 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1204 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1205 sc->sc_nkey);
1206 }
1207 sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1208 sbuf_printf(sb, "%s<Crypto>", indent);
1209 switch (sc->sc_crypto) {
1210 case G_ELI_CRYPTO_HW:
1211 sbuf_printf(sb, "hardware");
1212 break;
1213 case G_ELI_CRYPTO_SW:
1214 sbuf_printf(sb, "software");
1215 break;
1216 default:
1217 sbuf_printf(sb, "UNKNOWN");
1218 break;
1219 }
1220 sbuf_printf(sb, "</Crypto>\n");
1221 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1222 sbuf_printf(sb,
1223 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1224 indent, g_eli_algo2str(sc->sc_aalgo));
1225 }
1226 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1227 sc->sc_ekeylen);
1228 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1229 indent, g_eli_algo2str(sc->sc_ealgo));
1230 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1231 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1232}
1233
1234static void
1235g_eli_shutdown_pre_sync(void *arg, int howto)
1236{
1237 struct g_class *mp;
1238 struct g_geom *gp, *gp2;
1239 struct g_provider *pp;
1240 struct g_eli_softc *sc;
1241 int error;
1242
1243 mp = arg;
1244 DROP_GIANT();
1245 g_topology_lock();
1246 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1247 sc = gp->softc;
1248 if (sc == NULL)
1249 continue;
1250 pp = LIST_FIRST(&gp->provider);
1251 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
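		/*
		 * Destroy devices that are not open; switch open ones to
		 * detach-on-last-close instead.
		 */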
1252 if (pp->acr + pp->acw + pp->ace == 0)
1253 error = g_eli_destroy(sc, TRUE);
1254 else {
1255 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1256 gp->access = g_eli_access;
1257 }
1258 }
1259 g_topology_unlock();
1260 PICKUP_GIANT();
1261}
1262
1263static void
1264g_eli_init(struct g_class *mp)
1265{
1266
1267 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1268 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1269 if (g_eli_pre_sync == NULL)
1270 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1271}
1272
1273static void
1274g_eli_fini(struct g_class *mp)
1275{
1276
1277 if (g_eli_pre_sync != NULL)
1278 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1279}
1280
1281DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1282MODULE_DEPEND(g_eli, crypto, 1, 1, 1);