1/*-
2 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 226840 2011-10-27 16:12:25Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/linker.h>
34#include <sys/module.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sbuf.h>
39#include <sys/sysctl.h>
40#include <sys/malloc.h>
41#include <sys/eventhandler.h>
42#include <sys/kthread.h>
43#include <sys/proc.h>
44#include <sys/sched.h>
45#include <sys/smp.h>
46#include <sys/uio.h>
47#include <sys/vnode.h>
48
49#include <vm/uma.h>
50
51#include <geom/geom.h>
52#include <geom/eli/g_eli.h>
53#include <geom/eli/pkcs5v2.h>
54
55FEATURE(geom_eli, "GEOM crypto module");
56
57MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
58
59SYSCTL_DECL(_kern_geom);
60SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
61static int g_eli_version = G_ELI_VERSION;
62SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
63 "GELI version");
64int g_eli_debug = 0;
65TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
66SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
67 "Debug level");
68static u_int g_eli_tries = 3;
69TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
70SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
71 "Number of tries for entering the passphrase");
72static u_int g_eli_visible_passphrase = GETS_NOECHO;
73TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
74SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
75 &g_eli_visible_passphrase, 0,
76 "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
77u_int g_eli_overwrites = G_ELI_OVERWRITES;
78TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
79SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
80 0, "Number of times on-disk keys should be overwritten when destroying them");
81static u_int g_eli_threads = 0;
82TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
83SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
84 "Number of threads doing crypto work");
85u_int g_eli_batch = 0;
86TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
87SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
88 "Use crypto operations batching");
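
/*
 * Illustrative usage, not part of the original file: the TUNABLE_INT() knobs
 * above can be set from loader.conf(5), and the CTLFLAG_RW sysctls can also
 * be changed at runtime with sysctl(8), e.g.:
 *
 *	kern.geom.eli.threads=2			# /boot/loader.conf
 *	kern.geom.eli.visible_passphrase=2	# /boot/loader.conf (asterisks)
 *	# sysctl kern.geom.eli.debug=1		# at runtime
 */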
89
90static eventhandler_tag g_eli_pre_sync = NULL;
91
92static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
93 struct g_geom *gp);
94static void g_eli_init(struct g_class *mp);
95static void g_eli_fini(struct g_class *mp);
96
97static g_taste_t g_eli_taste;
98static g_dumpconf_t g_eli_dumpconf;
99
100struct g_class g_eli_class = {
101 .name = G_ELI_CLASS_NAME,
102 .version = G_VERSION,
103 .ctlreq = g_eli_config,
104 .taste = g_eli_taste,
105 .destroy_geom = g_eli_destroy_geom,
106 .init = g_eli_init,
107 .fini = g_eli_fini
108};
109
110
111/*
112 * Code paths:
113 * BIO_READ:
114 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
115 * BIO_WRITE:
116 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
117 */
118
119
120/*
121 * EAGAIN from crypto(9) means that we were probably rebalanced to another crypto
122 * accelerator or something like that.
123 * The function updates the SID and reruns the operation.
124 */
125int
126g_eli_crypto_rerun(struct cryptop *crp)
127{
128 struct g_eli_softc *sc;
129 struct g_eli_worker *wr;
130 struct bio *bp;
131 int error;
132
133 bp = (struct bio *)crp->crp_opaque;
134 sc = bp->bio_to->geom->softc;
135 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
136 if (wr->w_number == bp->bio_pflags)
137 break;
138 }
139 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
140 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
141 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
142 (uintmax_t)crp->crp_sid);
143 wr->w_sid = crp->crp_sid;
144 crp->crp_etype = 0;
145 error = crypto_dispatch(crp);
146 if (error == 0)
147 return (0);
148 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
149 crp->crp_etype = error;
150 return (error);
151}
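
/*
 * Illustrative only (a sketch, not code from this file): the crypto(9)
 * completion callbacks in the read/write paths are expected to hand EAGAIN
 * completions back to g_eli_crypto_rerun(), roughly like this:
 *
 *	if (crp->crp_etype == EAGAIN)
 *		return (g_eli_crypto_rerun(crp));
 */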
152
153/*
154 * The function is called after reading encrypted data from the provider.
155 *
156 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
157 */
158void
159g_eli_read_done(struct bio *bp)
160{
161 struct g_eli_softc *sc;
162 struct bio *pbp;
163
164 G_ELI_LOGREQ(2, bp, "Request done.");
165 pbp = bp->bio_parent;
166 if (pbp->bio_error == 0)
167 pbp->bio_error = bp->bio_error;
168 g_destroy_bio(bp);
169 /*
170 * Do we have all sectors already?
171 */
172 pbp->bio_inbed++;
173 if (pbp->bio_inbed < pbp->bio_children)
174 return;
175 sc = pbp->bio_to->geom->softc;
176 if (pbp->bio_error != 0) {
177 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
178 pbp->bio_completed = 0;
179 if (pbp->bio_driver2 != NULL) {
180 free(pbp->bio_driver2, M_ELI);
181 pbp->bio_driver2 = NULL;
182 }
183 g_io_deliver(pbp, pbp->bio_error);
184 atomic_subtract_int(&sc->sc_inflight, 1);
185 return;
186 }
187 mtx_lock(&sc->sc_queue_mtx);
188 bioq_insert_tail(&sc->sc_queue, pbp);
189 mtx_unlock(&sc->sc_queue_mtx);
190 wakeup(sc);
191}
192
193/*
194 * The function is called after we encrypt and write data.
195 *
196 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
197 */
198void
199g_eli_write_done(struct bio *bp)
200{
201 struct g_eli_softc *sc;
202 struct bio *pbp;
203
204 G_ELI_LOGREQ(2, bp, "Request done.");
205 pbp = bp->bio_parent;
206 if (pbp->bio_error == 0) {
207 if (bp->bio_error != 0)
208 pbp->bio_error = bp->bio_error;
209 }
210 g_destroy_bio(bp);
211 /*
212 * Do we have all sectors already?
213 */
214 pbp->bio_inbed++;
215 if (pbp->bio_inbed < pbp->bio_children)
216 return;
217 free(pbp->bio_driver2, M_ELI);
218 pbp->bio_driver2 = NULL;
219 if (pbp->bio_error != 0) {
220 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
221 pbp->bio_error);
222 pbp->bio_completed = 0;
223 }
224 /*
225 * Write is finished, send it up.
226 */
227 pbp->bio_completed = pbp->bio_length;
228 sc = pbp->bio_to->geom->softc;
229 g_io_deliver(pbp, pbp->bio_error);
230 atomic_subtract_int(&sc->sc_inflight, 1);
231}
232
233/*
234 * This function should never be called, but GEOM requires that an ->orphan()
235 * method be set for every geom.
236 */
237static void
238g_eli_orphan_spoil_assert(struct g_consumer *cp)
239{
240
241 panic("Function %s() called for %s.", __func__, cp->geom->name);
242}
243
244static void
245g_eli_orphan(struct g_consumer *cp)
246{
247 struct g_eli_softc *sc;
248
249 g_topology_assert();
250 sc = cp->geom->softc;
251 if (sc == NULL)
252 return;
253 g_eli_destroy(sc, TRUE);
254}
255
256/*
257 * BIO_READ:
258 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
259 * BIO_WRITE:
260 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
261 */
262static void
263g_eli_start(struct bio *bp)
264{
265 struct g_eli_softc *sc;
266 struct g_consumer *cp;
267 struct bio *cbp;
268
269 sc = bp->bio_to->geom->softc;
270 KASSERT(sc != NULL,
271 ("Provider's error should be set (error=%d)(device=%s).",
272 bp->bio_to->error, bp->bio_to->name));
273 G_ELI_LOGREQ(2, bp, "Request received.");
274
275 switch (bp->bio_cmd) {
276 case BIO_READ:
277 case BIO_WRITE:
278 case BIO_GETATTR:
279 case BIO_FLUSH:
280 break;
281 case BIO_DELETE:
282 /*
283 * We could eventually support BIO_DELETE requests.
284 * It could be done by overwriting the requested sectors with
285 * random data g_eli_overwrites times.
286 */
287 default:
288 g_io_deliver(bp, EOPNOTSUPP);
289 return;
290 }
291 cbp = g_clone_bio(bp);
292 if (cbp == NULL) {
293 g_io_deliver(bp, ENOMEM);
294 return;
295 }
296 bp->bio_driver1 = cbp;
297 bp->bio_pflags = G_ELI_NEW_BIO;
298 switch (bp->bio_cmd) {
299 case BIO_READ:
300 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
301 g_eli_crypto_read(sc, bp, 0);
302 break;
303 }
304 /* FALLTHROUGH */
305 case BIO_WRITE:
306 mtx_lock(&sc->sc_queue_mtx);
307 bioq_insert_tail(&sc->sc_queue, bp);
308 mtx_unlock(&sc->sc_queue_mtx);
309 wakeup(sc);
310 break;
311 case BIO_GETATTR:
312 case BIO_FLUSH:
313 cbp->bio_done = g_std_done;
314 cp = LIST_FIRST(&sc->sc_geom->consumer);
315 cbp->bio_to = cp->provider;
316 G_ELI_LOGREQ(2, cbp, "Sending request.");
317 g_io_request(cbp, cp);
318 break;
319 }
320}
321
322static int
323g_eli_newsession(struct g_eli_worker *wr)
324{
325 struct g_eli_softc *sc;
326 struct cryptoini crie, cria;
327 int error;
328
329 sc = wr->w_softc;
330
331 bzero(&crie, sizeof(crie));
332 crie.cri_alg = sc->sc_ealgo;
333 crie.cri_klen = sc->sc_ekeylen;
334 if (sc->sc_ealgo == CRYPTO_AES_XTS)
335 crie.cri_klen <<= 1;
336 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
337 crie.cri_key = g_eli_key_hold(sc, 0,
338 LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
339 } else {
340 crie.cri_key = sc->sc_ekey;
341 }
342 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
343 bzero(&cria, sizeof(cria));
344 cria.cri_alg = sc->sc_aalgo;
345 cria.cri_klen = sc->sc_akeylen;
346 cria.cri_key = sc->sc_akey;
347 crie.cri_next = &cria;
348 }
349
350 switch (sc->sc_crypto) {
351 case G_ELI_CRYPTO_SW:
352 error = crypto_newsession(&wr->w_sid, &crie,
353 CRYPTOCAP_F_SOFTWARE);
354 break;
355 case G_ELI_CRYPTO_HW:
356 error = crypto_newsession(&wr->w_sid, &crie,
357 CRYPTOCAP_F_HARDWARE);
358 break;
359 case G_ELI_CRYPTO_UNKNOWN:
360 error = crypto_newsession(&wr->w_sid, &crie,
361 CRYPTOCAP_F_HARDWARE);
362 if (error == 0) {
363 mtx_lock(&sc->sc_queue_mtx);
364 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
365 sc->sc_crypto = G_ELI_CRYPTO_HW;
366 mtx_unlock(&sc->sc_queue_mtx);
367 } else {
368 error = crypto_newsession(&wr->w_sid, &crie,
369 CRYPTOCAP_F_SOFTWARE);
370 mtx_lock(&sc->sc_queue_mtx);
371 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
372 sc->sc_crypto = G_ELI_CRYPTO_SW;
373 mtx_unlock(&sc->sc_queue_mtx);
374 }
375 break;
376 default:
377 panic("%s: invalid condition", __func__);
378 }
379
380 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
381 g_eli_key_drop(sc, crie.cri_key);
382
383 return (error);
384}
385
386static void
387g_eli_freesession(struct g_eli_worker *wr)
388{
389
390 crypto_freesession(wr->w_sid);
391}
392
393static void
394g_eli_cancel(struct g_eli_softc *sc)
395{
396 struct bio *bp;
397
398 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
399
400 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
401 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
402 ("Not new bio when canceling (bp=%p).", bp));
403 g_io_deliver(bp, ENXIO);
404 }
405}
406
407static struct bio *
408g_eli_takefirst(struct g_eli_softc *sc)
409{
410 struct bio *bp;
411
412 mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
413
414 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
415 return (bioq_takefirst(&sc->sc_queue));
416 /*
417 * Device suspended, so we skip new I/O requests.
418 */
419 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
420 if (bp->bio_pflags != G_ELI_NEW_BIO)
421 break;
422 }
423 if (bp != NULL)
424 bioq_remove(&sc->sc_queue, bp);
425 return (bp);
426}
427
428/*
429 * This is the main function for the kernel worker threads when we don't have
430 * hardware acceleration and have to do the cryptography in software.
431 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
432 * threads with crypto work.
433 */
434static void
435g_eli_worker(void *arg)
436{
437 struct g_eli_softc *sc;
438 struct g_eli_worker *wr;
439 struct bio *bp;
440 int error;
441
442 wr = arg;
443 sc = wr->w_softc;
444#ifdef SMP
445 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
446 if (sc->sc_cpubind) {
447 while (!smp_started)
448 tsleep(wr, 0, "geli:smp", hz / 4);
449 }
450#endif
451 thread_lock(curthread);
452 sched_prio(curthread, PUSER);
453 if (sc->sc_cpubind)
454 sched_bind(curthread, wr->w_number % mp_ncpus);
455 thread_unlock(curthread);
456
457 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
458
459 for (;;) {
460 mtx_lock(&sc->sc_queue_mtx);
461again:
462 bp = g_eli_takefirst(sc);
463 if (bp == NULL) {
464 if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
465 g_eli_cancel(sc);
466 LIST_REMOVE(wr, w_next);
467 g_eli_freesession(wr);
468 free(wr, M_ELI);
469 G_ELI_DEBUG(1, "Thread %s exiting.",
470 curthread->td_proc->p_comm);
471 wakeup(&sc->sc_workers);
472 mtx_unlock(&sc->sc_queue_mtx);
473 kproc_exit(0);
474 }
475 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
476 if (sc->sc_inflight > 0) {
477 G_ELI_DEBUG(0, "inflight=%d",
478 sc->sc_inflight);
479 /*
480 * We still have inflight BIOs, so
481 * sleep and retry.
482 */
483 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
484 "geli:inf", hz / 5);
485 goto again;
486 }
487 /*
488 * Suspend requested, mark the worker as
489 * suspended and go to sleep.
490 */
491 if (wr->w_active) {
492 g_eli_freesession(wr);
493 wr->w_active = FALSE;
494 }
495 wakeup(&sc->sc_workers);
496 msleep(sc, &sc->sc_queue_mtx, PRIBIO,
497 "geli:suspend", 0);
498 if (!wr->w_active &&
499 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
500 error = g_eli_newsession(wr);
501 KASSERT(error == 0,
502 ("g_eli_newsession() failed on resume (error=%d)",
503 error));
504 wr->w_active = TRUE;
505 }
506 goto again;
507 }
508 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
509 continue;
510 }
511 if (bp->bio_pflags == G_ELI_NEW_BIO)
512 atomic_add_int(&sc->sc_inflight, 1);
513 mtx_unlock(&sc->sc_queue_mtx);
514 if (bp->bio_pflags == G_ELI_NEW_BIO) {
515 bp->bio_pflags = 0;
516 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
517 if (bp->bio_cmd == BIO_READ)
518 g_eli_auth_read(sc, bp);
519 else
520 g_eli_auth_run(wr, bp);
521 } else {
522 if (bp->bio_cmd == BIO_READ)
523 g_eli_crypto_read(sc, bp, 1);
524 else
525 g_eli_crypto_run(wr, bp);
526 }
527 } else {
528 if (sc->sc_flags & G_ELI_FLAG_AUTH)
529 g_eli_auth_run(wr, bp);
530 else
531 g_eli_crypto_run(wr, bp);
532 }
533 }
534}
535
536/*
537 * Here we generate the IV. It is unique for every sector.
538 */
539void
540g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
541 size_t size)
542{
543 uint8_t off[8];
544
545 if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
546 bcopy(&offset, off, sizeof(off));
547 else
548 le64enc(off, (uint64_t)offset);
549
550 switch (sc->sc_ealgo) {
551 case CRYPTO_AES_XTS:
552 bcopy(off, iv, sizeof(off));
553 bzero(iv + sizeof(off), size - sizeof(off));
554 break;
555 default:
556 {
557 u_char hash[SHA256_DIGEST_LENGTH];
558 SHA256_CTX ctx;
559
560 /* Copy precalculated SHA256 context for IV-Key. */
561 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
562 SHA256_Update(&ctx, off, sizeof(off));
563 SHA256_Final(hash, &ctx);
564 bcopy(hash, iv, MIN(sizeof(hash), size));
565 break;
566 }
567 }
568}
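
/*
 * A worked sketch of the IV construction above (illustrative values only;
 * the offset is encoded little-endian, or in host byte order for pre-04
 * providers): for CRYPTO_AES_XTS the encoded sector offset itself becomes
 * the IV, zero-padded, while the other algorithms hash it with the
 * precalculated IV-Key context:
 *
 *	XTS:    iv = enc64(offset) || 0x00...
 *	others: iv = SHA256(IV-Key || enc64(offset)), truncated to "size"
 */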
569
570int
571g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
572 struct g_eli_metadata *md)
573{
574 struct g_geom *gp;
575 struct g_consumer *cp;
576 u_char *buf = NULL;
577 int error;
578
579 g_topology_assert();
580
581 gp = g_new_geomf(mp, "eli:taste");
582 gp->start = g_eli_start;
583 gp->access = g_std_access;
584 /*
585 * g_eli_read_metadata() is always called from the event thread.
586 * Our geom is created and destroyed within the same event, so there
587 * can be no orphan or spoil event in the meantime.
588 */
589 gp->orphan = g_eli_orphan_spoil_assert;
590 gp->spoiled = g_eli_orphan_spoil_assert;
591 cp = g_new_consumer(gp);
592 error = g_attach(cp, pp);
593 if (error != 0)
594 goto end;
595 error = g_access(cp, 1, 0, 0);
596 if (error != 0)
597 goto end;
598 g_topology_unlock();
599 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
600 &error);
601 g_topology_lock();
602 if (buf == NULL)
603 goto end;
604 eli_metadata_decode(buf, md);
605end:
606 if (buf != NULL)
607 g_free(buf);
608 if (cp->provider != NULL) {
609 if (cp->acr == 1)
610 g_access(cp, -1, 0, 0);
611 g_detach(cp);
612 }
613 g_destroy_consumer(cp);
614 g_destroy_geom(gp);
615 return (error);
616}
617
618/*
619 * The function is called on the last close of the provider when the user has
620 * requested that the device be detached when this happens.
621 */
622static void
623g_eli_last_close(struct g_eli_softc *sc)
624{
625 struct g_geom *gp;
626 struct g_provider *pp;
627 char ppname[64];
628 int error;
629
630 g_topology_assert();
631 gp = sc->sc_geom;
632 pp = LIST_FIRST(&gp->provider);
633 strlcpy(ppname, pp->name, sizeof(ppname));
634 error = g_eli_destroy(sc, TRUE);
635 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
636 ppname, error));
637 G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
638}
639
640int
641g_eli_access(struct g_provider *pp, int dr, int dw, int de)
642{
643 struct g_eli_softc *sc;
644 struct g_geom *gp;
645
646 gp = pp->geom;
647 sc = gp->softc;
648
649 if (dw > 0) {
650 if (sc->sc_flags & G_ELI_FLAG_RO) {
651 /* Deny write attempts. */
652 return (EROFS);
653 }
654 /* Someone is opening us for write, we need to remember that. */
655 sc->sc_flags |= G_ELI_FLAG_WOPEN;
656 return (0);
657 }
658 /* Is this the last close? */
659 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
660 return (0);
661
662 /*
663 * Automatically detach on last close if requested.
664 */
665 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
666 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
667 g_eli_last_close(sc);
668 }
669 return (0);
670}
671
672static int
673g_eli_cpu_is_disabled(int cpu)
674{
675#ifdef SMP
676 return (CPU_ISSET(cpu, &hlt_cpus_mask));
677#else
678 return (0);
679#endif
680}
681
682struct g_geom *
683g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
684 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
685{
686 struct g_eli_softc *sc;
687 struct g_eli_worker *wr;
688 struct g_geom *gp;
689 struct g_provider *pp;
690 struct g_consumer *cp;
691 u_int i, threads;
692 int error;
693
694 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
695
696 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
697 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
698 gp->start = g_eli_start;
699 /*
700 * Spoiling cannot actually happen, because we either keep the provider
701 * open for writing all the time or the provider is read-only.
702 */
703 gp->spoiled = g_eli_orphan_spoil_assert;
704 gp->orphan = g_eli_orphan;
705 gp->dumpconf = g_eli_dumpconf;
706 /*
707 * If the detach-on-last-close feature is not enabled and we don't operate
708 * on a read-only provider, we can simply use g_std_access().
709 */
710 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
711 gp->access = g_eli_access;
712 else
713 gp->access = g_std_access;
714
715 sc->sc_version = md->md_version;
716 sc->sc_inflight = 0;
717 sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
718 sc->sc_flags = md->md_flags;
719 /* Backward compatibility. */
720 if (md->md_version < G_ELI_VERSION_04)
721 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
722 if (md->md_version < G_ELI_VERSION_05)
723 sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
724 if (md->md_version < G_ELI_VERSION_06 &&
725 (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
726 sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
727 }
728 sc->sc_ealgo = md->md_ealgo;
729 sc->sc_nkey = nkey;
730
731 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
732 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
733 sc->sc_aalgo = md->md_aalgo;
734 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
735
736 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
737 /*
738 * Some hash functions (like SHA1 and RIPEMD160) generate a hash
739 * whose length is not a multiple of 128 bits, but we want the data
740 * length to be a multiple of 128 bits, so we can encrypt without
741 * padding. The line below rounds the data length down to a multiple
742 * of 128 bits (16 bytes).
743 */
744 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
745
746 sc->sc_bytes_per_sector =
747 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
748 sc->sc_bytes_per_sector *= bpp->sectorsize;
749 }
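
	/*
	 * Worked example for the integrity math above (illustrative,
	 * assuming a 20-byte per-sector authentication tag, e.g. HMAC/SHA1,
	 * and a 512-byte provider sector): data_per_sector = 512 - 20 = 492,
	 * rounded down to a multiple of 16 -> 480 payload bytes per provider
	 * sector.  With md_sectorsize = 4096 this gives bytes_per_sector =
	 * ((4096 - 1) / 480 + 1) * 512 = 9 * 512 = 4608, i.e. every
	 * 4096-byte logical sector occupies 4608 bytes on the provider.
	 */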
750
751 gp->softc = sc;
752 sc->sc_geom = gp;
753
754 bioq_init(&sc->sc_queue);
755 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
756 mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
757
758 pp = NULL;
759 cp = g_new_consumer(gp);
760 error = g_attach(cp, bpp);
761 if (error != 0) {
762 if (req != NULL) {
763 gctl_error(req, "Cannot attach to %s (error=%d).",
764 bpp->name, error);
765 } else {
766 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
767 bpp->name, error);
768 }
769 goto failed;
770 }
771 /*
772 * Keep the provider open all the time, so we can run critical tasks,
773 * like Master Key deletion, without wondering whether we can open the
774 * provider or not.
775 * We only skip opening the provider for writing when the user requested
776 * read-only access.
777 */
778 if (sc->sc_flags & G_ELI_FLAG_RO)
779 error = g_access(cp, 1, 0, 1);
780 else
781 error = g_access(cp, 1, 1, 1);
782 if (error != 0) {
783 if (req != NULL) {
784 gctl_error(req, "Cannot access %s (error=%d).",
785 bpp->name, error);
786 } else {
787 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
788 bpp->name, error);
789 }
790 goto failed;
791 }
792
793 sc->sc_sectorsize = md->md_sectorsize;
794 sc->sc_mediasize = bpp->mediasize;
795 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
796 sc->sc_mediasize -= bpp->sectorsize;
797 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
798 sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
799 else {
800 sc->sc_mediasize /= sc->sc_bytes_per_sector;
801 sc->sc_mediasize *= sc->sc_sectorsize;
802 }
803
804 /*
805 * Remember the keys in our softc structure.
806 */
807 g_eli_mkey_propagate(sc, mkey);
808 sc->sc_ekeylen = md->md_keylen;
809
810 LIST_INIT(&sc->sc_workers);
811
812 threads = g_eli_threads;
813 if (threads == 0)
814 threads = mp_ncpus;
815 sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
816 for (i = 0; i < threads; i++) {
817 if (g_eli_cpu_is_disabled(i)) {
818 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
819 bpp->name, i);
820 continue;
821 }
822 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
823 wr->w_softc = sc;
824 wr->w_number = i;
825 wr->w_active = TRUE;
826
827 error = g_eli_newsession(wr);
828 if (error != 0) {
829 free(wr, M_ELI);
830 if (req != NULL) {
831 gctl_error(req, "Cannot set up crypto session "
832 "for %s (error=%d).", bpp->name, error);
833 } else {
834 G_ELI_DEBUG(1, "Cannot set up crypto session "
835 "for %s (error=%d).", bpp->name, error);
836 }
837 goto failed;
838 }
839
840 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
841 "g_eli[%u] %s", i, bpp->name);
842 if (error != 0) {
843 g_eli_freesession(wr);
844 free(wr, M_ELI);
845 if (req != NULL) {
846 gctl_error(req, "Cannot create kernel thread "
847 "for %s (error=%d).", bpp->name, error);
848 } else {
849 G_ELI_DEBUG(1, "Cannot create kernel thread "
850 "for %s (error=%d).", bpp->name, error);
851 }
852 goto failed;
853 }
854 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
855 }
856
857 /*
858 * Create decrypted provider.
859 */
860 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
861 pp->mediasize = sc->sc_mediasize;
862 pp->sectorsize = sc->sc_sectorsize;
863
864 g_error_provider(pp, 0);
865
866 G_ELI_DEBUG(0, "Device %s created.", pp->name);
867 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
868 sc->sc_ekeylen);
869 if (sc->sc_flags & G_ELI_FLAG_AUTH)
870 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
871 G_ELI_DEBUG(0, " Crypto: %s",
872 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
873 return (gp);
874failed:
875 mtx_lock(&sc->sc_queue_mtx);
876 sc->sc_flags |= G_ELI_FLAG_DESTROY;
877 wakeup(sc);
878 /*
879 * Wait for the kernel threads to self-destruct.
880 */
881 while (!LIST_EMPTY(&sc->sc_workers)) {
882 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
883 "geli:destroy", 0);
884 }
885 mtx_destroy(&sc->sc_queue_mtx);
886 if (cp->provider != NULL) {
887 if (cp->acr == 1)
888 g_access(cp, -1, -1, -1);
889 g_detach(cp);
890 }
891 g_destroy_consumer(cp);
892 g_destroy_geom(gp);
893 g_eli_key_destroy(sc);
894 bzero(sc, sizeof(*sc));
895 free(sc, M_ELI);
896 return (NULL);
897}
898
899int
900g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
901{
902 struct g_geom *gp;
903 struct g_provider *pp;
904
905 g_topology_assert();
906
907 if (sc == NULL)
908 return (ENXIO);
909
910 gp = sc->sc_geom;
911 pp = LIST_FIRST(&gp->provider);
912 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
913 if (force) {
914 G_ELI_DEBUG(1, "Device %s is still open, so it "
915 "cannot be definitely removed.", pp->name);
916 } else {
917 G_ELI_DEBUG(1,
918 "Device %s is still open (r%dw%de%d).", pp->name,
919 pp->acr, pp->acw, pp->ace);
920 return (EBUSY);
921 }
922 }
923
924 mtx_lock(&sc->sc_queue_mtx);
925 sc->sc_flags |= G_ELI_FLAG_DESTROY;
926 wakeup(sc);
927 while (!LIST_EMPTY(&sc->sc_workers)) {
928 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
929 "geli:destroy", 0);
930 }
931 mtx_destroy(&sc->sc_queue_mtx);
932 gp->softc = NULL;
933 g_eli_key_destroy(sc);
934 bzero(sc, sizeof(*sc));
935 free(sc, M_ELI);
936
937 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
938 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
939 g_wither_geom_close(gp, ENXIO);
940
941 return (0);
942}
943
944static int
945g_eli_destroy_geom(struct gctl_req *req __unused,
946 struct g_class *mp __unused, struct g_geom *gp)
947{
948 struct g_eli_softc *sc;
949
950 sc = gp->softc;
951 return (g_eli_destroy(sc, FALSE));
952}
953
954static int
955g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
956{
957 u_char *keyfile, *data;
958 char *file, name[64];
959 size_t size;
960 int i;
961
962 for (i = 0; ; i++) {
963 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
964 keyfile = preload_search_by_type(name);
965 if (keyfile == NULL)
966 return (i); /* Return number of loaded keyfiles. */
967 data = preload_fetch_addr(keyfile);
968 if (data == NULL) {
969 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
970 name);
971 return (0);
972 }
973 size = preload_fetch_size(keyfile);
974 if (size == 0) {
975 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
976 name);
977 return (0);
978 }
979 file = preload_search_info(keyfile, MODINFO_NAME);
980 if (file == NULL) {
981 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
982 name);
983 return (0);
984 }
985 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
986 provider, name);
987 g_eli_crypto_hmac_update(ctx, data, size);
988 }
989}
990
991static void
992g_eli_keyfiles_clear(const char *provider)
993{
994 u_char *keyfile, *data;
995 char name[64];
996 size_t size;
997 int i;
998
999 for (i = 0; ; i++) {
1000 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1001 keyfile = preload_search_by_type(name);
1002 if (keyfile == NULL)
1003 return;
1004 data = preload_fetch_addr(keyfile);
1005 size = preload_fetch_size(keyfile);
1006 if (data != NULL && size != 0)
1007 bzero(data, size);
1008 }
1009}
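
/*
 * Example loader.conf(5) entries (illustrative; see geli(8) for the
 * authoritative syntax) that preload a key file under the
 * "<provider>:geli_keyfile<N>" type searched for above:
 *
 *	geli_da0_keyfile0_load="YES"
 *	geli_da0_keyfile0_type="da0:geli_keyfile0"
 *	geli_da0_keyfile0_name="/boot/keys/da0.key"
 */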
1010
1011/*
1012 * Tasting is only done at boot time.
1013 * We detect providers which should be attached before root is mounted.
1014 */
1015static struct g_geom *
1016g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1017{
1018 struct g_eli_metadata md;
1019 struct g_geom *gp;
1020 struct hmac_ctx ctx;
1021 char passphrase[256];
1022 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1023 u_int i, nkey, nkeyfiles, tries;
1024 int error;
1025
1026 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1027 g_topology_assert();
1028
1029 if (root_mounted() || g_eli_tries == 0)
1030 return (NULL);
1031
1032 G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1033
1034 error = g_eli_read_metadata(mp, pp, &md);
1035 if (error != 0)
1036 return (NULL);
1037 gp = NULL;
1038
1039 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1040 return (NULL);
1041 if (md.md_version > G_ELI_VERSION) {
1042 printf("geom_eli.ko module is too old to handle %s.\n",
1043 pp->name);
1044 return (NULL);
1045 }
1046 if (md.md_provsize != pp->mediasize)
1047 return (NULL);
1048 /* Should we attach it on boot? */
1049 if (!(md.md_flags & G_ELI_FLAG_BOOT))
1050 return (NULL);
1051 if (md.md_keys == 0x00) {
1052 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1053 return (NULL);
1054 }
1055 if (md.md_iterations == -1) {
1056 /* If there is no passphrase, we try only once. */
1057 tries = 1;
1058 } else {
1059 /* Ask for the passphrase no more than g_eli_tries times. */
1060 tries = g_eli_tries;
1061 }
1062
1063 for (i = 0; i < tries; i++) {
1064 g_eli_crypto_hmac_init(&ctx, NULL, 0);
1065
1066 /*
1067 * Load all key files.
1068 */
1069 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1070
1071 if (nkeyfiles == 0 && md.md_iterations == -1) {
1072 /*
1073 * No key files and no passphrase, something is
1074 * definitely wrong here.
1075 * geli(8) doesn't allow for such a situation, so assume
1076 * that there really was no passphrase and that, in this
1077 * case, the key files were not properly defined in loader.conf.
1078 */
1079 G_ELI_DEBUG(0,
1080 "Found no key files in loader.conf for %s.",
1081 pp->name);
1082 return (NULL);
1083 }
1084
1085 /* Ask for the passphrase if defined. */
1086 if (md.md_iterations >= 0) {
1087 printf("Enter passphrase for %s: ", pp->name);
1088 gets(passphrase, sizeof(passphrase),
1089 g_eli_visible_passphrase);
1090 }
1091
1092 /*
1093 * Prepare Derived-Key from the user passphrase.
1094 */
1095 if (md.md_iterations == 0) {
1096 g_eli_crypto_hmac_update(&ctx, md.md_salt,
1097 sizeof(md.md_salt));
1098 g_eli_crypto_hmac_update(&ctx, passphrase,
1099 strlen(passphrase));
1100 bzero(passphrase, sizeof(passphrase));
1101 } else if (md.md_iterations > 0) {
1102 u_char dkey[G_ELI_USERKEYLEN];
1103
1104 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1105 sizeof(md.md_salt), passphrase, md.md_iterations);
1106 bzero(passphrase, sizeof(passphrase));
1107 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1108 bzero(dkey, sizeof(dkey));
1109 }
1110
1111 g_eli_crypto_hmac_final(&ctx, key, 0);
1112
1113 /*
1114 * Decrypt Master-Key.
1115 */
1116 error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1117 bzero(key, sizeof(key));
1118 if (error == -1) {
1119 if (i == tries - 1) {
1120 G_ELI_DEBUG(0,
1121 "Wrong key for %s. No tries left.",
1122 pp->name);
1123 g_eli_keyfiles_clear(pp->name);
1124 return (NULL);
1125 }
1126 G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
1127 pp->name, tries - i - 1);
1128 /* Try again. */
1129 continue;
1130 } else if (error > 0) {
1131 G_ELI_DEBUG(0,
1132 "Cannot decrypt Master Key for %s (error=%d).",
1133 pp->name, error);
1134 g_eli_keyfiles_clear(pp->name);
1135 return (NULL);
1136 }
1137 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1138 break;
1139 }
1140
1141 /*
1142 * We have the correct key, let's attach the provider.
1143 */
1144 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1145 bzero(mkey, sizeof(mkey));
1146 bzero(&md, sizeof(md));
1147 if (gp == NULL) {
1148 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1149 G_ELI_SUFFIX);
1150 return (NULL);
1151 }
1152 return (gp);
1153}
1154
1155static void
1156g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1157 struct g_consumer *cp, struct g_provider *pp)
1158{
1159 struct g_eli_softc *sc;
1160
1161 g_topology_assert();
1162 sc = gp->softc;
1163 if (sc == NULL)
1164 return;
1165 if (pp != NULL || cp != NULL)
1166 return; /* Nothing here. */
1167
1168 sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>", indent,
1169 (uintmax_t)sc->sc_ekeys_total);
1170 sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>", indent,
1171 (uintmax_t)sc->sc_ekeys_allocated);
1172 sbuf_printf(sb, "%s<Flags>", indent);
1173 if (sc->sc_flags == 0)
1174 sbuf_printf(sb, "NONE");
1175 else {
1176 int first = 1;
1177
1178#define ADD_FLAG(flag, name) do { \
1179 if (sc->sc_flags & (flag)) { \
1180 if (!first) \
1181 sbuf_printf(sb, ", "); \
1182 else \
1183 first = 0; \
1184 sbuf_printf(sb, name); \
1185 } \
1186} while (0)
1187 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1188 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1189 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1190 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1191 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1192 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1193 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1194 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1195 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1196 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1197 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1198#undef ADD_FLAG
1199 }
1200 sbuf_printf(sb, "</Flags>\n");
1201
1202 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1203 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1204 sc->sc_nkey);
1205 }
1206 sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1207 sbuf_printf(sb, "%s<Crypto>", indent);
1208 switch (sc->sc_crypto) {
1209 case G_ELI_CRYPTO_HW:
1210 sbuf_printf(sb, "hardware");
1211 break;
1212 case G_ELI_CRYPTO_SW:
1213 sbuf_printf(sb, "software");
1214 break;
1215 default:
1216 sbuf_printf(sb, "UNKNOWN");
1217 break;
1218 }
1219 sbuf_printf(sb, "</Crypto>\n");
1220 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1221 sbuf_printf(sb,
1222 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1223 indent, g_eli_algo2str(sc->sc_aalgo));
1224 }
1225 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1226 sc->sc_ekeylen);
1227 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1228 indent, g_eli_algo2str(sc->sc_ealgo));
1229 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1230 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1231}
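
/*
 * For reference (values are hypothetical), the dumpconf method above emits a
 * fragment of kern.geom.confxml along these lines for each geli provider:
 *
 *	<KeysTotal>4</KeysTotal><KeysAllocated>1</KeysAllocated><Flags>BOOT</Flags>
 *	<UsedKey>0</UsedKey>
 *	<Version>6</Version>
 *	<Crypto>software</Crypto>
 *	<KeyLength>256</KeyLength>
 *	<EncryptionAlgorithm>AES-XTS</EncryptionAlgorithm>
 *	<State>ACTIVE</State>
 */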
1232
1233static void
1234g_eli_shutdown_pre_sync(void *arg, int howto)
1235{
1236 struct g_class *mp;
1237 struct g_geom *gp, *gp2;
1238 struct g_provider *pp;
1239 struct g_eli_softc *sc;
1240 int error;
1241
1242 mp = arg;
1243 DROP_GIANT();
1244 g_topology_lock();
1245 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1246 sc = gp->softc;
1247 if (sc == NULL)
1248 continue;
1249 pp = LIST_FIRST(&gp->provider);
1250 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1251 if (pp->acr + pp->acw + pp->ace == 0)
1252 error = g_eli_destroy(sc, TRUE);
1253 else {
1254 sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1255 gp->access = g_eli_access;
1256 }
1257 }
1258 g_topology_unlock();
1259 PICKUP_GIANT();
1260}
1261
1262static void
1263g_eli_init(struct g_class *mp)
1264{
1265
1266 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1267 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1268 if (g_eli_pre_sync == NULL)
1269 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1270}
1271
1272static void
1273g_eli_fini(struct g_class *mp)
1274{
1275
1276 if (g_eli_pre_sync != NULL)
1277 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1278}
1279
1280DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1281MODULE_DEPEND(g_eli, crypto, 1, 1, 1);