Deleted Added
full compact
g_eli.c (211927) g_eli.c (213062)
1/*-
2 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 211927 2010-08-28 08:30:20Z pjd $");
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 213062 2010-09-23 11:19:48Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/linker.h>
34#include <sys/module.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/eventhandler.h>
41#include <sys/kthread.h>
42#include <sys/proc.h>
43#include <sys/sched.h>
44#include <sys/smp.h>
45#include <sys/uio.h>
46#include <sys/vnode.h>
47
48#include <vm/uma.h>
49
50#include <geom/geom.h>
51#include <geom/eli/g_eli.h>
52#include <geom/eli/pkcs5v2.h>
53
54
55MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
56
57SYSCTL_DECL(_kern_geom);
58SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
59u_int g_eli_debug = 0;
60TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
61SYSCTL_UINT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
62 "Debug level");
63static u_int g_eli_tries = 3;
64TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
65SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
66 "Number of tries for entering the passphrase");
67static u_int g_eli_visible_passphrase = 0;
68TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
69SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
70 &g_eli_visible_passphrase, 0,
71 "Turn on echo when entering the passphrase (for debug purposes only!!)");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/linker.h>
34#include <sys/module.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/eventhandler.h>
41#include <sys/kthread.h>
42#include <sys/proc.h>
43#include <sys/sched.h>
44#include <sys/smp.h>
45#include <sys/uio.h>
46#include <sys/vnode.h>
47
48#include <vm/uma.h>
49
50#include <geom/geom.h>
51#include <geom/eli/g_eli.h>
52#include <geom/eli/pkcs5v2.h>
53
54
55MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
56
57SYSCTL_DECL(_kern_geom);
58SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
59u_int g_eli_debug = 0;
60TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
61SYSCTL_UINT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
62 "Debug level");
63static u_int g_eli_tries = 3;
64TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
65SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
66 "Number of tries for entering the passphrase");
67static u_int g_eli_visible_passphrase = 0;
68TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
69SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
70 &g_eli_visible_passphrase, 0,
71 "Turn on echo when entering the passphrase (for debug purposes only!!)");
72u_int g_eli_overwrites = 5;
72u_int g_eli_overwrites = G_ELI_OVERWRITES;
73TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
74SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
75 0, "Number of times on-disk keys should be overwritten when destroying them");
76static u_int g_eli_threads = 0;
77TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
78SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
79 "Number of threads doing crypto work");
80u_int g_eli_batch = 0;
81TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
82SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
83 "Use crypto operations batching");
84
85static eventhandler_tag g_eli_pre_sync = NULL;
86
87static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
88 struct g_geom *gp);
89static void g_eli_init(struct g_class *mp);
90static void g_eli_fini(struct g_class *mp);
91
92static g_taste_t g_eli_taste;
93static g_dumpconf_t g_eli_dumpconf;
94
95struct g_class g_eli_class = {
96 .name = G_ELI_CLASS_NAME,
97 .version = G_VERSION,
98 .ctlreq = g_eli_config,
99 .taste = g_eli_taste,
100 .destroy_geom = g_eli_destroy_geom,
101 .init = g_eli_init,
102 .fini = g_eli_fini
103};
104
105
106/*
107 * Code paths:
108 * BIO_READ:
109 * g_eli_start -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
110 * BIO_WRITE:
111 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
112 */
113
114
/*
 * EAGAIN from crypto(9) means that we were probably rebalanced to another
 * crypto accelerator or something like this.
 * The function updates the session ID (SID) stored in the worker and reruns
 * the operation.
 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	/*
	 * Find the worker that owns this request; bio_pflags carries the
	 * worker number (set when the request was dispatched).
	 */
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
	    (uintmax_t)crp->crp_sid);
	/* Remember the new session ID so future requests use it directly. */
	wr->w_sid = crp->crp_sid;
	/* Clear the stale EAGAIN before redispatching. */
	crp->crp_etype = 0;
	error = crypto_dispatch(crp);
	if (error == 0)
		return (0);
	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
	crp->crp_etype = error;
	return (error);
}
147
148/*
149 * The function is called afer reading encrypted data from the provider.
150 *
151 * g_eli_start -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
152 */
153void
154g_eli_read_done(struct bio *bp)
155{
156 struct g_eli_softc *sc;
157 struct bio *pbp;
158
159 G_ELI_LOGREQ(2, bp, "Request done.");
160 pbp = bp->bio_parent;
161 if (pbp->bio_error == 0)
162 pbp->bio_error = bp->bio_error;
163 /*
164 * Do we have all sectors already?
165 */
166 pbp->bio_inbed++;
167 if (pbp->bio_inbed < pbp->bio_children)
168 return;
169 g_destroy_bio(bp);
170 if (pbp->bio_error != 0) {
171 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
172 pbp->bio_completed = 0;
173 if (pbp->bio_driver2 != NULL) {
174 free(pbp->bio_driver2, M_ELI);
175 pbp->bio_driver2 = NULL;
176 }
177 g_io_deliver(pbp, pbp->bio_error);
178 return;
179 }
180 sc = pbp->bio_to->geom->softc;
181 mtx_lock(&sc->sc_queue_mtx);
182 bioq_insert_tail(&sc->sc_queue, pbp);
183 mtx_unlock(&sc->sc_queue_mtx);
184 wakeup(sc);
185}
186
187/*
188 * The function is called after we encrypt and write data.
189 *
190 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
191 */
192void
193g_eli_write_done(struct bio *bp)
194{
195 struct bio *pbp;
196
197 G_ELI_LOGREQ(2, bp, "Request done.");
198 pbp = bp->bio_parent;
199 if (pbp->bio_error == 0) {
200 if (bp->bio_error != 0)
201 pbp->bio_error = bp->bio_error;
202 }
203 /*
204 * Do we have all sectors already?
205 */
206 pbp->bio_inbed++;
207 if (pbp->bio_inbed < pbp->bio_children)
208 return;
209 free(pbp->bio_driver2, M_ELI);
210 pbp->bio_driver2 = NULL;
211 if (pbp->bio_error != 0) {
212 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
213 pbp->bio_error);
214 pbp->bio_completed = 0;
215 }
216 g_destroy_bio(bp);
217 /*
218 * Write is finished, send it up.
219 */
220 pbp->bio_completed = pbp->bio_length;
221 g_io_deliver(pbp, pbp->bio_error);
222}
223
/*
 * This function should never be called, but GEOM mandates an ->orphan()
 * method on every geom, so we install this assertion in its place.
 * Also used as ->spoiled() where spoiling is known to be impossible.
 */
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	panic("Function %s() called for %s.", __func__, cp->geom->name);
}
234
235static void
236g_eli_orphan(struct g_consumer *cp)
237{
238 struct g_eli_softc *sc;
239
240 g_topology_assert();
241 sc = cp->geom->softc;
242 if (sc == NULL)
243 return;
244 g_eli_destroy(sc, 1);
245}
246
/*
 * BIO_READ : G_ELI_START -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE: G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	/* First pass: reject unsupported commands before cloning. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
		break;
	case BIO_DELETE:
		/*
		 * We could eventually support BIO_DELETE request.
		 * It could be done by overwritting requested sector with
		 * random data g_eli_overwrites number of times.
		 */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	/* Second pass: dispatch. */
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			bp->bio_driver2 = NULL;
			cbp->bio_done = g_eli_read_done;
			cp = LIST_FIRST(&sc->sc_geom->consumer);
			cbp->bio_to = cp->provider;
			G_ELI_LOGREQ(2, cbp, "Sending request.");
			/*
			 * Read encrypted data from provider.
			 */
			g_io_request(cbp, cp);
			break;
		}
		/*
		 * Authenticated read: mark with sentinel 255 so the worker
		 * thread routes it to g_eli_auth_read() (see g_eli_worker).
		 */
		bp->bio_pflags = 255;
		/* FALLTHROUGH */
	case BIO_WRITE:
		/* Queue for a worker thread; it will do the crypto work. */
		bp->bio_driver1 = cbp;
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
		/* Pass straight through to the underlying provider. */
		cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}
318
/*
 * This is the main function for kernel worker thread when we don't have
 * hardware acceleration and we have to do cryptography in software.
 * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM
 * threads with crypto work.
 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;

	wr = arg;
	sc = wr->w_softc;
#ifdef SMP
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
	    g_eli_threads == 0) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	/* One software-crypto worker per CPU: pin this thread to its CPU. */
	if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
		sched_bind(curthread, wr->w_number);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_takefirst(&sc->sc_queue);
		if (bp == NULL) {
			/* Empty queue: exit if destroying, else sleep. */
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				LIST_REMOVE(wr, w_next);
				crypto_freesession(wr->w_sid);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				/* Notify g_eli_destroy() waiting on workers. */
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			/* PDROP releases sc_queue_mtx while sleeping. */
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		/* bio_pflags == 255 is the AUTH-read sentinel (g_eli_start). */
		if (bp->bio_cmd == BIO_READ && bp->bio_pflags == 255)
			g_eli_auth_read(sc, bp);
		else if (sc->sc_flags & G_ELI_FLAG_AUTH)
			g_eli_auth_run(wr, bp);
		else
			g_eli_crypto_run(wr, bp);
	}
}
376
/*
 * Here we generate IV. It is unique for every sector.
 * The IV is SHA256(IV-Key || sector-offset), truncated to 'size' bytes.
 */
void
g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
    size_t size)
{
	u_char off[8], hash[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;

	/*
	 * Pre-version-4 metadata hashed the offset in host byte order;
	 * newer metadata always uses little-endian for portability.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
		bcopy(&offset, off, sizeof(off));
	else
		le64enc(off, (uint64_t)offset);
	/* Copy precalculated SHA256 context for IV-Key. */
	bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
	SHA256_Update(&ctx, off, sizeof(off));
	SHA256_Final(hash, &ctx);
	/* Caller guarantees size <= SHA256_DIGEST_LENGTH. */
	bcopy(hash, iv, size);
}
397
/*
 * Read and decode the geli metadata sector (the last sector of the
 * provider) into *md.  Creates a temporary "eli:taste" geom/consumer to
 * perform the read and destroys it before returning.
 * Returns 0 on success or an errno value.
 */
int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so there
	 * could be no orphan nor spoil event in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	/* g_read_data() may sleep, so the topology lock must be dropped. */
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	eli_metadata_decode(buf, md);
end:
	/* Unified cleanup: release access, detach, destroy temporaries. */
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}
445
/*
 * The function is called when we had last close on provider and user requested
 * to close it when this situation occur.
 */
static void
g_eli_last_close(struct g_eli_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char ppname[64];
	int error;

	g_topology_assert();
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	/* Save the name: g_eli_destroy() frees the provider. */
	strlcpy(ppname, pp->name, sizeof(ppname));
	error = g_eli_destroy(sc, 1);
	/*
	 * With no consumers left, a forced destroy cannot fail; 'error' is
	 * only consumed by the KASSERT (unused when INVARIANTS is off).
	 */
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    ppname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
}
467
468int
469g_eli_access(struct g_provider *pp, int dr, int dw, int de)
470{
471 struct g_eli_softc *sc;
472 struct g_geom *gp;
473
474 gp = pp->geom;
475 sc = gp->softc;
476
477 if (dw > 0) {
478 if (sc->sc_flags & G_ELI_FLAG_RO) {
479 /* Deny write attempts. */
480 return (EROFS);
481 }
482 /* Someone is opening us for write, we need to remember that. */
483 sc->sc_flags |= G_ELI_FLAG_WOPEN;
484 return (0);
485 }
486 /* Is this the last close? */
487 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
488 return (0);
489
490 /*
491 * Automatically detach on last close if requested.
492 */
493 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
494 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
495 g_eli_last_close(sc);
496 }
497 return (0);
498}
499
/*
 * Return non-zero if the given CPU is halted (so we should not create a
 * worker thread bound to it).
 * NOTE(review): "1 << cpu" is undefined for cpu >= 31 (int width); this
 * limits correct operation to the first 32 CPUs — verify against the width
 * of hlt_cpus_mask on this branch.
 */
static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return ((hlt_cpus_mask & (1 << cpu)) != 0);
#else
	return (0);
#endif
}
509
/*
 * Create a geli device on top of provider 'bpp' using the decoded metadata
 * 'md' and the decrypted Master Key 'mkey' (key slot 'nkey').  Sets up the
 * softc, opens the provider, creates crypto sessions and worker threads,
 * and finally announces the decrypted provider.
 * Returns the new geom, or NULL on failure (errors reported via 'req' when
 * non-NULL, otherwise via debug log).
 */
struct g_geom *
g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
    const struct g_eli_metadata *md, const u_char *mkey, int nkey)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct cryptoini crie, cria;
	u_int i, threads;
	int error;

	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);

	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
	gp->softc = NULL;	/* for a moment */

	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
	gp->start = g_eli_start;
	/*
	 * Spoiling cannot happen actually, because we keep provider open for
	 * writing all the time or provider is read-only.
	 */
	gp->spoiled = g_eli_orphan_spoil_assert;
	gp->orphan = g_eli_orphan;
	gp->dumpconf = g_eli_dumpconf;
	/*
	 * If detach-on-last-close feature is not enabled and we don't operate
	 * on read-only provider, we can simply use g_std_access().
	 */
	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
		gp->access = g_eli_access;
	else
		gp->access = g_std_access;

	sc->sc_crypto = G_ELI_CRYPTO_SW;
	sc->sc_flags = md->md_flags;
	/* Backward compatibility. */
	if (md->md_version < 4)
		sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
	sc->sc_ealgo = md->md_ealgo;
	sc->sc_nkey = nkey;
	/*
	 * Remember the keys in our softc structure.
	 */
	g_eli_mkey_propagate(sc, mkey);
	sc->sc_ekeylen = md->md_keylen;

	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
		sc->sc_aalgo = md->md_aalgo;
		sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);

		/* Each on-disk sector holds HMAC + payload. */
		sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
		/*
		 * Some hash functions (like SHA1 and RIPEMD160) generates hash
		 * which length is not multiple of 128 bits, but we want data
		 * length to be multiple of 128, so we can encrypt without
		 * padding. The line below rounds down data length to multiple
		 * of 128 bits.
		 */
		sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;

		/* On-disk bytes consumed per logical (decrypted) sector. */
		sc->sc_bytes_per_sector =
		    (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
		sc->sc_bytes_per_sector *= bpp->sectorsize;
		/*
		 * Precalculate SHA256 for HMAC key generation.
		 * This is expensive operation and we can do it only once now or
		 * for every access to sector, so now will be much better.
		 */
		SHA256_Init(&sc->sc_akeyctx);
		SHA256_Update(&sc->sc_akeyctx, sc->sc_akey,
		    sizeof(sc->sc_akey));
	}

	/*
	 * Precalculate SHA256 for IV generation.
	 * This is expensive operation and we can do it only once now or for
	 * every access to sector, so now will be much better.
	 */
	SHA256_Init(&sc->sc_ivctx);
	SHA256_Update(&sc->sc_ivctx, sc->sc_ivkey, sizeof(sc->sc_ivkey));

	gp->softc = sc;
	sc->sc_geom = gp;

	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);

	pp = NULL;
	cp = g_new_consumer(gp);
	error = g_attach(cp, bpp);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}
	/*
	 * Keep provider open all the time, so we can run critical tasks,
	 * like Master Keys deletion, without wondering if we can open
	 * provider or not.
	 * We don't open provider for writing only when user requested read-only
	 * access.
	 */
	if (sc->sc_flags & G_ELI_FLAG_RO)
		error = g_access(cp, 1, 0, 1);
	else
		error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot access %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}

	LIST_INIT(&sc->sc_workers);

	/* Describe the encryption (and optional authentication) sessions. */
	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	crie.cri_key = sc->sc_ekey;
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}

	threads = g_eli_threads;
	if (threads == 0)
		threads = mp_ncpus;
	else if (threads > mp_ncpus) {
		/* There is really no need for too many worker threads. */
		threads = mp_ncpus;
		G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
	}
	for (i = 0; i < threads; i++) {
		if (g_eli_cpu_is_disabled(i)) {
			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
			    bpp->name, i);
			continue;
		}
		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
		wr->w_softc = sc;
		wr->w_number = i;

		/*
		 * If this is the first pass, try to get hardware support.
		 * Use software cryptography, if we cannot get it.
		 */
		if (LIST_EMPTY(&sc->sc_workers)) {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_HARDWARE);
			if (error == 0)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
		}
		if (sc->sc_crypto == G_ELI_CRYPTO_SW) {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
		}
		if (error != 0) {
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}

		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
		    "g_eli[%u] %s", i, bpp->name);
		if (error != 0) {
			crypto_freesession(wr->w_sid);
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}
		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
		/* If we have hardware support, one thread is enough. */
		if (sc->sc_crypto == G_ELI_CRYPTO_HW)
			break;
	}

	/*
	 * Create decrypted provider.
	 */
	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
	pp->sectorsize = md->md_sectorsize;
	pp->mediasize = bpp->mediasize;
	/* Reserve the last sector for metadata (except one-time devices). */
	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
		pp->mediasize -= bpp->sectorsize;
	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
		pp->mediasize -= (pp->mediasize % pp->sectorsize);
	else {
		/* Authentication overhead shrinks the usable media size. */
		pp->mediasize /= sc->sc_bytes_per_sector;
		pp->mediasize *= pp->sectorsize;
	}

	g_error_provider(pp, 0);

	G_ELI_DEBUG(0, "Device %s created.", pp->name);
	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
	    sc->sc_ekeylen);
	if (sc->sc_flags & G_ELI_FLAG_AUTH)
		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
	G_ELI_DEBUG(0, "    Crypto: %s",
	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
	return (gp);
failed:
	/* Ask any already-started worker threads to exit, then wait. */
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	/*
	 * Wait for kernel threads self destruction.
	 */
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	if (cp->provider != NULL) {
		/*
		 * NOTE(review): a read-only open above was (1,0,1) but the
		 * release here is (-1,-1,-1) — confirm acw cannot go
		 * negative on this path for RO providers.
		 */
		if (cp->acr == 1)
			g_access(cp, -1, -1, -1);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	/* Scrub key material before freeing the softc. */
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);
	return (NULL);
}
762
/*
 * Destroy the geli device: stop worker threads, scrub and free the softc
 * and wither the geom.  With 'force' the device is torn down even while
 * still open (callers then lose access); without it an open device returns
 * EBUSY.  Returns 0 on success or an errno value.
 */
int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Signal worker threads to exit and wait until they all do. */
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	/* Scrub key material before freeing the softc. */
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}
806
807static int
808g_eli_destroy_geom(struct gctl_req *req __unused,
809 struct g_class *mp __unused, struct g_geom *gp)
810{
811 struct g_eli_softc *sc;
812
813 sc = gp->softc;
814 return (g_eli_destroy(sc, 0));
815}
816
/*
 * Feed every loader-preloaded key file for 'provider' into the HMAC
 * context.  Key files are registered by the loader under the type
 * "<provider>:geli_keyfile<N>" for consecutive N starting at 0.
 * Returns the number of key files consumed, or 0 if any file's preload
 * metadata is incomplete.
 */
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data, *size;
	char *file, name[64];
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_search_info(keyfile, MODINFO_ADDR);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		/* MODINFO_ADDR stores a pointer to the file contents. */
		data = *(void **)data;
		size = preload_search_info(keyfile, MODINFO_SIZE);
		if (size == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
	}
}
853
854static void
855g_eli_keyfiles_clear(const char *provider)
856{
857 u_char *keyfile, *data, *size;
858 char name[64];
859 int i;
860
861 for (i = 0; ; i++) {
862 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
863 keyfile = preload_search_by_type(name);
864 if (keyfile == NULL)
865 return;
866 data = preload_search_info(keyfile, MODINFO_ADDR);
867 size = preload_search_info(keyfile, MODINFO_SIZE);
868 if (data == NULL || size == NULL)
869 continue;
870 data = *(void **)data;
871 bzero(data, *(size_t *)size);
872 }
873}
874
/*
 * Tasting is only made on boot.
 * We detect providers which should be attached before root is mounted.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Tasting only matters before root is mounted. */
	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	/* Validate the metadata before attempting any key work. */
	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	/* md_iterations == -1 means "no passphrase component". */
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	for (i = 0; i < tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such situation, so assume
			 * that there was really no passphrase and in that case
			 * key files are not properly defined in loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			printf("Enter passphrase for %s: ", pp->name);
			/* geli's own console gets(); echo per sysctl. */
			gets(passphrase, sizeof(passphrase),
			    g_eli_visible_passphrase);
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			/* 0 iterations: hash salt + raw passphrase. */
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			/* PKCS#5v2 key strengthening. */
			u_char dkey[G_ELI_USERKEYLEN];

			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		/* -1 means "wrong key"; > 0 is a hard errno-style failure. */
		if (error == -1) {
			if (i == tries - 1) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
			    pp->name, tries - i - 1);
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}

	/*
	 * We have correct key, let's attach provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	/* Scrub key material regardless of the outcome. */
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}
1017
/*
 * Dump this geom's configuration as XML fragments into sb (consumed by
 * `geli list` via kern.geom.confxml).  Geom-level details are emitted
 * only when both pp and cp are NULL.
 */
static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

/* Append the flag's name if set, comma-separated after the first one. */
#define ADD_FLAG(flag, name) do { \
	if (sc->sc_flags & (flag)) { \
		if (!first) \
			sbuf_printf(sb, ", "); \
		else \
			first = 0; \
		sbuf_printf(sb, name); \
	} \
} while (0)
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	/* ONETIME devices have no on-disk key slots, so no UsedKey. */
	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
	    g_eli_algo2str(sc->sc_ealgo));
}
1085
/*
 * shutdown_pre_sync event handler: before filesystems are synced on
 * shutdown, destroy every closed geli device and arm the still-open ones
 * to detach on last close, so key material does not outlive its use.
 */
static void
g_eli_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp;
	struct g_eli_softc *sc;
	int error;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	/* SAFE variant: g_eli_destroy() removes gp from the list. */
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		pp = LIST_FIRST(&gp->provider);
		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
		if (pp->acr + pp->acw + pp->ace == 0)
			error = g_eli_destroy(sc, 1);	/* XXX: result unused */
		else {
			/* Still open: detach automatically on last close. */
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
		}
	}
	g_topology_unlock();
	PICKUP_GIANT();
}
1114
/*
 * Class init: register the shutdown_pre_sync handler so geli devices can
 * be torn down before the system syncs and halts.
 */
static void
g_eli_init(struct g_class *mp)
{

	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_eli_pre_sync == NULL)
		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
}
1124
/* Class teardown: deregister the shutdown handler if it was installed. */
static void
g_eli_fini(struct g_class *mp)
{

	if (g_eli_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
}
1132
/* Register the class with GEOM and record the crypto(9) module dependency. */
DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
73TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
74SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
75 0, "Number of times on-disk keys should be overwritten when destroying them");
/* Number of crypto worker threads; 0 means one thread per CPU. */
static u_int g_eli_threads = 0;
/* NOTE(review): TUNABLE_INT takes int *, these are u_int — confirm OK. */
TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
    "Number of threads doing crypto work");
/* Non-zero enables batching of crypto(9) operations. */
u_int g_eli_batch = 0;
TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
    "Use crypto operations batching");

/* Tag of our shutdown_pre_sync event handler (see g_eli_init()). */
static eventhandler_tag g_eli_pre_sync = NULL;
86
87static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
88 struct g_geom *gp);
89static void g_eli_init(struct g_class *mp);
90static void g_eli_fini(struct g_class *mp);
91
92static g_taste_t g_eli_taste;
93static g_dumpconf_t g_eli_dumpconf;
94
/* GEOM class descriptor; methods are dispatched by the GEOM framework. */
struct g_class g_eli_class = {
	.name = G_ELI_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_eli_config,
	.taste = g_eli_taste,
	.destroy_geom = g_eli_destroy_geom,
	.init = g_eli_init,
	.fini = g_eli_fini
};
104
105
106/*
107 * Code paths:
108 * BIO_READ:
109 * g_eli_start -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
110 * BIO_WRITE:
111 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
112 */
113
114
115/*
116 * EAGAIN from crypto(9) means, that we were probably balanced to another crypto
117 * accelerator or something like this.
118 * The function updates the SID and rerun the operation.
119 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	/*
	 * Find the worker that issued this request; its number was stashed
	 * in bio_pflags when the request was submitted.
	 */
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
	    (uintmax_t)crp->crp_sid);
	/* Adopt the session crypto(9) migrated us to, then retry. */
	wr->w_sid = crp->crp_sid;
	crp->crp_etype = 0;
	error = crypto_dispatch(crp);
	if (error == 0)
		return (0);
	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
	crp->crp_etype = error;
	return (error);
}
147
148/*
149 * The function is called afer reading encrypted data from the provider.
150 *
151 * g_eli_start -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
152 */
void
g_eli_read_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	/* Keep the first error reported by any child request. */
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	g_destroy_bio(bp);
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
		pbp->bio_completed = 0;
		/* Free the per-request buffer, if one was allocated. */
		if (pbp->bio_driver2 != NULL) {
			free(pbp->bio_driver2, M_ELI);
			pbp->bio_driver2 = NULL;
		}
		g_io_deliver(pbp, pbp->bio_error);
		return;
	}
	/* Data is in; queue the parent bio for a worker to decrypt. */
	sc = pbp->bio_to->geom->softc;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, pbp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}
186
187/*
188 * The function is called after we encrypt and write data.
189 *
190 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
191 */
192void
193g_eli_write_done(struct bio *bp)
194{
195 struct bio *pbp;
196
197 G_ELI_LOGREQ(2, bp, "Request done.");
198 pbp = bp->bio_parent;
199 if (pbp->bio_error == 0) {
200 if (bp->bio_error != 0)
201 pbp->bio_error = bp->bio_error;
202 }
203 /*
204 * Do we have all sectors already?
205 */
206 pbp->bio_inbed++;
207 if (pbp->bio_inbed < pbp->bio_children)
208 return;
209 free(pbp->bio_driver2, M_ELI);
210 pbp->bio_driver2 = NULL;
211 if (pbp->bio_error != 0) {
212 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
213 pbp->bio_error);
214 pbp->bio_completed = 0;
215 }
216 g_destroy_bio(bp);
217 /*
218 * Write is finished, send it up.
219 */
220 pbp->bio_completed = pbp->bio_length;
221 g_io_deliver(pbp, pbp->bio_error);
222}
223
224/*
225 * This function should never be called, but GEOM made as it set ->orphan()
226 * method for every geom.
227 */
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	/* Reaching this point means a GEOM invariant was violated. */
	panic("Function %s() called for %s.", __func__, cp->geom->name);
}
234
235static void
236g_eli_orphan(struct g_consumer *cp)
237{
238 struct g_eli_softc *sc;
239
240 g_topology_assert();
241 sc = cp->geom->softc;
242 if (sc == NULL)
243 return;
244 g_eli_destroy(sc, 1);
245}
246
247/*
248 * BIO_READ : G_ELI_START -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
249 * BIO_WRITE: G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
250 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
		break;
	case BIO_DELETE:
		/*
		 * We could eventually support BIO_DELETE request.
		 * It could be done by overwritting requested sector with
		 * random data g_eli_overwrites number of times.
		 */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			bp->bio_driver2 = NULL;
			cbp->bio_done = g_eli_read_done;
			cp = LIST_FIRST(&sc->sc_geom->consumer);
			cbp->bio_to = cp->provider;
			G_ELI_LOGREQ(2, cbp, "Sending request.");
			/*
			 * Read encrypted data from provider.
			 */
			g_io_request(cbp, cp);
			break;
		}
		/* Mark authenticated reads so workers can tell them apart. */
		bp->bio_pflags = 255;
		/* FALLTHROUGH */
	case BIO_WRITE:
		/* Hand the request to a worker thread for crypto work. */
		bp->bio_driver1 = cbp;
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
		/* Pass through to the backing provider unchanged. */
		cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}
318
319/*
320 * This is the main function for kernel worker thread when we don't have
321 * hardware acceleration and we have to do cryptography in software.
322 * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM
323 * threads with crypto work.
324 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;

	wr = arg;
	sc = wr->w_softc;
#ifdef SMP
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
	    g_eli_threads == 0) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	/* Default mode (one thread per CPU): pin each worker to its CPU. */
	if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
		sched_bind(curthread, wr->w_number);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_takefirst(&sc->sc_queue);
		if (bp == NULL) {
			/* Queue empty: exit if being destroyed, else sleep. */
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				LIST_REMOVE(wr, w_next);
				crypto_freesession(wr->w_sid);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				/* Wake whoever waits in g_eli_destroy(). */
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			/* PDROP: msleep() releases the mutex on wakeup. */
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		/* bio_pflags == 255 marks an authenticated read, see g_eli_start(). */
		if (bp->bio_cmd == BIO_READ && bp->bio_pflags == 255)
			g_eli_auth_read(sc, bp);
		else if (sc->sc_flags & G_ELI_FLAG_AUTH)
			g_eli_auth_run(wr, bp);
		else
			g_eli_crypto_run(wr, bp);
	}
}
376
377/*
378 * Here we generate IV. It is unique for every sector.
379 */
380void
381g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
382 size_t size)
383{
384 u_char off[8], hash[SHA256_DIGEST_LENGTH];
385 SHA256_CTX ctx;
386
387 if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
388 bcopy(&offset, off, sizeof(off));
389 else
390 le64enc(off, (uint64_t)offset);
391 /* Copy precalculated SHA256 context for IV-Key. */
392 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
393 SHA256_Update(&ctx, off, sizeof(off));
394 SHA256_Final(hash, &ctx);
395 bcopy(hash, iv, size);
396}
397
/*
 * Read and decode the geli metadata sector (the provider's last sector)
 * through a throw-away geom/consumer pair.  Fills *md and returns 0 on
 * success, or an errno value.
 */
int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so there
	 * could be no orphan nor spoil event in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	/* g_read_data() may sleep; drop the topology lock around it. */
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	eli_metadata_decode(buf, md);
end:
	/* Unified cleanup: undo whatever was set up before the failure. */
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}
445
446/*
447 * The function is called when we had last close on provider and user requested
448 * to close it when this situation occur.
449 */
static void
g_eli_last_close(struct g_eli_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char ppname[64];
	int error;

	g_topology_assert();
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	/* Copy the name first; g_eli_destroy() tears the provider down. */
	strlcpy(ppname, pp->name, sizeof(ppname));
	error = g_eli_destroy(sc, 1);
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    ppname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
}
467
468int
469g_eli_access(struct g_provider *pp, int dr, int dw, int de)
470{
471 struct g_eli_softc *sc;
472 struct g_geom *gp;
473
474 gp = pp->geom;
475 sc = gp->softc;
476
477 if (dw > 0) {
478 if (sc->sc_flags & G_ELI_FLAG_RO) {
479 /* Deny write attempts. */
480 return (EROFS);
481 }
482 /* Someone is opening us for write, we need to remember that. */
483 sc->sc_flags |= G_ELI_FLAG_WOPEN;
484 return (0);
485 }
486 /* Is this the last close? */
487 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
488 return (0);
489
490 /*
491 * Automatically detach on last close if requested.
492 */
493 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
494 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
495 g_eli_last_close(sc);
496 }
497 return (0);
498}
499
/*
 * Return non-zero if the given CPU is halted and thus should not get a
 * crypto worker thread bound to it.
 */
static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	/*
	 * Use an unsigned constant: "1 << cpu" is undefined behavior
	 * (signed overflow) for cpu == 31.
	 * NOTE(review): this still assumes the CPU count fits in the
	 * width of hlt_cpus_mask — confirm for wider cpumask types.
	 */
	return ((hlt_cpus_mask & (1u << cpu)) != 0);
#else
	return (0);
#endif
}
509
510struct g_geom *
511g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
512 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
513{
514 struct g_eli_softc *sc;
515 struct g_eli_worker *wr;
516 struct g_geom *gp;
517 struct g_provider *pp;
518 struct g_consumer *cp;
519 struct cryptoini crie, cria;
520 u_int i, threads;
521 int error;
522
523 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
524
525 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
526 gp->softc = NULL; /* for a moment */
527
528 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
529 gp->start = g_eli_start;
530 /*
531 * Spoiling cannot happen actually, because we keep provider open for
532 * writing all the time or provider is read-only.
533 */
534 gp->spoiled = g_eli_orphan_spoil_assert;
535 gp->orphan = g_eli_orphan;
536 gp->dumpconf = g_eli_dumpconf;
537 /*
538 * If detach-on-last-close feature is not enabled and we don't operate
539 * on read-only provider, we can simply use g_std_access().
540 */
541 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
542 gp->access = g_eli_access;
543 else
544 gp->access = g_std_access;
545
546 sc->sc_crypto = G_ELI_CRYPTO_SW;
547 sc->sc_flags = md->md_flags;
548 /* Backward compatibility. */
549 if (md->md_version < 4)
550 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
551 sc->sc_ealgo = md->md_ealgo;
552 sc->sc_nkey = nkey;
553 /*
554 * Remember the keys in our softc structure.
555 */
556 g_eli_mkey_propagate(sc, mkey);
557 sc->sc_ekeylen = md->md_keylen;
558
559 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
560 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
561 sc->sc_aalgo = md->md_aalgo;
562 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
563
564 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
565 /*
566 * Some hash functions (like SHA1 and RIPEMD160) generates hash
567 * which length is not multiple of 128 bits, but we want data
568 * length to be multiple of 128, so we can encrypt without
569 * padding. The line below rounds down data length to multiple
570 * of 128 bits.
571 */
572 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
573
574 sc->sc_bytes_per_sector =
575 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
576 sc->sc_bytes_per_sector *= bpp->sectorsize;
577 /*
578 * Precalculate SHA256 for HMAC key generation.
579 * This is expensive operation and we can do it only once now or
580 * for every access to sector, so now will be much better.
581 */
582 SHA256_Init(&sc->sc_akeyctx);
583 SHA256_Update(&sc->sc_akeyctx, sc->sc_akey,
584 sizeof(sc->sc_akey));
585 }
586
587 /*
588 * Precalculate SHA256 for IV generation.
589 * This is expensive operation and we can do it only once now or for
590 * every access to sector, so now will be much better.
591 */
592 SHA256_Init(&sc->sc_ivctx);
593 SHA256_Update(&sc->sc_ivctx, sc->sc_ivkey, sizeof(sc->sc_ivkey));
594
595 gp->softc = sc;
596 sc->sc_geom = gp;
597
598 bioq_init(&sc->sc_queue);
599 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
600
601 pp = NULL;
602 cp = g_new_consumer(gp);
603 error = g_attach(cp, bpp);
604 if (error != 0) {
605 if (req != NULL) {
606 gctl_error(req, "Cannot attach to %s (error=%d).",
607 bpp->name, error);
608 } else {
609 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
610 bpp->name, error);
611 }
612 goto failed;
613 }
614 /*
615 * Keep provider open all the time, so we can run critical tasks,
616 * like Master Keys deletion, without wondering if we can open
617 * provider or not.
618 * We don't open provider for writing only when user requested read-only
619 * access.
620 */
621 if (sc->sc_flags & G_ELI_FLAG_RO)
622 error = g_access(cp, 1, 0, 1);
623 else
624 error = g_access(cp, 1, 1, 1);
625 if (error != 0) {
626 if (req != NULL) {
627 gctl_error(req, "Cannot access %s (error=%d).",
628 bpp->name, error);
629 } else {
630 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
631 bpp->name, error);
632 }
633 goto failed;
634 }
635
636 LIST_INIT(&sc->sc_workers);
637
638 bzero(&crie, sizeof(crie));
639 crie.cri_alg = sc->sc_ealgo;
640 crie.cri_klen = sc->sc_ekeylen;
641 crie.cri_key = sc->sc_ekey;
642 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
643 bzero(&cria, sizeof(cria));
644 cria.cri_alg = sc->sc_aalgo;
645 cria.cri_klen = sc->sc_akeylen;
646 cria.cri_key = sc->sc_akey;
647 crie.cri_next = &cria;
648 }
649
650 threads = g_eli_threads;
651 if (threads == 0)
652 threads = mp_ncpus;
653 else if (threads > mp_ncpus) {
654 /* There is really no need for too many worker threads. */
655 threads = mp_ncpus;
656 G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
657 }
658 for (i = 0; i < threads; i++) {
659 if (g_eli_cpu_is_disabled(i)) {
660 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
661 bpp->name, i);
662 continue;
663 }
664 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
665 wr->w_softc = sc;
666 wr->w_number = i;
667
668 /*
669 * If this is the first pass, try to get hardware support.
670 * Use software cryptography, if we cannot get it.
671 */
672 if (LIST_EMPTY(&sc->sc_workers)) {
673 error = crypto_newsession(&wr->w_sid, &crie,
674 CRYPTOCAP_F_HARDWARE);
675 if (error == 0)
676 sc->sc_crypto = G_ELI_CRYPTO_HW;
677 }
678 if (sc->sc_crypto == G_ELI_CRYPTO_SW) {
679 error = crypto_newsession(&wr->w_sid, &crie,
680 CRYPTOCAP_F_SOFTWARE);
681 }
682 if (error != 0) {
683 free(wr, M_ELI);
684 if (req != NULL) {
685 gctl_error(req, "Cannot set up crypto session "
686 "for %s (error=%d).", bpp->name, error);
687 } else {
688 G_ELI_DEBUG(1, "Cannot set up crypto session "
689 "for %s (error=%d).", bpp->name, error);
690 }
691 goto failed;
692 }
693
694 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
695 "g_eli[%u] %s", i, bpp->name);
696 if (error != 0) {
697 crypto_freesession(wr->w_sid);
698 free(wr, M_ELI);
699 if (req != NULL) {
700 gctl_error(req, "Cannot create kernel thread "
701 "for %s (error=%d).", bpp->name, error);
702 } else {
703 G_ELI_DEBUG(1, "Cannot create kernel thread "
704 "for %s (error=%d).", bpp->name, error);
705 }
706 goto failed;
707 }
708 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
709 /* If we have hardware support, one thread is enough. */
710 if (sc->sc_crypto == G_ELI_CRYPTO_HW)
711 break;
712 }
713
714 /*
715 * Create decrypted provider.
716 */
717 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
718 pp->sectorsize = md->md_sectorsize;
719 pp->mediasize = bpp->mediasize;
720 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
721 pp->mediasize -= bpp->sectorsize;
722 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
723 pp->mediasize -= (pp->mediasize % pp->sectorsize);
724 else {
725 pp->mediasize /= sc->sc_bytes_per_sector;
726 pp->mediasize *= pp->sectorsize;
727 }
728
729 g_error_provider(pp, 0);
730
731 G_ELI_DEBUG(0, "Device %s created.", pp->name);
732 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
733 sc->sc_ekeylen);
734 if (sc->sc_flags & G_ELI_FLAG_AUTH)
735 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
736 G_ELI_DEBUG(0, " Crypto: %s",
737 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
738 return (gp);
739failed:
740 mtx_lock(&sc->sc_queue_mtx);
741 sc->sc_flags |= G_ELI_FLAG_DESTROY;
742 wakeup(sc);
743 /*
744 * Wait for kernel threads self destruction.
745 */
746 while (!LIST_EMPTY(&sc->sc_workers)) {
747 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
748 "geli:destroy", 0);
749 }
750 mtx_destroy(&sc->sc_queue_mtx);
751 if (cp->provider != NULL) {
752 if (cp->acr == 1)
753 g_access(cp, -1, -1, -1);
754 g_detach(cp);
755 }
756 g_destroy_consumer(cp);
757 g_destroy_geom(gp);
758 bzero(sc, sizeof(*sc));
759 free(sc, M_ELI);
760 return (NULL);
761}
762
/*
 * Destroy the geli device: stop the worker threads, wipe the keys and
 * wither the geom.  With force != 0, proceed even if still open.
 */
int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Ask worker threads to exit and wait until the last one does. */
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	/* Wipe key material before freeing the softc. */
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}
806
807static int
808g_eli_destroy_geom(struct gctl_req *req __unused,
809 struct g_class *mp __unused, struct g_geom *gp)
810{
811 struct g_eli_softc *sc;
812
813 sc = gp->softc;
814 return (g_eli_destroy(sc, 0));
815}
816
/*
 * Feed every loader-preloaded key file for the given provider into the
 * HMAC context.  Returns the number of key files consumed, or 0 if a
 * preloaded entry was malformed (missing data/size/name metadata).
 */
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data, *size;
	char *file, name[64];
	int i;

	for (i = 0; ; i++) {
		/* Key files are preloaded under type "<provider>:geli_keyfileN". */
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_search_info(keyfile, MODINFO_ADDR);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		/* MODINFO_ADDR stores a pointer to the data, dereference it. */
		data = *(void **)data;
		size = preload_search_info(keyfile, MODINFO_SIZE);
		if (size == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
	}
}
853
/*
 * Zero out the contents of all preloaded key files for the provider so
 * the key material does not linger in memory after a failed attach.
 */
static void
g_eli_keyfiles_clear(const char *provider)
{
	u_char *keyfile, *data, *size;
	char name[64];
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return;
		data = preload_search_info(keyfile, MODINFO_ADDR);
		size = preload_search_info(keyfile, MODINFO_SIZE);
		if (data == NULL || size == NULL)
			continue;
		/* MODINFO_ADDR stores a pointer to the data, dereference it. */
		data = *(void **)data;
		bzero(data, *(size_t *)size);
	}
}
874
875/*
876 * Tasting is only made on boot.
877 * We detect providers which should be attached before root is mounted.
878 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Boot-time attach only: stop tasting once root is mounted. */
	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	/* Sanity-check the metadata before asking for any passphrase. */
	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	/* md_iterations == -1 means the User Key uses no passphrase. */
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	for (i = 0; i < tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such situation, so assume
			 * that there was really no passphrase and in that case
			 * key files are no properly defined in loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			printf("Enter passphrase for %s: ", pp->name);
			/*
			 * Kernel console line reader, not libc gets(3);
			 * the third argument controls passphrase echoing.
			 */
			gets(passphrase, sizeof(passphrase),
			    g_eli_visible_passphrase);
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			/* PKCS#5 v2 derivation with md_iterations rounds. */
			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			/* Wrong key: retry until the attempts run out. */
			if (i == tries - 1) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
			    pp->name, tries - i - 1);
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}

	/*
	 * We have correct key, let's attach provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}
1017
/*
 * Dump this geom's configuration as XML fragments into sb (consumed by
 * `geli list` via kern.geom.confxml).  Geom-level details are emitted
 * only when both pp and cp are NULL.
 */
static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

/* Append the flag's name if set, comma-separated after the first one. */
#define ADD_FLAG(flag, name) do { \
	if (sc->sc_flags & (flag)) { \
		if (!first) \
			sbuf_printf(sb, ", "); \
		else \
			first = 0; \
		sbuf_printf(sb, name); \
	} \
} while (0)
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	/* ONETIME devices have no on-disk key slots, so no UsedKey. */
	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
	    g_eli_algo2str(sc->sc_ealgo));
}
1085
/*
 * shutdown_pre_sync event handler: before filesystems are synced on
 * shutdown, destroy every closed geli device and arm the still-open ones
 * to detach on last close, so key material does not outlive its use.
 */
static void
g_eli_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp;
	struct g_eli_softc *sc;
	int error;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	/* SAFE variant: g_eli_destroy() removes gp from the list. */
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		pp = LIST_FIRST(&gp->provider);
		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
		if (pp->acr + pp->acw + pp->ace == 0)
			error = g_eli_destroy(sc, 1);	/* XXX: result unused */
		else {
			/* Still open: detach automatically on last close. */
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
		}
	}
	g_topology_unlock();
	PICKUP_GIANT();
}
1114
1115static void
1116g_eli_init(struct g_class *mp)
1117{
1118
1119 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1120 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1121 if (g_eli_pre_sync == NULL)
1122 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1123}
1124
1125static void
1126g_eli_fini(struct g_class *mp)
1127{
1128
1129 if (g_eli_pre_sync != NULL)
1130 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1131}
1132
/* Register the class with GEOM and record the crypto(9) module dependency. */
DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);