1/*-
2 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 159306 2006-06-05 21:25:19Z pjd $");
28__FBSDID("$FreeBSD: head/sys/geom/eli/g_eli.c 159307 2006-06-05 21:38:54Z pjd $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/linker.h>
34#include <sys/module.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/malloc.h>
40#include <sys/kthread.h>
41#include <sys/proc.h>
42#include <sys/sched.h>
43#include <sys/smp.h>
44#include <sys/uio.h>
45#include <sys/vnode.h>
46
47#include <vm/uma.h>
48
49#include <geom/geom.h>
50#include <geom/eli/g_eli.h>
51#include <geom/eli/pkcs5v2.h>
52
53
54MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
55
56SYSCTL_DECL(_kern_geom);
57SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
58u_int g_eli_debug = 0;
59TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
60SYSCTL_UINT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
61 "Debug level");
62static u_int g_eli_tries = 3;
63TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
64SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
65 "Number of tries for entering the passphrase");
66static u_int g_eli_visible_passphrase = 0;
67TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
68SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
69 &g_eli_visible_passphrase, 0,
70 "Turn on echo when entering the passphrase (for debug purposes only!!)");
71u_int g_eli_overwrites = 5;
72TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
73SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
74 0, "Number of times on-disk keys should be overwritten when destroying them");
75static u_int g_eli_threads = 0;
76TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
77SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
78 "Number of threads doing crypto work");
79u_int g_eli_batch = 0;
80TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
81SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
82 "Use crypto operations batching");
83
84static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
85 struct g_geom *gp);
82static void g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp);
86
87static g_taste_t g_eli_taste;
88static g_dumpconf_t g_eli_dumpconf;
89
90struct g_class g_eli_class = {
91 .name = G_ELI_CLASS_NAME,
92 .version = G_VERSION,
93 .ctlreq = g_eli_config,
94 .taste = g_eli_taste,
95 .destroy_geom = g_eli_destroy_geom
96};
97
98
99/*
100 * Code paths:
101 * BIO_READ:
102 * g_eli_start -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
103 * BIO_WRITE:
104 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
105 */
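/*
 * With G_ELI_FLAG_AUTH set, READ and WRITE requests are instead queued to a
 * worker thread, which hands them to g_eli_auth_read()/g_eli_auth_run()
 * (implemented outside this file) rather than g_eli_crypto_run().
 */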
106
107
108/*
109 * EAGAIN from crypto(9) means that the request was probably rebalanced to
110 * another crypto accelerator or similar.
111 * The function updates the SID and reruns the operation.
112 */
110static int
113int
114g_eli_crypto_rerun(struct cryptop *crp)
115{
116 struct g_eli_softc *sc;
117 struct g_eli_worker *wr;
118 struct bio *bp;
119 int error;
120
121 bp = (struct bio *)crp->crp_opaque;
122 sc = bp->bio_to->geom->softc;
123 LIST_FOREACH(wr, &sc->sc_workers, w_next) {
124 if (wr->w_number == bp->bio_pflags)
125 break;
126 }
127 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
128 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
129 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
130 (uintmax_t)crp->crp_sid);
131 wr->w_sid = crp->crp_sid;
132 crp->crp_etype = 0;
133 error = crypto_dispatch(crp);
134 if (error == 0)
135 return (0);
136 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
137 crp->crp_etype = error;
138 return (error);
139}
140
141/*
142 * The function is called after reading encrypted data from the provider.
143 *
144 * g_eli_start -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
145 */
143static void
146void
147g_eli_read_done(struct bio *bp)
148{
149 struct g_eli_softc *sc;
150 struct bio *pbp;
151
152 G_ELI_LOGREQ(2, bp, "Request done.");
153 pbp = bp->bio_parent;
154 if (pbp->bio_error == 0)
155 pbp->bio_error = bp->bio_error;
156 /*
157 * Do we have all sectors already?
158 */
159 pbp->bio_inbed++;
160 if (pbp->bio_inbed < pbp->bio_children)
161 return;
162 g_destroy_bio(bp);
163 if (pbp->bio_error != 0) {
164 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
165 pbp->bio_completed = 0;
166 if (pbp->bio_driver2 != NULL) {
167 free(pbp->bio_driver2, M_ELI);
168 pbp->bio_driver2 = NULL;
169 }
170 g_io_deliver(pbp, pbp->bio_error);
171 return;
172 }
173 sc = pbp->bio_to->geom->softc;
174 mtx_lock(&sc->sc_queue_mtx);
175 bioq_insert_tail(&sc->sc_queue, pbp);
176 mtx_unlock(&sc->sc_queue_mtx);
177 wakeup(sc);
178}
179
180/*
168 * The function is called after we read and decrypt data.
169 *
170 * g_eli_start -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
171 */
172static int
173g_eli_crypto_read_done(struct cryptop *crp)
174{
175 struct bio *bp;
176
177 if (crp->crp_etype == EAGAIN) {
178 if (g_eli_crypto_rerun(crp) == 0)
179 return (0);
180 }
181 bp = (struct bio *)crp->crp_opaque;
182 bp->bio_inbed++;
183 if (crp->crp_etype == 0) {
184 G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
185 bp->bio_inbed, bp->bio_children);
186 bp->bio_completed += crp->crp_olen;
187 } else {
188 G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
189 bp->bio_inbed, bp->bio_children, crp->crp_etype);
190 if (bp->bio_error == 0)
191 bp->bio_error = crp->crp_etype;
192 }
193 /*
194 * Do we have all sectors already?
195 */
196 if (bp->bio_inbed < bp->bio_children)
197 return (0);
198 free(bp->bio_driver2, M_ELI);
199 bp->bio_driver2 = NULL;
200 if (bp->bio_error != 0) {
201 G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
202 bp->bio_error);
203 bp->bio_completed = 0;
204 }
205 /*
206 * Read is finished, send it up.
207 */
208 g_io_deliver(bp, bp->bio_error);
209 return (0);
210}
211
212/*
181 * The function is called after we encrypt and write data.
182 *
183 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
184 */
217static void
185void
186g_eli_write_done(struct bio *bp)
187{
188 struct bio *pbp;
189
190 G_ELI_LOGREQ(2, bp, "Request done.");
191 pbp = bp->bio_parent;
224 if (pbp->bio_error == 0)
225 pbp->bio_error = bp->bio_error;
192 if (pbp->bio_error == 0) {
193 if (bp->bio_error != 0)
194 pbp->bio_error = bp->bio_error;
195 }
196 /*
197 * Do we have all sectors already?
198 */
199 pbp->bio_inbed++;
200 if (pbp->bio_inbed < pbp->bio_children)
201 return;
202 free(pbp->bio_driver2, M_ELI);
203 pbp->bio_driver2 = NULL;
228 if (pbp->bio_error == 0)
229 pbp->bio_completed = pbp->bio_length;
230 else {
204 if (pbp->bio_error != 0) {
205 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
206 pbp->bio_error);
207 pbp->bio_completed = 0;
208 }
209 g_destroy_bio(bp);
210 /*
211 * Write is finished, send it up.
212 */
213 pbp->bio_completed = pbp->bio_length;
214 g_io_deliver(pbp, pbp->bio_error);
215}
216
217/*
243 * The function is called after data encryption.
244 *
245 * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
246 */
247static int
248g_eli_crypto_write_done(struct cryptop *crp)
249{
250 struct g_geom *gp;
251 struct g_consumer *cp;
252 struct bio *bp, *cbp;
253
254 if (crp->crp_etype == EAGAIN) {
255 if (g_eli_crypto_rerun(crp) == 0)
256 return (0);
257 }
258 bp = (struct bio *)crp->crp_opaque;
259 bp->bio_inbed++;
260 if (crp->crp_etype == 0) {
261 G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
262 bp->bio_inbed, bp->bio_children);
263 } else {
264 G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
265 bp->bio_inbed, bp->bio_children, crp->crp_etype);
266 if (bp->bio_error == 0)
267 bp->bio_error = crp->crp_etype;
268 }
269 /*
270 * All sectors are already encrypted?
271 */
272 if (bp->bio_inbed < bp->bio_children)
273 return (0);
274 bp->bio_inbed = 0;
275 bp->bio_children = 1;
276 cbp = bp->bio_driver1;
277 bp->bio_driver1 = NULL;
278 if (bp->bio_error != 0) {
279 G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
280 bp->bio_error);
281 free(bp->bio_driver2, M_ELI);
282 bp->bio_driver2 = NULL;
283 g_destroy_bio(cbp);
284 g_io_deliver(bp, bp->bio_error);
285 return (0);
286 }
287 cbp->bio_data = bp->bio_driver2;
288 cbp->bio_done = g_eli_write_done;
289 gp = bp->bio_to->geom;
290 cp = LIST_FIRST(&gp->consumer);
291 cbp->bio_to = cp->provider;
292 G_ELI_LOGREQ(2, cbp, "Sending request.");
293 /*
294 * Send encrypted data to the provider.
295 */
296 g_io_request(cbp, cp);
297 return (0);
298}
299
300/*
218 * This function should never be called, but GEOM requires the ->orphan()
219 * method to be set for every geom.
220 */
221static void
222g_eli_orphan_spoil_assert(struct g_consumer *cp)
223{
224
225 panic("Function %s() called for %s.", __func__, cp->geom->name);
226}
227
228static void
229g_eli_orphan(struct g_consumer *cp)
230{
231 struct g_eli_softc *sc;
232
233 g_topology_assert();
234 sc = cp->geom->softc;
235 if (sc == NULL)
236 return;
237 g_eli_destroy(sc, 1);
238}
239
240/*
241 * BIO_READ : G_ELI_START -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
242 * BIO_WRITE: G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
243 */
244static void
245g_eli_start(struct bio *bp)
246{
247 struct g_eli_softc *sc;
248 struct g_consumer *cp;
249 struct bio *cbp;
250
251 sc = bp->bio_to->geom->softc;
252 KASSERT(sc != NULL,
253 ("Provider's error should be set (error=%d)(device=%s).",
254 bp->bio_to->error, bp->bio_to->name));
255 G_ELI_LOGREQ(2, bp, "Request received.");
256
257 switch (bp->bio_cmd) {
258 case BIO_READ:
259 case BIO_WRITE:
260 case BIO_GETATTR:
261 break;
262 case BIO_DELETE:
263 /*
264 * We could eventually support BIO_DELETE requests.
265 * It could be done by overwriting the requested sectors with
266 * random data g_eli_overwrites times.
267 */
268 default:
269 g_io_deliver(bp, EOPNOTSUPP);
270 return;
271 }
272 cbp = g_clone_bio(bp);
273 if (cbp == NULL) {
274 g_io_deliver(bp, ENOMEM);
275 return;
276 }
277 switch (bp->bio_cmd) {
278 case BIO_READ:
362 cbp->bio_done = g_eli_read_done;
363 cp = LIST_FIRST(&sc->sc_geom->consumer);
364 cbp->bio_to = cp->provider;
365 G_ELI_LOGREQ(2, cbp, "Sending request.");
366 /*
367 * Read encrypted data from provider.
368 */
369 g_io_request(cbp, cp);
370 break;
279 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
280 bp->bio_driver2 = NULL;
281 cbp->bio_done = g_eli_read_done;
282 cp = LIST_FIRST(&sc->sc_geom->consumer);
283 cbp->bio_to = cp->provider;
284 G_ELI_LOGREQ(2, cbp, "Sending request.");
285 /*
286 * Read encrypted data from provider.
287 */
288 g_io_request(cbp, cp);
289 break;
290 }
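		/*
		 * 255 marks an authenticated READ that has not been sent to
		 * the backing provider yet; the worker thread checks for this
		 * value and routes such requests to g_eli_auth_read().
		 */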
291 bp->bio_pflags = 255;
292 /* FALLTHROUGH */
293 case BIO_WRITE:
294 bp->bio_driver1 = cbp;
295 mtx_lock(&sc->sc_queue_mtx);
296 bioq_insert_tail(&sc->sc_queue, bp);
297 mtx_unlock(&sc->sc_queue_mtx);
298 wakeup(sc);
299 break;
300 case BIO_GETATTR:
301 cbp->bio_done = g_std_done;
302 cp = LIST_FIRST(&sc->sc_geom->consumer);
303 cbp->bio_to = cp->provider;
304 G_ELI_LOGREQ(2, cbp, "Sending request.");
305 g_io_request(cbp, cp);
306 break;
307 }
308}
309
310/*
311 * This is the main function of the kernel worker thread, used when we don't
312 * have hardware acceleration and have to do the cryptography in software.
313 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
314 * threads with crypto work.
315 */
316static void
317g_eli_worker(void *arg)
318{
319 struct g_eli_softc *sc;
320 struct g_eli_worker *wr;
321 struct bio *bp;
322
323 wr = arg;
324 sc = wr->w_softc;
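	/*
	 * With software crypto and the default kern.geom.eli.threads=0 there
	 * is one worker per CPU, and each worker binds itself to its own CPU.
	 */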
325 mtx_lock_spin(&sched_lock);
326 sched_prio(curthread, PRIBIO);
327 if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
328 sched_bind(curthread, wr->w_number);
329 mtx_unlock_spin(&sched_lock);
330
331 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
332
333 for (;;) {
334 mtx_lock(&sc->sc_queue_mtx);
335 bp = bioq_takefirst(&sc->sc_queue);
336 if (bp == NULL) {
415 if ((sc->sc_flags & G_ELI_FLAG_DESTROY) != 0) {
337 if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
338 LIST_REMOVE(wr, w_next);
339 crypto_freesession(wr->w_sid);
340 free(wr, M_ELI);
341 G_ELI_DEBUG(1, "Thread %s exiting.",
342 curthread->td_proc->p_comm);
343 wakeup(&sc->sc_workers);
344 mtx_unlock(&sc->sc_queue_mtx);
345 kthread_exit(0);
346 }
347 msleep(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
348 "geli:w", 0);
349 continue;
350 }
351 mtx_unlock(&sc->sc_queue_mtx);
430 g_eli_crypto_run(wr, bp);
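		/*
		 * Authenticated READs queued by g_eli_start()
		 * (bio_pflags == 255) go to g_eli_auth_read(); other requests
		 * on an authenticated device go to g_eli_auth_run();
		 * privacy-only requests go to g_eli_crypto_run().
		 */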
352 if (bp->bio_cmd == BIO_READ && bp->bio_pflags == 255)
353 g_eli_auth_read(sc, bp);
354 else if (sc->sc_flags & G_ELI_FLAG_AUTH)
355 g_eli_auth_run(wr, bp);
356 else
357 g_eli_crypto_run(wr, bp);
358 }
359}
360
361/*
362 * Here we generate the IV. It is unique for every sector.
363 */
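/*
 * In effect the IV is SHA256(IV-Key || sector byte offset), truncated to the
 * requested size; the SHA256 state over the IV-Key is precomputed in sc_ivctx.
 */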
437static void
364void
365g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
366 size_t size)
367{
368 u_char hash[SHA256_DIGEST_LENGTH];
369 SHA256_CTX ctx;
370
371 /* Copy precalculated SHA256 context for IV-Key. */
372 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
373 SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
374 SHA256_Final(hash, &ctx);
375 bcopy(hash, iv, size);
376}
377
451/*
452 * This is the main function responsible for cryptography (ie. communication
453 * with crypto(9) subsystem).
454 */
455static void
456g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp)
457{
458 struct g_eli_softc *sc;
459 struct cryptop *crp;
460 struct cryptodesc *crd;
461 struct uio *uio;
462 struct iovec *iov;
463 u_int i, nsec, add, secsize;
464 int err, error;
465 size_t size;
466 u_char *p, *data;
467
468 G_ELI_LOGREQ(3, bp, "%s", __func__);
469
470 bp->bio_pflags = wr->w_number;
471 sc = wr->w_softc;
472 secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize;
473 nsec = bp->bio_length / secsize;
474
475 /*
476 * Calculate how much memory we need.
477 * We need a separate crypto operation for every single sector.
478 * It is much faster to calculate the total amount of memory needed here
479 * and do the allocation once, instead of allocating memory in pieces (many,
480 * many pieces).
481 */
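	/*
	 * Illustrative example: a 64kB request on a provider with 512-byte
	 * sectors gives nsec = 128, so a single malloc() covers 128
	 * cryptop/cryptodesc/uio/iovec tuples (plus, for a write, another
	 * 64kB for the encrypted copy of the data).
	 */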
482 size = sizeof(*crp) * nsec;
483 size += sizeof(*crd) * nsec;
484 size += sizeof(*uio) * nsec;
485 size += sizeof(*iov) * nsec;
486 /*
487 * If we are writing, we cannot destroy the current bio_data content,
488 * so we need to allocate additional memory for the encrypted data.
489 */
490 if (bp->bio_cmd == BIO_WRITE)
491 size += bp->bio_length;
492 p = malloc(size, M_ELI, M_WAITOK);
493
494 bp->bio_inbed = 0;
495 bp->bio_children = nsec;
496 bp->bio_driver2 = p;
497
498 if (bp->bio_cmd == BIO_READ)
499 data = bp->bio_data;
500 else {
501 data = p;
502 p += bp->bio_length;
503 bcopy(bp->bio_data, data, bp->bio_length);
504 }
505
506 error = 0;
507 for (i = 0, add = 0; i < nsec; i++, add += secsize) {
508 crp = (struct cryptop *)p; p += sizeof(*crp);
509 crd = (struct cryptodesc *)p; p += sizeof(*crd);
510 uio = (struct uio *)p; p += sizeof(*uio);
511 iov = (struct iovec *)p; p += sizeof(*iov);
512
513 iov->iov_len = secsize;
514 iov->iov_base = data;
515 data += secsize;
516
517 uio->uio_iov = iov;
518 uio->uio_iovcnt = 1;
519 uio->uio_segflg = UIO_SYSSPACE;
520 uio->uio_resid = secsize;
521
522 crp->crp_sid = wr->w_sid;
523 crp->crp_ilen = secsize;
524 crp->crp_olen = secsize;
525 crp->crp_opaque = (void *)bp;
526 crp->crp_buf = (void *)uio;
527 if (bp->bio_cmd == BIO_WRITE)
528 crp->crp_callback = g_eli_crypto_write_done;
529 else /* if (bp->bio_cmd == BIO_READ) */
530 crp->crp_callback = g_eli_crypto_read_done;
531 crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL;
532 crp->crp_desc = crd;
533
534 crd->crd_skip = 0;
535 crd->crd_len = secsize;
536 crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
537 if (bp->bio_cmd == BIO_WRITE)
538 crd->crd_flags |= CRD_F_ENCRYPT;
539 crd->crd_alg = sc->sc_algo;
540 crd->crd_key = sc->sc_datakey;
541 crd->crd_klen = sc->sc_keylen;
542 g_eli_crypto_ivgen(sc, bp->bio_offset + add, crd->crd_iv,
543 sizeof(crd->crd_iv));
544 crd->crd_next = NULL;
545
546 crp->crp_etype = 0;
547 err = crypto_dispatch(crp);
548 if (error == 0)
549 error = err;
550 }
551 if (bp->bio_error == 0)
552 bp->bio_error = error;
553}
554
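/*
 * Read and decode the on-disk geli metadata, which is stored in the
 * provider's last sector. A temporary geom/consumer pair is used just for
 * this read.
 */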
378int
379g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
380 struct g_eli_metadata *md)
381{
382 struct g_geom *gp;
383 struct g_consumer *cp;
384 u_char *buf = NULL;
385 int error;
386
387 g_topology_assert();
388
389 gp = g_new_geomf(mp, "eli:taste");
390 gp->start = g_eli_start;
391 gp->access = g_std_access;
392 /*
393 * g_eli_read_metadata() is always called from the event thread.
394 * Our geom is created and destroyed in the same event, so there
395 * can be no orphan or spoil event in the meantime.
396 */
397 gp->orphan = g_eli_orphan_spoil_assert;
398 gp->spoiled = g_eli_orphan_spoil_assert;
399 cp = g_new_consumer(gp);
400 error = g_attach(cp, pp);
401 if (error != 0)
402 goto end;
403 error = g_access(cp, 1, 0, 0);
404 if (error != 0)
405 goto end;
406 g_topology_unlock();
407 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
408 &error);
409 g_topology_lock();
410 if (buf == NULL)
411 goto end;
412 eli_metadata_decode(buf, md);
413end:
414 if (buf != NULL)
415 g_free(buf);
416 if (cp->provider != NULL) {
417 if (cp->acr == 1)
418 g_access(cp, -1, 0, 0);
419 g_detach(cp);
420 }
421 g_destroy_consumer(cp);
422 g_destroy_geom(gp);
423 return (error);
424}
425
426/*
427 * This function is called on the last close of the provider, when the user
428 * has asked for the device to be detached in that situation.
429 */
430static void
431g_eli_last_close(struct g_eli_softc *sc)
432{
433 struct g_geom *gp;
434 struct g_provider *pp;
435 char ppname[64];
436 int error;
437
438 g_topology_assert();
439 gp = sc->sc_geom;
440 pp = LIST_FIRST(&gp->provider);
441 strlcpy(ppname, pp->name, sizeof(ppname));
442 error = g_eli_destroy(sc, 1);
443 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
444 ppname, error));
445 G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
446}
447
448int
449g_eli_access(struct g_provider *pp, int dr, int dw, int de)
450{
451 struct g_eli_softc *sc;
452 struct g_geom *gp;
453
454 gp = pp->geom;
455 sc = gp->softc;
456
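	/*
	 * dr, dw and de are deltas to the provider's access counts, so
	 * pp->acr + dr, pp->acw + dw and pp->ace + de are the counts after
	 * this request is applied.
	 */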
457 if (dw > 0) {
458 /* Someone is opening us for write, we need to remember that. */
459 sc->sc_flags |= G_ELI_FLAG_WOPEN;
460 return (0);
461 }
462 /* Is this the last close? */
463 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
464 return (0);
465
466 /*
467 * Automatically detach on last close if requested.
468 */
469 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
470 (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
471 g_eli_last_close(sc);
472 }
473 return (0);
474}
475
476struct g_geom *
477g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
478 const struct g_eli_metadata *md, const u_char *mkey, int nkey)
479{
480 struct g_eli_softc *sc;
481 struct g_eli_worker *wr;
482 struct g_geom *gp;
483 struct g_provider *pp;
484 struct g_consumer *cp;
662 struct cryptoini cri;
485 struct cryptoini crie, cria;
486 u_int i, threads;
487 int error;
488
489 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
490
491 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
492 gp->softc = NULL; /* for a moment */
493
494 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
495 gp->start = g_eli_start;
496 /*
497 * Spoiling cannot actually happen, because we keep the provider open
498 * for writing all the time.
499 */
500 gp->spoiled = g_eli_orphan_spoil_assert;
501 gp->orphan = g_eli_orphan;
502 /*
503 * If the detach-on-last-close feature is not enabled, we can simply use
504 * g_std_access().
505 */
506 if (md->md_flags & G_ELI_FLAG_WO_DETACH)
507 gp->access = g_eli_access;
508 else
509 gp->access = g_std_access;
510 gp->dumpconf = g_eli_dumpconf;
511
512 sc->sc_crypto = G_ELI_CRYPTO_SW;
513 sc->sc_flags = md->md_flags;
691 sc->sc_algo = md->md_algo;
514 sc->sc_ealgo = md->md_ealgo;
515 sc->sc_nkey = nkey;
516 /*
517 * Remember the keys in our softc structure.
518 */
696 bcopy(mkey, sc->sc_ivkey, sizeof(sc->sc_ivkey));
697 mkey += sizeof(sc->sc_ivkey);
698 bcopy(mkey, sc->sc_datakey, sizeof(sc->sc_datakey));
699 sc->sc_keylen = md->md_keylen;
519 g_eli_mkey_propagate(sc, mkey);
520 sc->sc_ekeylen = md->md_keylen;
521
522 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
523 sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
524 sc->sc_aalgo = md->md_aalgo;
525 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
526
527 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
528 /*
529 * Some hash functions (like SHA1 and RIPEMD160) generate hashes
530 * whose length is not a multiple of 128 bits, but we want the data
531 * length to be a multiple of 128 bits, so we can encrypt without
532 * padding. The line below rounds the data length down to a
533 * multiple of 128 bits.
534 */
535 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
536
537 sc->sc_bytes_per_sector =
538 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
539 sc->sc_bytes_per_sector *= bpp->sectorsize;
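		/*
		 * Illustrative example (hypothetical sizes): with 512-byte
		 * backing sectors and a 16-byte per-sector authentication tag,
		 * sc_data_per_sector = 512 - 16 = 496 (already a multiple of
		 * 16). A 4096-byte logical sector then needs
		 * (4096 - 1) / 496 + 1 = 9 backing sectors, so
		 * sc_bytes_per_sector = 9 * 512 = 4608.
		 */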
540 /*
541 * Precalculate the SHA256 state for HMAC key generation.
542 * This is an expensive operation; we can do it once now or on every
543 * access to a sector, so doing it now is much better.
544 */
545 SHA256_Init(&sc->sc_akeyctx);
546 SHA256_Update(&sc->sc_akeyctx, sc->sc_akey,
547 sizeof(sc->sc_akey));
548 }
549
550 /*
551 * Precalculate the SHA256 state for IV generation.
552 * This is an expensive operation; we can do it once now or on every
553 * access to a sector, so doing it now is much better.
554 */
555 SHA256_Init(&sc->sc_ivctx);
556 SHA256_Update(&sc->sc_ivctx, sc->sc_ivkey, sizeof(sc->sc_ivkey));
557
558 gp->softc = sc;
559 sc->sc_geom = gp;
560
561 bioq_init(&sc->sc_queue);
562 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
563
564 pp = NULL;
565 cp = g_new_consumer(gp);
566 error = g_attach(cp, bpp);
567 if (error != 0) {
568 if (req != NULL) {
569 gctl_error(req, "Cannot attach to %s (error=%d).",
570 bpp->name, error);
571 } else {
572 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
573 bpp->name, error);
574 }
575 goto failed;
576 }
577 /*
578 * Keep the provider open all the time, so we can run critical tasks,
579 * like Master Key deletion, without wondering whether we can open the
580 * provider or not.
581 */
582 error = g_access(cp, 1, 1, 1);
583 if (error != 0) {
584 if (req != NULL) {
585 gctl_error(req, "Cannot access %s (error=%d).",
586 bpp->name, error);
587 } else {
588 G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
589 bpp->name, error);
590 }
591 goto failed;
592 }
593
594 LIST_INIT(&sc->sc_workers);
595
747 bzero(&cri, sizeof(cri));
748 cri.cri_alg = sc->sc_algo;
749 cri.cri_klen = sc->sc_keylen;
750 cri.cri_key = sc->sc_datakey;
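	/*
	 * crie describes the data-encryption transform; when authentication
	 * is enabled, cria is chained through crie.cri_next so that a single
	 * crypto(9) session covers both encryption and authentication.
	 */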
596 bzero(&crie, sizeof(crie));
597 crie.cri_alg = sc->sc_ealgo;
598 crie.cri_klen = sc->sc_ekeylen;
599 crie.cri_key = sc->sc_ekey;
600 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
601 bzero(&cria, sizeof(cria));
602 cria.cri_alg = sc->sc_aalgo;
603 cria.cri_klen = sc->sc_akeylen;
604 cria.cri_key = sc->sc_akey;
605 crie.cri_next = &cria;
606 }
607
608 threads = g_eli_threads;
609 if (threads == 0)
610 threads = mp_ncpus;
611 else if (threads > mp_ncpus) {
612 /* There is really no need for too many worker threads. */
613 threads = mp_ncpus;
614 G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
615 }
616 for (i = 0; i < threads; i++) {
617 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
618 wr->w_softc = sc;
619 wr->w_number = i;
620
621 /*
622 * If this is the first pass, try to get hardware support.
623 * Use software cryptography if we cannot get it.
624 */
625 if (i == 0) {
770 error = crypto_newsession(&wr->w_sid, &cri, 1);
626 error = crypto_newsession(&wr->w_sid, &crie, 1);
627 if (error == 0)
628 sc->sc_crypto = G_ELI_CRYPTO_HW;
629 }
630 if (sc->sc_crypto == G_ELI_CRYPTO_SW)
775 error = crypto_newsession(&wr->w_sid, &cri, 0);
631 error = crypto_newsession(&wr->w_sid, &crie, 0);
632 if (error != 0) {
633 free(wr, M_ELI);
634 if (req != NULL) {
635 gctl_error(req, "Cannot set up crypto session "
636 "for %s (error=%d).", bpp->name, error);
637 } else {
638 G_ELI_DEBUG(1, "Cannot set up crypto session "
639 "for %s (error=%d).", bpp->name, error);
640 }
641 goto failed;
642 }
643
644 error = kthread_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
645 "g_eli[%u] %s", i, bpp->name);
646 if (error != 0) {
647 crypto_freesession(wr->w_sid);
648 free(wr, M_ELI);
649 if (req != NULL) {
650 gctl_error(req, "Cannot create kernel thread "
651 "for %s (error=%d).", bpp->name, error);
652 } else {
653 G_ELI_DEBUG(1, "Cannot create kernel thread "
654 "for %s (error=%d).", bpp->name, error);
655 }
656 goto failed;
657 }
658 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
659 /* If we have hardware support, one thread is enough. */
660 if (sc->sc_crypto == G_ELI_CRYPTO_HW)
661 break;
662 }
663
664 /*
665 * Create decrypted provider.
666 */
667 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
668 pp->sectorsize = md->md_sectorsize;
669 pp->mediasize = bpp->mediasize;
814 if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0)
670 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
671 pp->mediasize -= bpp->sectorsize;
816 pp->mediasize -= (pp->mediasize % pp->sectorsize);
672 if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
673 pp->mediasize -= (pp->mediasize % pp->sectorsize);
674 else {
675 pp->mediasize /= sc->sc_bytes_per_sector;
676 pp->mediasize *= pp->sectorsize;
677 }
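	/*
	 * With authentication, every pp->sectorsize bytes of decrypted data
	 * occupy sc_bytes_per_sector bytes on the backing provider, hence the
	 * divide-and-multiply above.
	 */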
678
679 g_error_provider(pp, 0);
680
681 G_ELI_DEBUG(0, "Device %s created.", pp->name);
820 G_ELI_DEBUG(0, " Cipher: %s", g_eli_algo2str(sc->sc_algo));
821 G_ELI_DEBUG(0, "Key length: %u", sc->sc_keylen);
682 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
683 sc->sc_ekeylen);
684 if (sc->sc_flags & G_ELI_FLAG_AUTH)
685 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
686 G_ELI_DEBUG(0, " Crypto: %s",
687 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
688 return (gp);
689failed:
690 mtx_lock(&sc->sc_queue_mtx);
691 sc->sc_flags |= G_ELI_FLAG_DESTROY;
692 wakeup(sc);
693 /*
694 * Wait for the kernel threads to self-destruct.
695 */
696 while (!LIST_EMPTY(&sc->sc_workers)) {
697 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
698 "geli:destroy", 0);
699 }
700 mtx_destroy(&sc->sc_queue_mtx);
701 if (cp->provider != NULL) {
702 if (cp->acr == 1)
703 g_access(cp, -1, -1, -1);
704 g_detach(cp);
705 }
706 g_destroy_consumer(cp);
707 g_destroy_geom(gp);
708 bzero(sc, sizeof(*sc));
709 free(sc, M_ELI);
710 return (NULL);
711}
712
713int
714g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
715{
716 struct g_geom *gp;
717 struct g_provider *pp;
718
719 g_topology_assert();
720
721 if (sc == NULL)
722 return (ENXIO);
723
724 gp = sc->sc_geom;
725 pp = LIST_FIRST(&gp->provider);
726 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
727 if (force) {
728 G_ELI_DEBUG(1, "Device %s is still open, so it "
729 "cannot be definitely removed.", pp->name);
730 } else {
731 G_ELI_DEBUG(1,
732 "Device %s is still open (r%dw%de%d).", pp->name,
733 pp->acr, pp->acw, pp->ace);
734 return (EBUSY);
735 }
736 }
737
738 mtx_lock(&sc->sc_queue_mtx);
739 sc->sc_flags |= G_ELI_FLAG_DESTROY;
740 wakeup(sc);
741 while (!LIST_EMPTY(&sc->sc_workers)) {
742 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
743 "geli:destroy", 0);
744 }
745 mtx_destroy(&sc->sc_queue_mtx);
746 gp->softc = NULL;
747 bzero(sc, sizeof(*sc));
748 free(sc, M_ELI);
749
750 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
751 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
752 g_wither_geom_close(gp, ENXIO);
753
754 return (0);
755}
756
757static int
758g_eli_destroy_geom(struct gctl_req *req __unused,
759 struct g_class *mp __unused, struct g_geom *gp)
760{
761 struct g_eli_softc *sc;
762
763 sc = gp->softc;
764 return (g_eli_destroy(sc, 0));
765}
766
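/*
 * Key files are handed to us by the loader as preloaded data with type
 * "<provider>:geli_keyfile<N>" (typically configured via loader.conf; see
 * geli(8)). Each file found is fed into the HMAC context and the number of
 * loaded key files is returned.
 */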
767static int
768g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
769{
770 u_char *keyfile, *data, *size;
771 char *file, name[64];
772 int i;
773
774 for (i = 0; ; i++) {
775 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
776 keyfile = preload_search_by_type(name);
777 if (keyfile == NULL)
778 return (i); /* Return number of loaded keyfiles. */
779 data = preload_search_info(keyfile, MODINFO_ADDR);
780 if (data == NULL) {
781 G_ELI_DEBUG(0, "Cannot find key file data for %s.",
782 name);
783 return (0);
784 }
785 data = *(void **)data;
786 size = preload_search_info(keyfile, MODINFO_SIZE);
787 if (size == NULL) {
788 G_ELI_DEBUG(0, "Cannot find key file size for %s.",
789 name);
790 return (0);
791 }
792 file = preload_search_info(keyfile, MODINFO_NAME);
793 if (file == NULL) {
794 G_ELI_DEBUG(0, "Cannot find key file name for %s.",
795 name);
796 return (0);
797 }
798 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
799 provider, name);
800 g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
801 }
802}
803
804static void
805g_eli_keyfiles_clear(const char *provider)
806{
807 u_char *keyfile, *data, *size;
808 char name[64];
809 int i;
810
811 for (i = 0; ; i++) {
812 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
813 keyfile = preload_search_by_type(name);
814 if (keyfile == NULL)
815 return;
816 data = preload_search_info(keyfile, MODINFO_ADDR);
817 size = preload_search_info(keyfile, MODINFO_SIZE);
818 if (data == NULL || size == NULL)
819 continue;
820 data = *(void **)data;
821 bzero(data, *(size_t *)size);
822 }
823}
824
825/*
826 * Tasting is only done at boot time.
827 * We detect providers which should be attached before root is mounted.
828 */
829static struct g_geom *
830g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
831{
832 struct g_eli_metadata md;
833 struct g_geom *gp;
834 struct hmac_ctx ctx;
835 char passphrase[256];
836 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
837 u_int i, nkey, nkeyfiles, tries;
838 int error;
839
840 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
841 g_topology_assert();
842
843 if (rootvnode != NULL || g_eli_tries == 0)
844 return (NULL);
845
846 G_ELI_DEBUG(3, "Tasting %s.", pp->name);
847
848 error = g_eli_read_metadata(mp, pp, &md);
849 if (error != 0)
850 return (NULL);
851 gp = NULL;
852
853 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
854 return (NULL);
855 if (md.md_version > G_ELI_VERSION) {
856 printf("geom_eli.ko module is too old to handle %s.\n",
857 pp->name);
858 return (NULL);
859 }
860 if (md.md_provsize != pp->mediasize)
861 return (NULL);
862 /* Should we attach it on boot? */
999 if ((md.md_flags & G_ELI_FLAG_BOOT) == 0)
863 if (!(md.md_flags & G_ELI_FLAG_BOOT))
864 return (NULL);
865 if (md.md_keys == 0x00) {
866 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
867 return (NULL);
868 }
869 if (md.md_iterations == -1) {
870 /* If there is no passphrase, we try only once. */
871 tries = 1;
872 } else {
873 /* Ask for the passphrase no more than g_eli_tries times. */
874 tries = g_eli_tries;
875 }
876
877 for (i = 0; i < tries; i++) {
878 g_eli_crypto_hmac_init(&ctx, NULL, 0);
879
880 /*
881 * Load all key files.
882 */
883 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
884
885 if (nkeyfiles == 0 && md.md_iterations == -1) {
886 /*
887 * No key files and no passphrase: something is
888 * definitely wrong here.
889 * geli(8) doesn't allow such a situation, so assume
890 * that there really was no passphrase and that the
891 * key files are not properly defined in loader.conf.
892 */
893 G_ELI_DEBUG(0,
894 "Found no key files in loader.conf for %s.",
895 pp->name);
896 return (NULL);
897 }
898
899 /* Ask for the passphrase if defined. */
900 if (md.md_iterations >= 0) {
901 printf("Enter passphrase for %s: ", pp->name);
902 gets(passphrase, sizeof(passphrase),
903 g_eli_visible_passphrase);
904 }
905
906 /*
907 * Prepare Derived-Key from the user passphrase.
908 */
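		/*
		 * The Derived-Key is an HMAC over all key-file contents
		 * followed by either salt||passphrase (md_iterations == 0) or
		 * a PKCS#5v2 key generated from the salt and passphrase
		 * (md_iterations > 0); it is used below to decrypt the
		 * Master Key.
		 */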
909 if (md.md_iterations == 0) {
910 g_eli_crypto_hmac_update(&ctx, md.md_salt,
911 sizeof(md.md_salt));
912 g_eli_crypto_hmac_update(&ctx, passphrase,
913 strlen(passphrase));
914 } else if (md.md_iterations > 0) {
915 u_char dkey[G_ELI_USERKEYLEN];
916
917 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
918 sizeof(md.md_salt), passphrase, md.md_iterations);
919 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
920 bzero(dkey, sizeof(dkey));
921 }
922
923 g_eli_crypto_hmac_final(&ctx, key, 0);
924
925 /*
926 * Decrypt Master-Key.
927 */
928 error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
929 bzero(key, sizeof(key));
930 if (error == -1) {
931 if (i == tries - 1) {
932 G_ELI_DEBUG(0,
933 "Wrong key for %s. No tries left.",
934 pp->name);
935 g_eli_keyfiles_clear(pp->name);
936 return (NULL);
937 }
938 G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
939 pp->name, tries - i - 1);
940 /* Try again. */
941 continue;
942 } else if (error > 0) {
943 G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
944 pp->name, error);
945 g_eli_keyfiles_clear(pp->name);
946 return (NULL);
947 }
948 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
949 break;
950 }
951
952 /*
953 * We have the correct key; let's attach the provider.
954 */
955 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
956 bzero(mkey, sizeof(mkey));
957 bzero(&md, sizeof(md));
958 if (gp == NULL) {
959 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
960 G_ELI_SUFFIX);
961 return (NULL);
962 }
963 return (gp);
964}
965
966static void
967g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
968 struct g_consumer *cp, struct g_provider *pp)
969{
970 struct g_eli_softc *sc;
971
972 g_topology_assert();
973 sc = gp->softc;
974 if (sc == NULL)
975 return;
976 if (pp != NULL || cp != NULL)
977 return; /* Nothing here. */
978 sbuf_printf(sb, "%s<Flags>", indent);
979 if (sc->sc_flags == 0)
980 sbuf_printf(sb, "NONE");
981 else {
982 int first = 1;
983
984#define ADD_FLAG(flag, name) do { \
1121 if ((sc->sc_flags & (flag)) != 0) { \
985 if (sc->sc_flags & (flag)) { \
986 if (!first) \
987 sbuf_printf(sb, ", "); \
988 else \
989 first = 0; \
990 sbuf_printf(sb, name); \
991 } \
992} while (0)
993 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
994 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
995 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
996 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
997 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
998 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
999 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1000#undef ADD_FLAG
1001 }
1002 sbuf_printf(sb, "</Flags>\n");
1003
1139 if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) {
1004 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1005 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1006 sc->sc_nkey);
1007 }
1008 sbuf_printf(sb, "%s<Crypto>", indent);
1009 switch (sc->sc_crypto) {
1010 case G_ELI_CRYPTO_HW:
1011 sbuf_printf(sb, "hardware");
1012 break;
1013 case G_ELI_CRYPTO_SW:
1014 sbuf_printf(sb, "software");
1015 break;
1016 default:
1017 sbuf_printf(sb, "UNKNOWN");
1018 break;
1019 }
1020 sbuf_printf(sb, "</Crypto>\n");
1156 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent, sc->sc_keylen);
1157 sbuf_printf(sb, "%s<Cipher>%s</Cipher>\n", indent,
1158 g_eli_algo2str(sc->sc_algo));
1021 if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1022 sbuf_printf(sb,
1023 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1024 indent, g_eli_algo2str(sc->sc_aalgo));
1025 }
1026 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1027 sc->sc_ekeylen);
1028 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
1029 g_eli_algo2str(sc->sc_ealgo));
1030}
1031
1032DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1033MODULE_DEPEND(geom_eli, crypto, 1, 1, 1);