/*-
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25159307Spjd */ 26159307Spjd 27159307Spjd#include <sys/cdefs.h> 28159307Spjd__FBSDID("$FreeBSD$"); 29159307Spjd 30159307Spjd#include <sys/param.h> 31159307Spjd#include <sys/systm.h> 32159307Spjd#include <sys/kernel.h> 33159307Spjd#include <sys/linker.h> 34159307Spjd#include <sys/module.h> 35159307Spjd#include <sys/lock.h> 36159307Spjd#include <sys/mutex.h> 37159307Spjd#include <sys/bio.h> 38159307Spjd#include <sys/sysctl.h> 39159307Spjd#include <sys/malloc.h> 40159307Spjd#include <sys/kthread.h> 41159307Spjd#include <sys/proc.h> 42159307Spjd#include <sys/sched.h> 43159307Spjd#include <sys/smp.h> 44159307Spjd#include <sys/uio.h> 45159307Spjd#include <sys/vnode.h> 46159307Spjd 47159307Spjd#include <vm/uma.h> 48159307Spjd 49159307Spjd#include <geom/geom.h> 50159307Spjd#include <geom/eli/g_eli.h> 51159307Spjd#include <geom/eli/pkcs5v2.h> 52159307Spjd 53159307Spjd/* 54159307Spjd * Code paths: 55159307Spjd * BIO_READ: 56214118Spjd * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver 57159307Spjd * BIO_WRITE: 58159307Spjd * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 59159307Spjd */ 60159307Spjd 61159307SpjdMALLOC_DECLARE(M_ELI); 62159307Spjd 63159307Spjd/* 64159307Spjd * The function is called after we read and decrypt data. 
65159307Spjd * 66214118Spjd * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver 67159307Spjd */ 68159307Spjdstatic int 69159307Spjdg_eli_crypto_read_done(struct cryptop *crp) 70159307Spjd{ 71214118Spjd struct g_eli_softc *sc; 72159307Spjd struct bio *bp; 73159307Spjd 74159307Spjd if (crp->crp_etype == EAGAIN) { 75159307Spjd if (g_eli_crypto_rerun(crp) == 0) 76159307Spjd return (0); 77159307Spjd } 78159307Spjd bp = (struct bio *)crp->crp_opaque; 79159307Spjd bp->bio_inbed++; 80159307Spjd if (crp->crp_etype == 0) { 81159307Spjd G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).", 82159307Spjd bp->bio_inbed, bp->bio_children); 83159307Spjd bp->bio_completed += crp->crp_olen; 84159307Spjd } else { 85159307Spjd G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", 86159307Spjd bp->bio_inbed, bp->bio_children, crp->crp_etype); 87159307Spjd if (bp->bio_error == 0) 88159307Spjd bp->bio_error = crp->crp_etype; 89159307Spjd } 90220922Spjd sc = bp->bio_to->geom->softc; 91220922Spjd g_eli_key_drop(sc, crp->crp_desc->crd_key); 92159307Spjd /* 93159307Spjd * Do we have all sectors already? 94159307Spjd */ 95159307Spjd if (bp->bio_inbed < bp->bio_children) 96159307Spjd return (0); 97159307Spjd free(bp->bio_driver2, M_ELI); 98159307Spjd bp->bio_driver2 = NULL; 99159307Spjd if (bp->bio_error != 0) { 100159307Spjd G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", 101159307Spjd bp->bio_error); 102159307Spjd bp->bio_completed = 0; 103159307Spjd } 104159307Spjd /* 105159307Spjd * Read is finished, send it up. 106159307Spjd */ 107159307Spjd g_io_deliver(bp, bp->bio_error); 108214118Spjd atomic_subtract_int(&sc->sc_inflight, 1); 109159307Spjd return (0); 110159307Spjd} 111159307Spjd 112159307Spjd/* 113159307Spjd * The function is called after data encryption. 
114159307Spjd * 115159307Spjd * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver 116159307Spjd */ 117159307Spjdstatic int 118159307Spjdg_eli_crypto_write_done(struct cryptop *crp) 119159307Spjd{ 120214118Spjd struct g_eli_softc *sc; 121159307Spjd struct g_geom *gp; 122159307Spjd struct g_consumer *cp; 123159307Spjd struct bio *bp, *cbp; 124159307Spjd 125159307Spjd if (crp->crp_etype == EAGAIN) { 126159307Spjd if (g_eli_crypto_rerun(crp) == 0) 127159307Spjd return (0); 128159307Spjd } 129159307Spjd bp = (struct bio *)crp->crp_opaque; 130159307Spjd bp->bio_inbed++; 131159307Spjd if (crp->crp_etype == 0) { 132159307Spjd G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", 133159307Spjd bp->bio_inbed, bp->bio_children); 134159307Spjd } else { 135159307Spjd G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", 136159307Spjd bp->bio_inbed, bp->bio_children, crp->crp_etype); 137159307Spjd if (bp->bio_error == 0) 138159307Spjd bp->bio_error = crp->crp_etype; 139159307Spjd } 140220922Spjd gp = bp->bio_to->geom; 141220922Spjd sc = gp->softc; 142220922Spjd g_eli_key_drop(sc, crp->crp_desc->crd_key); 143159307Spjd /* 144159307Spjd * All sectors are already encrypted? 
145159307Spjd */ 146159307Spjd if (bp->bio_inbed < bp->bio_children) 147159307Spjd return (0); 148159307Spjd bp->bio_inbed = 0; 149159307Spjd bp->bio_children = 1; 150159307Spjd cbp = bp->bio_driver1; 151159307Spjd bp->bio_driver1 = NULL; 152159307Spjd if (bp->bio_error != 0) { 153159307Spjd G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", 154159307Spjd bp->bio_error); 155159307Spjd free(bp->bio_driver2, M_ELI); 156159307Spjd bp->bio_driver2 = NULL; 157159307Spjd g_destroy_bio(cbp); 158159307Spjd g_io_deliver(bp, bp->bio_error); 159214118Spjd atomic_subtract_int(&sc->sc_inflight, 1); 160159307Spjd return (0); 161159307Spjd } 162159307Spjd cbp->bio_data = bp->bio_driver2; 163159307Spjd cbp->bio_done = g_eli_write_done; 164159307Spjd cp = LIST_FIRST(&gp->consumer); 165159307Spjd cbp->bio_to = cp->provider; 166159307Spjd G_ELI_LOGREQ(2, cbp, "Sending request."); 167159307Spjd /* 168159307Spjd * Send encrypted data to the provider. 169159307Spjd */ 170159307Spjd g_io_request(cbp, cp); 171159307Spjd return (0); 172159307Spjd} 173159307Spjd 174159307Spjd/* 175214118Spjd * The function is called to read encrypted data. 176214118Spjd * 177214118Spjd * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver 178214118Spjd */ 179214118Spjdvoid 180214118Spjdg_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker) 181214118Spjd{ 182214118Spjd struct g_consumer *cp; 183214118Spjd struct bio *cbp; 184214118Spjd 185214118Spjd if (!fromworker) { 186214118Spjd /* 187214118Spjd * We are not called from the worker thread, so check if 188214118Spjd * device is suspended. 189214118Spjd */ 190214118Spjd mtx_lock(&sc->sc_queue_mtx); 191214118Spjd if (sc->sc_flags & G_ELI_FLAG_SUSPEND) { 192214118Spjd /* 193214118Spjd * If device is suspended, we place the request onto 194214118Spjd * the queue, so it can be handled after resume. 
195214118Spjd */ 196214118Spjd G_ELI_DEBUG(0, "device suspended, move onto queue"); 197214118Spjd bioq_insert_tail(&sc->sc_queue, bp); 198214118Spjd mtx_unlock(&sc->sc_queue_mtx); 199214118Spjd wakeup(sc); 200214118Spjd return; 201214118Spjd } 202214118Spjd atomic_add_int(&sc->sc_inflight, 1); 203214118Spjd mtx_unlock(&sc->sc_queue_mtx); 204214118Spjd } 205214118Spjd bp->bio_pflags = 0; 206214118Spjd bp->bio_driver2 = NULL; 207214118Spjd cbp = bp->bio_driver1; 208214118Spjd cbp->bio_done = g_eli_read_done; 209214118Spjd cp = LIST_FIRST(&sc->sc_geom->consumer); 210214118Spjd cbp->bio_to = cp->provider; 211214118Spjd G_ELI_LOGREQ(2, cbp, "Sending request."); 212214118Spjd /* 213214118Spjd * Read encrypted data from provider. 214214118Spjd */ 215214118Spjd g_io_request(cbp, cp); 216214118Spjd} 217214118Spjd 218214118Spjd/* 219159307Spjd * This is the main function responsible for cryptography (ie. communication 220159307Spjd * with crypto(9) subsystem). 221214116Spjd * 222214116Spjd * BIO_READ: 223214118Spjd * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver 224214116Spjd * BIO_WRITE: 225214116Spjd * g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 226159307Spjd */ 227159307Spjdvoid 228159307Spjdg_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) 229159307Spjd{ 230159307Spjd struct g_eli_softc *sc; 231159307Spjd struct cryptop *crp; 232159307Spjd struct cryptodesc *crd; 233159307Spjd struct uio *uio; 234159307Spjd struct iovec *iov; 235213063Spjd u_int i, nsec, secsize; 236159307Spjd int err, error; 237213063Spjd off_t dstoff; 238159307Spjd size_t size; 239159307Spjd u_char *p, *data; 240159307Spjd 241159307Spjd G_ELI_LOGREQ(3, bp, "%s", __func__); 242159307Spjd 243159307Spjd bp->bio_pflags = wr->w_number; 244159307Spjd sc = wr->w_softc; 245159307Spjd secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize; 246159307Spjd 
nsec = bp->bio_length / secsize; 247159307Spjd 248159307Spjd /* 249159307Spjd * Calculate how much memory do we need. 250159307Spjd * We need separate crypto operation for every single sector. 251159307Spjd * It is much faster to calculate total amount of needed memory here and 252159307Spjd * do the allocation once instead of allocating memory in pieces (many, 253159307Spjd * many pieces). 254159307Spjd */ 255159307Spjd size = sizeof(*crp) * nsec; 256159307Spjd size += sizeof(*crd) * nsec; 257159307Spjd size += sizeof(*uio) * nsec; 258159307Spjd size += sizeof(*iov) * nsec; 259159307Spjd /* 260159307Spjd * If we write the data we cannot destroy current bio_data content, 261159307Spjd * so we need to allocate more memory for encrypted data. 262159307Spjd */ 263159307Spjd if (bp->bio_cmd == BIO_WRITE) 264159307Spjd size += bp->bio_length; 265159307Spjd p = malloc(size, M_ELI, M_WAITOK); 266159307Spjd 267159307Spjd bp->bio_inbed = 0; 268159307Spjd bp->bio_children = nsec; 269159307Spjd bp->bio_driver2 = p; 270159307Spjd 271159307Spjd if (bp->bio_cmd == BIO_READ) 272159307Spjd data = bp->bio_data; 273159307Spjd else { 274159307Spjd data = p; 275159307Spjd p += bp->bio_length; 276159307Spjd bcopy(bp->bio_data, data, bp->bio_length); 277159307Spjd } 278159307Spjd 279159307Spjd error = 0; 280213063Spjd for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { 281159307Spjd crp = (struct cryptop *)p; p += sizeof(*crp); 282159307Spjd crd = (struct cryptodesc *)p; p += sizeof(*crd); 283159307Spjd uio = (struct uio *)p; p += sizeof(*uio); 284159307Spjd iov = (struct iovec *)p; p += sizeof(*iov); 285159307Spjd 286159307Spjd iov->iov_len = secsize; 287159307Spjd iov->iov_base = data; 288159307Spjd data += secsize; 289159307Spjd 290159307Spjd uio->uio_iov = iov; 291159307Spjd uio->uio_iovcnt = 1; 292159307Spjd uio->uio_segflg = UIO_SYSSPACE; 293159307Spjd uio->uio_resid = secsize; 294159307Spjd 295159307Spjd crp->crp_sid = wr->w_sid; 296159307Spjd crp->crp_ilen = 
secsize; 297159307Spjd crp->crp_olen = secsize; 298159307Spjd crp->crp_opaque = (void *)bp; 299159307Spjd crp->crp_buf = (void *)uio; 300159307Spjd if (bp->bio_cmd == BIO_WRITE) 301159307Spjd crp->crp_callback = g_eli_crypto_write_done; 302159307Spjd else /* if (bp->bio_cmd == BIO_READ) */ 303159307Spjd crp->crp_callback = g_eli_crypto_read_done; 304159307Spjd crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL; 305159307Spjd if (g_eli_batch) 306159307Spjd crp->crp_flags |= CRYPTO_F_BATCH; 307159307Spjd crp->crp_desc = crd; 308159307Spjd 309159307Spjd crd->crd_skip = 0; 310159307Spjd crd->crd_len = secsize; 311159307Spjd crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; 312220922Spjd if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) 313213067Spjd crd->crd_flags |= CRD_F_KEY_EXPLICIT; 314159307Spjd if (bp->bio_cmd == BIO_WRITE) 315159307Spjd crd->crd_flags |= CRD_F_ENCRYPT; 316159307Spjd crd->crd_alg = sc->sc_ealgo; 317220922Spjd crd->crd_key = g_eli_key_hold(sc, dstoff, secsize); 318159307Spjd crd->crd_klen = sc->sc_ekeylen; 319213070Spjd if (sc->sc_ealgo == CRYPTO_AES_XTS) 320213070Spjd crd->crd_klen <<= 1; 321213063Spjd g_eli_crypto_ivgen(sc, dstoff, crd->crd_iv, 322159307Spjd sizeof(crd->crd_iv)); 323159307Spjd crd->crd_next = NULL; 324159307Spjd 325159307Spjd crp->crp_etype = 0; 326159307Spjd err = crypto_dispatch(crp); 327159307Spjd if (error == 0) 328159307Spjd error = err; 329159307Spjd } 330159307Spjd if (bp->bio_error == 0) 331159307Spjd bp->bio_error = error; 332159307Spjd} 333