// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for a detailed
 * explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
		.name = "SM4-XTS",
		.cipher_str = "xts(sm4)",
		.keysize = 32,
		.ivsize = 16,
	},
};
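
/*
 * A note on .keysize: AES-256-XTS is 64 bytes because XTS takes two
 * 256-bit AES keys (one for the data, one for the tweak), and SM4-XTS is
 * 32 bytes for the analogous pair of 128-bit SM4 keys.  Adiantum's 32
 * bytes are a single XChaCha12 key from which its subkeys are derived.
 */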

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

/**
 * bio_crypt_set_ctx() - Attach an inline encryption context to a bio
 * @bio: bio to attach the context to
 * @key: the blk_crypto_key to en/decrypt @bio with
 * @dun: the data unit number of the first data unit in @bio
 * @gfp_mask: memory allocation flags; must include __GFP_DIRECT_RECLAIM
 */
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
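
/*
 * Example (an illustrative sketch, not code from this file): a filesystem
 * would typically attach the crypt context just before submitting the bio,
 * where "inode_key" and "first_lblk" are hypothetical caller-side names:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_lblk };
 *
 *	bio_crypt_set_ctx(bio, inode_key, dun, GFP_NOFS);
 *	submit_bio(bio);
 *
 * GFP_NOFS includes __GFP_DIRECT_RECLAIM, so the mempool allocation in
 * bio_crypt_set_ctx() cannot fail.
 */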

/* Free @bio's crypt context and clear the pointer to it. */
void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

/* Copy @src's crypt context to @dst, e.g. when cloning a bio. */
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
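
/*
 * For example, with dun[0] == U64_MAX and inc == 1, dun[0] wraps around
 * to 0 and a carry of 1 propagates into dun[1]; no higher limb changes
 * unless dun[1] also overflows.
 */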

/*
 * Called when @bio is advanced by @bytes, to advance the DUN in the bio's
 * crypt context accordingly.
 */
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
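
/*
 * For example, with 512-byte data units, bc_dun[0] == U64_MAX, and
 * bytes == 512 (i.e. a carry of 1), the DUNs are contiguous only if
 * next_dun[0] == 0 and next_dun[1] == bc_dun[1] + 1, since the carry out
 * of limb 0 must land in limb 1.
 */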

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are contiguous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
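
/*
 * For example, with a 4096-byte data unit size, a 512-byte bvec, or one
 * starting at offset 512 within its page, fails this check, since
 * en/decryption can only be done on complete data units.
 */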

/* Acquire a keyslot for @rq's key from its queue's crypto profile. */
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

/* Release the keyslot that @rq holds. */
void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into two - the first of which will
 * continue to be processed and the second of which will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has a bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_crypto_config_supported_natively(bio->bi_bdev,
						 &bc_key->crypto_cfg))
		return true;
	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

/* Initialize @rq's crypt context from @bio's, allocating one if needed. */
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
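
/*
 * Example (an illustrative sketch): initializing an AES-256-XTS key with
 * 512-byte data units and 8-byte DUNs, where "raw" is a hypothetical
 * 64-byte buffer holding the raw key:
 *
 *	struct blk_crypto_key blk_key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 512);
 *
 * dun_bytes == 8 is valid here because it doesn't exceed the mode's
 * ivsize of 16, and 512 is a power of 2 as required.
 */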

/*
 * Check whether @bdev's inline encryption hardware can handle @cfg directly,
 * i.e. without the blk-crypto-fallback.
 */
bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_crypto_config_supported_natively(bdev, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
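
/*
 * The overall key lifecycle for an upper layer is thus (an illustrative
 * ordering; error handling omitted):
 *
 *	blk_crypto_init_key(&key, ...);
 *	blk_crypto_start_using_key(bdev, &key);	(setup-time; may sleep)
 *	...
 *	bio_crypt_set_ctx(bio, &key, dun, gfp);	(per-I/O)
 *	submit_bio(bio);
 *	...
 *	blk_crypto_evict_key(bdev, &key);	(before freeing the key)
 */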

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key from
 * the keyslot management structures and evicts it from any underlying hardware
 * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key.  It must be
 * called for every block_device the key may have been used on.  The key must no
 * longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int err;

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		err = __blk_crypto_evict_key(q->crypto_profile, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away.  There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);