// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers; it handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

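/*
 * Walk state flags: PHYS marks an asynchronous walk whose write-backs are
 * queued until skcipher_walk_complete(); SLOW means the current chunk was
 * bounced through an aligned buffer because a scatterlist entry was smaller
 * than a full block; COPY means it was bounced through walk->page to fix up
 * misalignment; DIFF means source and destination are mapped separately;
 * SLEEP permits GFP_KERNEL allocations and yielding between steps.
 */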
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

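/*
 * A write-back queued by an asynchronous walk: @len bytes, held at @data or
 * in the trailing @buffer when @data is NULL, that still need to be copied
 * to the destination position saved in @dst by skcipher_walk_complete().
 */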
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
	struct skcipher_alg *alg)
{
	return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

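/**
 * skcipher_walk_done() - finish one step of a skcipher walk
 * @walk: the skcipher_walk
 * @err: number of bytes *not* processed (>= 0) from walk->nbytes,
 *	 or a -errno value to terminate the walk due to an error
 *
 * Writes back or unmaps the buffers set up by the previous step, advances
 * the scatterlist cursors and, if any data remains, maps the next chunk of
 * input and output.
 *
 * Return: 0 on success, with walk->nbytes set to the size of the next chunk
 *	   (or zero once the walk has finished), or a -errno value on failure.
 */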
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

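/**
 * skcipher_walk_complete() - finish an asynchronous skcipher walk
 * @walk: the skcipher_walk started with skcipher_walk_async()
 * @err: the completion status of the asynchronous operation
 *
 * On success, flushes the write-backs queued during the walk to their
 * destinations and copies the IV back to its original location; on error
 * the queued write-backs are discarded.  The remaining buffers are then
 * released.
 */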
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

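/*
 * Slow path: the scatterlist entries are too small to hold a full block
 * contiguously, so bounce it through a buffer that is aligned to the walk's
 * alignmask and does not straddle a page boundary.
 */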
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

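/*
 * Copy path: the scatterlist offsets violate the algorithm's alignment
 * mask, so stage the chunk in the preallocated walk->page buffer; the
 * result is copied back in skcipher_walk_done() (or queued for
 * skcipher_walk_complete() on an asynchronous walk).
 */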
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

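/*
 * Fast path: everything is suitably aligned, so the chunk is processed in
 * place; the destination is mapped separately only when it does not
 * coincide with the source.
 */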
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

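/* Clamp the next chunk and dispatch to the slow, copy or fast path. */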
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

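/*
 * Set up the walk, bouncing a misaligned IV into an aligned buffer if
 * necessary, and take the first step.
 */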
static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

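/**
 * skcipher_walk_virt() - start a skcipher walk over virtual addresses
 * @walk: the skcipher_walk to initialise
 * @req: the skcipher_request to walk over
 * @atomic: if true, the walk may not sleep even if the request flags would
 *	    otherwise allow it
 *
 * Return: 0 on success, with walk->nbytes set to the size of the first
 *	   chunk (zero for an empty request), or a -errno value on failure.
 */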
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else if (alg->co.base.cra_type != &crypto_skcipher_type)
		ret = crypto_lskcipher_encrypt_sg(req);
	else
		ret = alg->encrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else if (alg->co.base.cra_type != &crypto_skcipher_type)
		ret = crypto_lskcipher_decrypt_sg(req);
	else
		ret = alg->decrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

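/*
 * For skciphers backed by an lskcipher, the request context holds the IV
 * followed by the lskcipher state, aligned to the algorithm's alignmask
 * (see the reqsize computation in crypto_skcipher_init_tfm()); export and
 * import simply copy the state part of that buffer.
 */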
static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = skcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_skcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}


static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
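
/*
 * A minimal sketch of a template ->create() callback built on
 * skcipher_alloc_instance_simple(), modelled on what simple mode templates
 * such as "ecb" do.  example_create, example_encrypt and example_decrypt
 * are hypothetical names standing in for whatever a real template supplies:
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */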

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);