// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * XCTR: XOR Counter mode - Adapted from ctr.c
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 * Copyright 2021 Google LLC
 */

/*
 * XCTR mode is a blockcipher mode of operation used to implement HCTR2. XCTR is
 * closely related to the CTR mode of operation; the main difference is that CTR
 * generates the keystream using E(CTR + IV) whereas XCTR generates the
 * keystream using E(CTR ^ IV). This allows implementations to avoid dealing
 * with multi-limb integers (as is required in CTR mode). XCTR is also specified
 * using little-endian arithmetic which makes it slightly faster on LE machines.
 *
 * See the HCTR2 paper for more details:
 *	Length-preserving encryption with HCTR2
 *	(https://eprint.iacr.org/2021/1441.pdf)
 */
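
/*
 * In the implementation below, the keystream for the i-th XCTR_BLOCKSIZE-byte
 * block of the message (counting from 0) is E(IV ^ (i + 1)), where (i + 1) is
 * encoded as a 32-bit little-endian integer and XORed into the first four
 * bytes of the IV; the remaining IV bytes are used as-is.  The counter is
 * XORed into the IV before each block cipher call and XORed out again
 * afterwards, so the IV buffer itself is never permanently modified.
 */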

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

/* For now this implementation is limited to 16-byte blocks for simplicity */
#define XCTR_BLOCKSIZE 16

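/*
 * Handle the final partial block (fewer than XCTR_BLOCKSIZE bytes).  The
 * keystream block E(IV ^ ctr) is generated into a stack buffer and only
 * walk->nbytes of it are XORed into the output; the second crypto_xor() of
 * the counter restores the caller's IV.
 */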
static void crypto_xctr_crypt_final(struct skcipher_walk *walk,
				   struct crypto_cipher *tfm, u32 byte_ctr)
{
	u8 keystream[XCTR_BLOCKSIZE];
	const u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

	crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
	crypto_cipher_encrypt_one(tfm, keystream, walk->iv);
	crypto_xor_cpy(dst, keystream, src, nbytes);
	crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
}

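/*
 * Encrypt/decrypt full blocks when source and destination buffers differ.
 * The keystream block is encrypted directly into the destination and the
 * source is then XORed in, so no temporary buffer is needed.  Returns the
 * number of leftover bytes (less than one block) for the caller to pass to
 * skcipher_walk_done().
 */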
static int crypto_xctr_crypt_segment(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm, u32 byte_ctr)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	const u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

	do {
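		/*
		 * Generate the keystream block: XOR the 32-bit little-endian
		 * counter into the IV, encrypt the result directly into dst,
		 * then XOR the counter back out so the IV is unchanged for
		 * the next block.
		 */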
		crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
		fn(crypto_cipher_tfm(tfm), dst, walk->iv);
		crypto_xor(dst, src, XCTR_BLOCKSIZE);
		crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));

		le32_add_cpu(&ctr32, 1);

		src += XCTR_BLOCKSIZE;
		dst += XCTR_BLOCKSIZE;
	} while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE);

	return nbytes;
}

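/*
 * Encrypt/decrypt full blocks when the operation is in place (source and
 * destination are the same buffer).  The keystream block is generated in a
 * suitably aligned temporary buffer and then XORed into the data.  Returns
 * the number of leftover bytes, as in the segment case.
 */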
static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm, u32 byte_ctr)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *data = walk->src.virt.addr;
	u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

	do {
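		/*
		 * As in the segment case, except that the keystream block is
		 * encrypted into the aligned temporary buffer first, since
		 * the data is transformed in place.
		 */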
		crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
		fn(crypto_cipher_tfm(tfm), keystream, walk->iv);
		crypto_xor(data, keystream, XCTR_BLOCKSIZE);
		crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));

		le32_add_cpu(&ctr32, 1);

		data += XCTR_BLOCKSIZE;
	} while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE);

	return nbytes;
}

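/*
 * Walk the request and dispatch each step to the in-place or out-of-place
 * helper for the full blocks, then handle any final partial block.  byte_ctr
 * tracks how many bytes have already been processed so that each step
 * continues the block counter where the previous one left off.
 */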
static int crypto_xctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;
	u32 byte_ctr = 0;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= XCTR_BLOCKSIZE) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_xctr_crypt_inplace(&walk, cipher,
							   byte_ctr);
		else
			nbytes = crypto_xctr_crypt_segment(&walk, cipher,
							   byte_ctr);

		byte_ctr += walk.nbytes - nbytes;
		err = skcipher_walk_done(&walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_xctr_crypt_final(&walk, cipher, byte_ctr);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

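/*
 * Instantiate the "xctr" template around an underlying block cipher.  Only
 * ciphers with a 16-byte block size are accepted.  The resulting skcipher is
 * advertised as a stream cipher (cra_blocksize = 1), with the chunksize set
 * to the underlying block size so that the walk only ever hands back a
 * partial block at the very end.
 */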
static int crypto_xctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	/* Block size must be 16 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize != XCTR_BLOCKSIZE)
		goto out_free_inst;

	/* XCTR mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;

	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
	 */
	inst->alg.chunksize = alg->cra_blocksize;

	inst->alg.encrypt = crypto_xctr_crypt;
	inst->alg.decrypt = crypto_xctr_crypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		inst->free(inst);
	}

	return err;
}

static struct crypto_template crypto_xctr_tmpl = {
	.name = "xctr",
	.create = crypto_xctr_create,
	.module = THIS_MODULE,
};

static int __init crypto_xctr_module_init(void)
{
	return crypto_register_template(&crypto_xctr_tmpl);
}

static void __exit crypto_xctr_module_exit(void)
{
	crypto_unregister_template(&crypto_xctr_tmpl);
}

subsys_initcall(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("xctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);