// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM	64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE	4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE		8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE	16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET	8
#define CONTROL_WORD_LEN	8
#define KEY2_OFFSET		48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE		12
#define SHA256_TRUNC_DIGEST_SIZE	16
#define SHA384_TRUNC_DIGEST_SIZE	24
#define SHA512_TRUNC_DIGEST_SIZE	32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	enum otx_cptpf_type pf_type;
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
	.count = ATOMIC_INIT(0)
};

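/*
 * Pick the SE device (and implicitly the CPT VF queue) that will service a
 * request. Each VF exposes one instruction queue, so requests are spread
 * across the available queues by taking the submitting CPU number modulo
 * the number of registered SE devices.
 */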
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count, ret = 0;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();

	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
		/*
		 * On the OcteonTX platform there is one CPT instruction queue
		 * bound to each VF. We get maximum performance if one CPT
		 * queue is available for each CPU, otherwise CPT queues need
		 * to be shared between CPUs.
		 */
		if (*cpu_num >= count)
			*cpu_num %= count;
		*pdev = se_devices.desc[*cpu_num].dev;
	} else {
		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
		ret = -EINVAL;
	}
	put_cpu();

	return ret;
}

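/*
 * For the NULL-cipher AEAD decrypt path the HMAC computed by the hardware
 * (hmac_calc) has to be compared against the HMAC that came with the request
 * (hmac_recv); a mismatch means the message failed authentication.
 */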
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
	struct otx_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx_dma(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (!cpt_info)
		goto complete;

	cpt_req = cpt_info->req;
	if (!status) {
		/*
		 * When the selected cipher is NULL we need to manually
		 * verify whether the calculated hmac value matches
		 * the received hmac value.
		 */
		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
		    !cpt_req->is_enc)
			status = validate_hmac_cipher_null(cpt_req);
	}
	pdev = cpt_info->pdev;
	do_request_cleanup(pdev, cpt_info);

complete:
	if (areq)
		crypto_request_complete(areq, status);
}

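/*
 * For CBC modes the crypto API expects the IV buffer to hold the last
 * ciphertext block on completion so that chained requests can continue.
 * On encryption that block sits at the end of the destination; on in-place
 * decryption it was saved in iv_out before the data was overwritten.
 */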
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx_cpt_req_info *req_info;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx_cpt_req_ctx *rctx;
	struct otx_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx_dma(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (cpt_info) {
			pdev = cpt_info->pdev;
			do_request_cleanup(pdev, cpt_info);
		}
		crypto_request_complete(areq, status);
	}
}

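/*
 * Walk a scatterlist and append each segment to the request's gather (input)
 * or scatter (output) pointer list, bumping dlen/rlen and the argument count
 * as we go. The hardware later DMAs these segments using the gather/scatter
 * DMA mode selected in the control word.
 */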
static inline void update_input_data(struct otx_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length - offset);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}

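/*
 * Build the FLEXICRYPTO (FC) context for an skcipher request: select the
 * gather/scatter DMA mode, the encrypt/decrypt minor opcode, copy the key
 * and IV into the context, and queue the control word plus the context
 * itself as the first two gather entries. For in-place CBC decryption the
 * last ciphertext block is saved in iv_out so it can be copied back as the
 * next IV once the data has been overwritten.
 */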
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
		    ctx->cipher_type == OTX_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

	/*
	 * Store packet data information in the offset control word,
	 * which occupies the first 8 bytes of the input.
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt =  0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT buffer processing:
	 * AES encryption/decryption output is received in the
	 * following format
	 *
	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
	 * [ 16 bytes      ][ requested enc/dec data length (AES CBC) ]
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}

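/*
 * Common skcipher encrypt/decrypt path: build the gather and scatter lists,
 * pick an SE device for the submitting CPU and hand the request to the CPT
 * request manager. The call is asynchronous; completion is reported through
 * otx_cpt_skcipher_callback().
 */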
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = (void *)otx_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = 0;

	/*
	 * We perform an asynchronous send and once the request is
	 * completed the driver will notify us through the registered
	 * callback functions.
	 */
	status = otx_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for the skcipher_request is allocated
	 * since the cryptd daemon uses this memory for its
	 * request_ctx information.
	 */
	crypto_skcipher_set_reqsize_dma(
		tfm, sizeof(struct otx_cpt_req_ctx) +
		     sizeof(struct skcipher_request));

	return 0;
}

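/*
 * Per-transform AEAD setup. For the real ciphers a software shash of the
 * selected hash algorithm is allocated so that the HMAC ipad/opad partial
 * hashes can be precomputed at setkey time; the NULL cipher uses the plain
 * HMAC opcode and needs no software hash.
 */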
static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When the selected cipher is NULL we use the HMAC opcode instead of
	 * the FLEXICRYPTO opcode, therefore we don't need a HASH algorithm
	 * for calculating ipad and opad.
	 */
	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));

	return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);
}

/*
 * This sets the Integrity Check Value (ICV) length, i.e. the length of the
 * authentication tag.
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

	switch (ctx->mac_type) {
	case OTX_CPT_SHA1:
		if (authsize != SHA1_DIGEST_SIZE &&
		    authsize != SHA1_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA256:
		if (authsize != SHA256_DIGEST_SIZE &&
		    authsize != SHA256_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA384:
		if (authsize != SHA384_DIGEST_SIZE &&
		    authsize != SHA384_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA512:
		if (authsize != SHA512_DIGEST_SIZE &&
		    authsize != SHA512_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_MAC_NULL:
		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
			if (authsize != AES_GCM_ICV_SIZE)
				return -EINVAL;
		} else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	tfm->authsize = authsize;
	return 0;
}

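/* Allocate a synchronous shash descriptor sized for the given algorithm. */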
static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
	cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
	__be64 *dst = buf;
	u64 *src = buf;
	int i = 0;

	for (i = 0 ; i < len / 8; i++, src++, dst++)
		*dst = cpu_to_be64p(src);
}

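/*
 * Copy the exported partial hash state into the ipad/opad buffers that are
 * handed to the firmware. The 32-bit (SHA1/SHA256) or 64-bit (SHA384/SHA512)
 * state words are byte-swapped to big endian first, which appears to be the
 * layout the CPT microcode expects for the precomputed HMAC pads.
 */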
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA384:
	case OTX_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

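/*
 * Precompute the HMAC inner and outer pads (RFC 2104): the key is hashed
 * down to the digest size if it is longer than one block, zero-padded to the
 * block size, then XORed with 0x36 (ipad) and 0x5c (opad). One compression
 * round is run over each pad and the resulting partial hash states are
 * exported and stored in ctx->ipad/ctx->opad for the hardware to continue
 * from.
 */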
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * The partial hash calculated by the software algorithm is
	 * retrieved for IPAD & OPAD.
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					   const unsigned char *key,
					   unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;
	int status = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		goto badkey;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	status = aead_hmac_init(cipher);
	if (status)
		goto badkey;

	return 0;
badkey:
	return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;
	return 0;
badkey:
	return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
				       const unsigned char *key,
				       unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

	/*
	 * For AES GCM we expect to get the encryption key (16, 24 or 32
	 * bytes) followed by the salt (4 bytes).
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return 0;
}

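/*
 * Build the FC context for a real AEAD cipher: record where the encrypted
 * data starts (after the associated data), load the key, IV or salt and the
 * precomputed ipad/opad into the context, and queue the control word and
 * the context as the first two gather entries. param1/param2 carry the
 * cipher and authentication lengths expected by the FC opcode.
 */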
static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX_CPT_AES_CBC:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX_CPT_AES_GCM:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

	/*
	 * Store packet data information in the offset control word,
	 * which occupies the first 8 bytes of the input.
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);

	return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen =  req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
					  u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info =  &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen +  req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->outcnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
					      u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen =  req->cryptlen + req->assoclen;
	else
		inputlen =  req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info =  &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen =  req->cryptlen + req->assoclen;
	else
		inputlen =  req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy the payload to the destination.
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr) {
			status = -ENOMEM;
			goto error;
		}

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario the hmac needs
		 * to be appended after the payload.
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst) {
				status = -ENOENT;
				goto error;
			}
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario the hmac calculated for the
		 * received payload needs to be compared with the
		 * received hmac.
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len) {
			status = -EINVAL;
			goto error;
		}

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->outcnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
error:
	return status;
}

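/*
 * Common AEAD request path: build the input and output lists for either the
 * regular FC opcode or the NULL-cipher HMAC opcode, check the request size
 * against the CPT limit and submit it asynchronously to an SE device;
 * completion is reported through otx_cpt_aead_callback().
 */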
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct pci_dev *pdev;
	u32 status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	req_info->callback = otx_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		status = create_aead_output_list(req, enc,
						 crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
		status = create_aead_null_input_list(req, enc,
						     crypto_aead_authsize(tfm));
		if (status)
			return status;
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = 0;

	status = otx_cpt_do_request(pdev, req_info, cpu_num);
	/*
	 * We perform an asynchronous send and once the request is
	 * completed the driver will notify us through the registered
	 * callback functions.
	 */
	return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_xts_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_gcm_aes_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
			return true;
	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
			return true;
	return false;
}

static inline int cpt_register_algs(void)
{
	int i, err = 0;

	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

		err = crypto_register_skciphers(otx_cpt_skciphers,
						ARRAY_SIZE(otx_cpt_skciphers));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx_cpt_skciphers,
					    ARRAY_SIZE(otx_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx_cpt_skciphers,
				    ARRAY_SIZE(otx_cpt_skciphers));
	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

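/*
 * sort() helpers used to keep the per-type device tables ordered by PCI
 * devfn, which keeps the CPU-to-queue mapping independent of probe order.
 */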
static int compare_func(const void *lptr, const void *rptr)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	swap(*ldesc, *rdesc);
}

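/*
 * Register a VF with the per-type device table. Crypto algorithms are
 * registered with the kernel crypto API only once all expected SE devices
 * (num_devices) have been added.
 */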
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	switch (engine_type) {
	case OTX_CPT_SE_TYPES:
		count = atomic_read(&se_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		se_devices.desc[count].pf_type = pf_type;
		se_devices.desc[count].num_queues = num_queues;
		se_devices.desc[count++].dev = pdev;
		atomic_inc(&se_devices.count);

		if (atomic_read(&se_devices.count) == num_devices &&
		    is_crypto_registered == false) {
			if (cpt_register_algs()) {
				dev_err(&pdev->dev,
				   "Error in registering crypto algorithms\n");
				ret =  -EINVAL;
				goto err;
			}
			try_module_get(mod);
			is_crypto_registered = true;
		}
		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	case OTX_CPT_AE_TYPES:
		count = atomic_read(&ae_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		ae_devices.desc[count].pf_type = pf_type;
		ae_devices.desc[count].num_queues = num_queues;
		ae_devices.desc[count++].dev = pdev;
		atomic_inc(&ae_devices.count);
		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	default:
		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
		ret = BAD_OTX_CPTVF_TYPE;
	}
err:
	mutex_unlock(&mutex);
	return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++)
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto exit;
	}

	if (engine_type != OTX_CPT_AE_TYPES) {
		if (atomic_dec_and_test(&se_devices.count) &&
		    !is_any_alg_used()) {
			cpt_unregister_algs();
			module_put(mod);
			is_crypto_registered = false;
		}
	} else
		atomic_dec(&ae_devices.count);
exit:
	mutex_unlock(&mutex);
}