/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF

/*
 * STM:
 *   /---------------------------------------\
 *   |					     | request complete
 *  \./					     |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *			    |		     | more scatter entries
 *			    \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
	struct scatterlist dummysg;
};

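/*
 * Derive the AES decryption key material from the tail of the expanded
 * encryption key schedule; only the portion the engine needs for
 * decryption is kept in aes_dec_key.
 */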
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

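/*
 * Copy exactly @len bytes from the request's source scatterlist into @dbuf,
 * advancing the sg mapping iterator across scatterlist entries as needed.
 */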
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}

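/*
 * Stage as much of the remaining request data in the SRAM input buffer as
 * fits (bounded by max_req_size), appending after the crypt_len bytes that
 * are already there.
 */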
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}

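/*
 * Program the engine for the next chunk of an AES request: build the
 * security accelerator descriptor, copy key/IV and input data into SRAM,
 * then start the accelerator.
 */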
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

}

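/*
 * Called once the whole cipher request has been processed: stop the sg
 * iterators and, for CBC, hand the final IV back to the caller.
 */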
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

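/*
 * Program the engine for the next chunk of a hash request, selecting the
 * FIRST/MID/LAST/NOT_FRAG fragment mode from how far the request has
 * progressed and whether the hardware can produce the final digest itself.
 */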
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		break;
	}

	op.mac_src_p =
		MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest =
		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv =
		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

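/*
 * Finish a digest in software via the fallback shash, either from the
 * buffered data alone (nothing was ever fed to the hardware) or from the
 * intermediate SHA1 state read back from the engine registers.
 */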
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now.... */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}

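/*
 * Completion for a hash chunk: save any unprocessed tail bytes, read back
 * the intermediate digest state from the engine registers, and on the last
 * chunk copy the final digest to the caller (or fall back to software for
 * oversized requests).
 */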
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	}
}

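/*
 * Run from the queue thread after the engine signals completion: copy the
 * output from SRAM back into the destination scatterlist (ciphers only),
 * then either submit the next chunk or complete the request.
 */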
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

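/* Count how many scatterlist entries are needed to cover total_bytes. */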
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (1) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

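/*
 * Start a new AES request: reset the progress state, set up source and
 * destination sg iterators and feed the first chunk to the engine.
 */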
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

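/*
 * Start a new hash request: reload any buffered tail from the previous
 * update, restore HMAC IVs and intermediate state, decide how many bytes
 * the hardware will see (full blocks only, unless it can also finalize),
 * and either kick the engine or complete the request purely in software.
 */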
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	int num_sgs, hw_bytes, old_extra_bytes, rc;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	if (unlikely(ctx->extra_bytes)) {
		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
		       ctx->extra_bytes);
		p->crypt_len = ctx->extra_bytes;
	}

	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

	if (unlikely(!ctx->first_hash)) {
		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

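/*
 * Engine queue thread: all work that touches the engine is serialized here.
 * It completes finished chunks, dequeues the next request when the engine
 * is idle, and dispatches it as either a cipher or a hash request.
 */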
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    container_of(async_req,
						 struct ablkcipher_request,
						 base);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

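/* Queue a request for the engine and wake the queue thread to process it. */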
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	/* dummy buffer of 4 bytes */
	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
	/* I think I'm allowed to do that... */
	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	if (!req->nbytes)
		return mv_hash_final(req);

	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}

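/*
 * Store the precomputed HMAC inner/outer hash states in ctx->ivs in the
 * big-endian layout that is later copied into SRAM for the engine.
 */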
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;

	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}

static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm
	   so I'm basically copying code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err =
			    crypto_shash_digest(&desc.shash, key, keylen, ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

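/*
 * Engine interrupt: acknowledge the accelerator-done status and defer the
 * actual completion work to the queue thread.
 */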
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= 16,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_ecb,
			.decrypt	=	mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		=	AES_BLOCK_SIZE,
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_cbc,
			.decrypt	=	mv_dec_aes_cbc,
		},
	},
};

struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		 .digestsize = SHA1_DIGEST_SIZE,
		 .base = {
			  .cra_name = "sha1",
			  .cra_driver_name = "mv-sha1",
			  .cra_priority = 300,
			  .cra_flags =
			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			  .cra_init = mv_cra_hash_sha1_init,
			  .cra_exit = mv_cra_hash_exit,
			  .cra_module = THIS_MODULE,
			  }
		 }
};

struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		 .digestsize = SHA1_DIGEST_SIZE,
		 .base = {
			  .cra_name = "hmac(sha1)",
			  .cra_driver_name = "mv-hmac-sha1",
			  .cra_priority = 300,
			  .cra_flags =
			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			  .cra_init = mv_cra_hash_hmac_sha1_init,
			  .cra_exit = mv_cra_hash_exit,
			  .cra_module = THIS_MODULE,
			  }
		 }
};

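/*
 * Probe: map the register and SRAM resources, start the queue thread,
 * request the interrupt, unmask the accelerator-done interrupt and register
 * the AES and SHA1/HMAC-SHA1 algorithms with the crypto API.
 */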
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");