/*
 * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
 * This driver requires the IXP400 Access Library that is available
 * from Intel in order to operate (or compile).
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2006-2011 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 *   1. distributions of this source code include the above copyright
 *      notice, this list of conditions and the following disclaimer;
 *
 *   2. distributions in binary form include the above copyright
 *      notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other associated materials;
 *
 *   3. the copyright holder's name is not used to endorse products
 *      built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <asm/scatterlist.h>

#include <IxTypes.h>
#include <IxOsBuffMgt.h>
#include <IxNpeDl.h>
#include <IxCryptoAcc.h>
#include <IxQMgr.h>
#include <IxOsServices.h>
#include <IxOsCacheMMU.h>

#include <cryptodev.h>
#include <uio.h>

#ifndef IX_MBUF_PRIV
#define IX_MBUF_PRIV(x) ((x)->priv)
#endif

struct ixp_data;

struct ixp_q {
	struct list_head	 ixp_q_list;
	struct ixp_data		*ixp_q_data;
	struct cryptop		*ixp_q_crp;
	struct cryptodesc	*ixp_q_ccrd;
	struct cryptodesc	*ixp_q_acrd;
	IX_MBUF				 ixp_q_mbuf;
	UINT8				*ixp_hash_dest; /* Location for hash in client buffer */
	UINT8				*ixp_hash_src; /* Location of hash in internal buffer */
	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
	unsigned char		*ixp_q_iv;
};

struct ixp_data {
	int					 ixp_registered;	/* is the context registered */
	int					 ixp_crd_flags;		/* detect direction changes */

	int					 ixp_cipher_alg;
	int					 ixp_auth_alg;

	UINT32				 ixp_ctx_id;
	UINT32				 ixp_hash_key_id;	/* used when hashing */
	IxCryptoAccCtx		 ixp_ctx;
	IX_MBUF				 ixp_pri_mbuf;
	IX_MBUF				 ixp_sec_mbuf;

	struct work_struct   ixp_pending_work;
	struct work_struct   ixp_registration_work;
	struct list_head	 ixp_q;				/* unprocessed requests */
};

#ifdef __ixp46X

#define	MAX_IOP_SIZE	64	/* words */
#define	MAX_OOP_SIZE	128

#define	MAX_PARAMS		3

struct ixp_pkq {
	struct list_head			 pkq_list;
	struct cryptkop				*pkq_krp;

	IxCryptoAccPkeEauInOperands	 pkq_op;
	IxCryptoAccPkeEauOpResult	 pkq_result;

	UINT32						 pkq_ibuf0[MAX_IOP_SIZE];
	UINT32						 pkq_ibuf1[MAX_IOP_SIZE];
	UINT32						 pkq_ibuf2[MAX_IOP_SIZE];
	UINT32						 pkq_obuf[MAX_OOP_SIZE];
};

static LIST_HEAD(ixp_pkq); /* current PK wait list */
static struct ixp_pkq *ixp_pk_cur;
static spinlock_t ixp_pkq_lock;

#endif /* __ixp46X */

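/* set when the access library runs out of contexts; cleared when a session is freed */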
static int ixp_blocked = 0;

static int32_t			 ixp_id = -1;
static struct ixp_data **ixp_sessions = NULL;
static u_int32_t		 ixp_sesnum = 0;

static int ixp_process(device_t, struct cryptop *, int);
static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ixp_freesession(device_t, u_int64_t);
#ifdef __ixp46X
static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *qcache;
#else
static struct kmem_cache *qcache;
#endif

#define debug ixp_debug
static int ixp_debug = 0;
module_param(ixp_debug, int, 0644);
MODULE_PARM_DESC(ixp_debug, "Enable debug");

static int ixp_init_crypto = 1;
module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");

static void ixp_process_pending(void *arg);
static void ixp_registration(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ixp_process_pending_wq(struct work_struct *work);
static void ixp_registration_wq(struct work_struct *work);
#endif

/*
 * dummy device structure
 */

static struct {
	softc_device_decl	sc_dev;
} ixpdev;

static device_method_t ixp_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
	DEVMETHOD(cryptodev_freesession,ixp_freesession),
	DEVMETHOD(cryptodev_process,	ixp_process),
#ifdef __ixp46X
	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
#endif
};

/*
 * Generate a new software session.
 */
static int
ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct ixp_data *ixp;
	u_int32_t i;
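/* use the caller-requested MAC length if one was given, otherwise the algorithm default */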
#define AUTH_LEN(cri, def) \
	(cri->cri_mlen ? cri->cri_mlen : (def))

	dprintk("%s():alg %d\n", __FUNCTION__, cri->cri_alg);
	if (sid == NULL || cri == NULL) {
		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	if (ixp_sessions) {
		for (i = 1; i < ixp_sesnum; i++)
			if (ixp_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (ixp_sessions == NULL || i == ixp_sesnum) {
		struct ixp_data **ixpd;

		if (ixp_sessions == NULL) {
			i = 1; /* We leave ixp_sessions[0] empty */
			ixp_sesnum = CRYPTO_SW_SESSIONS;
		} else
			ixp_sesnum *= 2;

		ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
		if (ixpd == NULL) {
			/* Reset session number */
			if (ixp_sesnum == CRYPTO_SW_SESSIONS)
				ixp_sesnum = 0;
			else
				ixp_sesnum /= 2;
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));

		/* Copy existing sessions */
		if (ixp_sessions) {
			memcpy(ixpd, ixp_sessions,
			    (ixp_sesnum / 2) * sizeof(struct ixp_data *));
			kfree(ixp_sessions);
		}

		ixp_sessions = ixpd;
	}

	ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
			SLAB_ATOMIC);
	if (ixp_sessions[i] == NULL) {
		ixp_freesession(NULL, i);
		dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
		return ENOBUFS;
	}

	*sid = i;

	ixp = ixp_sessions[i];
	memset(ixp, 0, sizeof(*ixp));

	ixp->ixp_cipher_alg = -1;
	ixp->ixp_auth_alg = -1;
	ixp->ixp_ctx_id = -1;
	INIT_LIST_HEAD(&ixp->ixp_q);

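	/* operate in place: the source mbuf is also used as the destination */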
	ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;

	while (cri) {
		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			ixp->ixp_cipher_alg = cri->cri_alg;
			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
						IX_CRYPTO_ACC_DES_IV_64;
			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
			break;

		case CRYPTO_3DES_CBC:
			ixp->ixp_cipher_alg = cri->cri_alg;
			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
						IX_CRYPTO_ACC_DES_IV_64;
			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
			break;

		case CRYPTO_RIJNDAEL128_CBC:
			ixp->ixp_cipher_alg = cri->cri_alg;
			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
			ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
			break;

		case CRYPTO_MD5:
		case CRYPTO_MD5_HMAC:
			ixp->ixp_auth_alg = cri->cri_alg;
			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
			ixp->ixp_ctx.authCtx.aadLen = 0;
			/* Only MD5_HMAC needs a key */
			if (cri->cri_alg == CRYPTO_MD5_HMAC) {
				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
				if (ixp->ixp_ctx.authCtx.authKeyLen >
						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
					printk(
						"ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
							cri->cri_klen);
					ixp_freesession(NULL, i);
					return EINVAL;
				}
				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			break;

		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
			ixp->ixp_auth_alg = cri->cri_alg;
			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
			ixp->ixp_ctx.authCtx.aadLen = 0;
			/* Only SHA1_HMAC needs a key */
			if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
				if (ixp->ixp_ctx.authCtx.authKeyLen >
						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
					printk(
						"ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
							cri->cri_klen);
					ixp_freesession(NULL, i);
					return EINVAL;
				}
				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			break;

		default:
			printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
			ixp_freesession(NULL, i);
			return EINVAL;
		}
		cri = cri->cri_next;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
	INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
#else
	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
	INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
#endif

	return 0;
}


/*
 * Free a session.
 */
static int
ixp_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	dprintk("%s()\n", __FUNCTION__);
	if (sid >= ixp_sesnum || ixp_sessions == NULL ||
			ixp_sessions[sid] == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	if (ixp_sessions[sid]) {
		if (ixp_sessions[sid]->ixp_ctx_id != -1) {
			ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
			ixp_sessions[sid]->ixp_ctx_id = -1;
		}
		kfree(ixp_sessions[sid]);
	}
	ixp_sessions[sid] = NULL;
	if (ixp_blocked) {
		ixp_blocked = 0;
		crypto_unblock(ixp_id, CRYPTO_SYMQ);
	}
	return 0;
}


/*
 * callback for when hash processing is complete
 */

static void
ixp_hash_perform_cb(
	UINT32 hash_key_id,
	IX_MBUF *bufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);

	if (bufp == NULL) {
		printk("ixp: NULL buf in %s\n", __FUNCTION__);
		return;
	}

	q = IX_MBUF_PRIV(bufp);
	if (q == NULL) {
		printk("ixp: NULL priv in %s\n", __FUNCTION__);
		return;
	}

	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
		/* On success, need to copy hash back into original client buffer */
		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
					SHA1_HASH_LEN : MD5_HASH_LEN);
	}
	else {
		printk("ixp: hash perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	/* Free internal buffer used for hashing */
	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));

	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}

/*
 * setup a request and perform it
 */
static void
ixp_q_process(struct ixp_q *q)
{
	IxCryptoAccStatus status;
	struct ixp_data *ixp = q->ixp_q_data;
	int auth_off = 0;
	int auth_len = 0;
	int crypt_off = 0;
	int crypt_len = 0;
	int icv_off = 0;
	char *crypt_func;

	dprintk("%s(%p)\n", __FUNCTION__, q);

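	/*
	 * Sort out the IV first.  For encryption we either use the IV supplied
	 * in the descriptor or generate a fresh one, and copy it back into the
	 * packet unless the caller says it is already there.  For decryption we
	 * take the IV from the descriptor or read it out of the packet.
	 */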
	if (q->ixp_q_ccrd) {
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
			} else {
				q->ixp_q_iv = q->ixp_q_iv_data;
				read_random(q->ixp_q_iv, ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen);
			}
			if ((q->ixp_q_ccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
						q->ixp_q_ccrd->crd_inject,
						ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
						(caddr_t) q->ixp_q_iv);
		} else {
			if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT)
				q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
			else {
				q->ixp_q_iv = q->ixp_q_iv_data;
				crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
						q->ixp_q_ccrd->crd_inject,
						ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
						(caddr_t) q->ixp_q_iv);
			}
		}

		if (q->ixp_q_acrd) {
			auth_off = q->ixp_q_acrd->crd_skip;
			auth_len = q->ixp_q_acrd->crd_len;
			icv_off  = q->ixp_q_acrd->crd_inject;
		}

		crypt_off = q->ixp_q_ccrd->crd_skip;
		crypt_len = q->ixp_q_ccrd->crd_len;
	} else { /* if (q->ixp_q_acrd) */
		auth_off = q->ixp_q_acrd->crd_skip;
		auth_len = q->ixp_q_acrd->crd_len;
		icv_off  = q->ixp_q_acrd->crd_inject;
	}

	if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
		struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			/*
			 * DAVIDM: fix this limitation one day by using a buffer pool
			 * and chaining; it is not currently needed for user/kernel
			 * space acceleration
			 */
			printk("ixp: Cannot handle fragmented skbs yet!\n");
			q->ixp_q_crp->crp_etype = ENOENT;
			goto done;
		}
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
	} else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
		struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
		if (uiop->uio_iovcnt != 1) {
			/*
			 * DAVIDM: fix this limitation one day by using a buffer pool
			 * and chaining; it is not currently needed for user/kernel
			 * space acceleration
			 */
			printk("ixp: Cannot handle more than 1 iovec yet!\n");
			q->ixp_q_crp->crp_etype = ENOENT;
			goto done;
		}
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
	} else /* contig buffer */ {
		IX_MBUF_MLEN(&q->ixp_q_mbuf)  =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
	}

	IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;

	if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
		/*
		 * For SHA1 and MD5 hash, need to create an internal buffer that is big
		 * enough to hold the original data + the appropriate padding for the
		 * hash algorithm.
		 */
		UINT8 *tbuf = NULL;

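		/*
		 * Size the buffer for the worst-case MD5/SHA1 padding: the data is
		 * followed by a 0x80 byte and a 64-bit length field (72 bits in all),
		 * rounded up to the next 512-bit block.
		 */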
		IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
			((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
		tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);

		if (tbuf == NULL) {
			printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
					IX_MBUF_MLEN(&q->ixp_q_mbuf));
			q->ixp_q_crp->crp_etype = ENOMEM;
			goto done;
		}
		memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);

		/* Set location in client buffer to copy hash into */
		q->ixp_hash_dest =
			&(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];

		IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;

		/* Set location in internal buffer for where hash starts */
		q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];

		crypt_func = "ixCryptoAccHashPerform";
		status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
				&q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
				&ixp->ixp_hash_key_id);
	}
	else {
		crypt_func = "ixCryptoAccAuthCryptPerform";
		status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
			NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
			q->ixp_q_iv);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
		q->ixp_q_crp->crp_etype = ENOMEM;
		goto done;
	}

	printk("ixp: %s failed %u\n", crypt_func, status);
	q->ixp_q_crp->crp_etype = EINVAL;

done:
	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}


/*
 * Because we cannot process the queue from the registration callback,
 * we do it here from a work queue.
 */

static void
ixp_process_pending(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;

	dprintk("%s(%p)\n", __FUNCTION__, arg);

	if (!ixp)
		return;

	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		ixp_q_process(q);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_process_pending_wq(struct work_struct *work)
{
	struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
	ixp_process_pending(ixp);
}
#endif

/*
 * callback for when context registration is complete
 */

static void
ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
{
	int i;
	struct ixp_data *ixp;
	struct ixp_q *q;

	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);

	/*
	 * free any buffer passed in to this routine
	 */
	if (bufp) {
		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
		kfree(IX_MBUF_MDATA(bufp));
		IX_MBUF_MDATA(bufp) = NULL;
	}

	for (i = 0; i < ixp_sesnum; i++) {
		ixp = ixp_sessions[i];
		if (ixp && ixp->ixp_ctx_id == ctx_id)
			break;
	}
	if (i >= ixp_sesnum) {
		printk("ixp: invalid context id %d\n", ctx_id);
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
		/* this is normal - registration happens in two steps and the first of the two buffers was just freed */
		dprintk("ixp: register not finished yet.\n");
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
		printk("ixp: register failed 0x%x\n", status);
		while (!list_empty(&ixp->ixp_q)) {
			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
			list_del(&q->ixp_q_list);
			q->ixp_q_crp->crp_etype = EINVAL;
			crypto_done(q->ixp_q_crp);
			kmem_cache_free(qcache, q);
		}
		return;
	}

	/*
	 * we are now registered, we cannot start processing the Q here
	 * or we get strange errors with AES (DES/3DES seem to be ok).
	 */
	ixp->ixp_registered = 1;
	schedule_work(&ixp->ixp_pending_work);
}


/*
 * callback for when data processing is complete
 */

static void
ixp_perform_cb(
	UINT32 ctx_id,
	IX_MBUF *sbufp,
	IX_MBUF *dbufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
			dbufp, status);

	if (sbufp == NULL) {
		printk("ixp: NULL sbuf in ixp_perform_cb\n");
		return;
	}

	q = IX_MBUF_PRIV(sbufp);
	if (q == NULL) {
		printk("ixp: NULL priv in ixp_perform_cb\n");
		return;
	}

	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
		printk("ixp: perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}


/*
 * registration is not callable at IRQ time, so we defer it to a task
 * queue; this routine completes the registration for us when the task
 * queue runs.
 *
 * Unfortunately this means we cannot tell OCF that the driver is blocked,
 * we do that on the next request.
 */

static void
ixp_registration(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;
	IX_MBUF *pri = NULL, *sec = NULL;
	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;

	if (!ixp) {
		printk("ixp: ixp_registration with no arg\n");
		return;
	}

	if (ixp->ixp_ctx_id != -1) {
		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
		ixp->ixp_ctx_id = -1;
	}

	if (list_empty(&ixp->ixp_q)) {
		printk("ixp: ixp_registration with no Q\n");
		return;
	}

	/*
	 * setup the primary and secondary buffers
	 */
	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
	if (q->ixp_q_acrd) {
		pri = &ixp->ixp_pri_mbuf;
		sec = &ixp->ixp_sec_mbuf;
		IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
		IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
		IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
		IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
	}

	/* Only need to register if a crypt op or HMAC op */
	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
				ixp->ixp_auth_alg == CRYPTO_MD5)) {
		status = ixCryptoAccCtxRegister(
					&ixp->ixp_ctx,
					pri, sec,
					ixp_register_cb,
					ixp_perform_cb,
					&ixp->ixp_ctx_id);
	}
	else {
		/* Otherwise we start processing pending q */
		schedule_work(&ixp->ixp_pending_work);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
		ixp_blocked = 1;
		/* perhaps we should return EAGAIN on queued ops ? */
		return;
	}

	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
	ixp->ixp_ctx_id = -1;

	/*
	 * everything waiting is toasted
	 */
	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		q->ixp_q_crp->crp_etype = ENOENT;
		crypto_done(q->ixp_q_crp);
		kmem_cache_free(qcache, q);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_registration_wq(struct work_struct *work)
{
	struct ixp_data *ixp = container_of(work, struct ixp_data,
								ixp_registration_work);
	ixp_registration(ixp);
}
#endif

/*
 * Process a request.
 */
static int
ixp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ixp_data *ixp;
	unsigned int lid;
	struct ixp_q *q = NULL;
	int status;

	dprintk("%s()\n", __FUNCTION__);

	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	if (ixp_blocked)
		return ERESTART;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	/*
	 * find the session we are using
	 */

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
			ixp_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	ixp = ixp_sessions[lid];

	/*
	 * setup a new request ready for queuing
	 */
	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
	if (q == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	/*
	 * save some cycles by only zeroing the important bits
	 */
	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
	q->ixp_q_ccrd = NULL;
	q->ixp_q_acrd = NULL;
	q->ixp_q_crp = crp;
	q->ixp_q_data = ixp;

	/*
	 * point the cipher and auth descriptors appropriately
	 * check that we have something to do
	 */
	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
		q->ixp_q_ccrd = crp->crp_desc;
	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
		q->ixp_q_acrd = crp->crp_desc;
	else {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	if (crp->crp_desc->crd_next) {
		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
			q->ixp_q_ccrd = crp->crp_desc->crd_next;
		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
			q->ixp_q_acrd = crp->crp_desc->crd_next;
		else {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
			goto done;
		}
	}

	/*
	 * If there is a direction change for this context then we mark it as
	 * unregistered and re-register it for the new direction.  This is not
	 * a very expensive operation and currently only tends to happen when
	 * user-space applications are doing benchmarks.
	 *
	 * DM - we should be checking for pending requests before unregistering.
	 */
	if (q->ixp_q_ccrd && ixp->ixp_registered &&
			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
		dprintk("%s - detected direction change on session\n", __FUNCTION__);
		ixp->ixp_registered = 0;
	}

	/*
	 * if we are registered, call straight into the perform code
	 */
	if (ixp->ixp_registered) {
		ixp_q_process(q);
		return 0;
	}

	/*
	 * the only part of the context not set in newsession is the direction
	 * dependent parts
	 */
	if (q->ixp_q_ccrd) {
		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
		} else {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
		}
	} else {
		/* q->ixp_q_acrd must be set if we are here */
		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
	}

	status = list_empty(&ixp->ixp_q);
	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
	if (status)
		schedule_work(&ixp->ixp_registration_work);
	return 0;

done:
	if (q)
		kmem_cache_free(qcache, q);
	crypto_done(crp);
	return 0;
}


#ifdef __ixp46X
/*
 * key processing support for the ixp465
 */


/*
 * copy a BN (LE) into a buffer (BE) and fill out the op appropriately.
 * Assumes the buffer is zeroed and only copies the significant bits.
 */

static int
ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
{
	unsigned char *src = (unsigned char *) p->crp_p;
	unsigned char *dst;
	int len, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
		dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
				bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
		return -1;
	}

	len = (bits + 31) / 32; /* the number of UINT32s needed */

	dst = (unsigned char *) &buf[len];
	dst--;

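	/* copy the little-endian source bytes in reverse so the word buffer ends up big-endian */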
	while (bits > 0) {
		*dst-- = *src++;
		bits -= 8;
	}

#if 0 /* no need to zero remaining bits as it is done during request alloc */
	while (dst > (unsigned char *) buf)
		*dst-- = '\0';
#endif

	op->pData = buf;
	op->dataLen = len;
	return 0;
}

/*
 * copy out the result, be as forgiving as we can about small output buffers
 */

static int
ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
{
	unsigned char *dst = (unsigned char *) p->crp_p;
	unsigned char *src = (unsigned char *) buf;
	int len, z, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	len = op->dataLen * sizeof(UINT32);

	/* skip leading zeroes to be small buffer friendly */
	z = 0;
	while (z < len && src[z] == '\0')
		z++;

	src += len;
	src--;
	len -= z;

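	/* copy the big-endian result out backwards, least significant byte first */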
	while (len > 0 && bits > 0) {
		*dst++ = *src--;
		len--;
		bits -= 8;
	}

	while (bits > 0) {
		*dst++ = '\0';
		bits -= 8;
	}

	if (len > 0) {
		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
				__FUNCTION__, len, z, p->crp_nbits / 8);
		return -1;
	}

	return 0;
}


/*
 * the parameter offsets for exp_mod
 */

#define IXP_PARAM_BASE 0
#define IXP_PARAM_EXP  1
#define IXP_PARAM_MOD  2
#define IXP_PARAM_RES  3

/*
 * key processing completion callback; it is also used to start processing
 * by passing a NULL for pResult
 */

static void
ixp_kperform_cb(
	IxCryptoAccPkeEauOperation operation,
	IxCryptoAccPkeEauOpResult *pResult,
	BOOL carryOrBorrow,
	IxCryptoAccStatus status)
{
	struct ixp_pkq *q, *tmp;
	unsigned long flags;

	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
			carryOrBorrow, status);

	/* handle a completed request */
	if (pResult) {
		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
			q = ixp_pk_cur;
			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
				q->pkq_krp->krp_status = ERANGE; /* could do better */
			} else {
				/* copy out the result */
				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
						&q->pkq_result, q->pkq_obuf))
					q->pkq_krp->krp_status = ERANGE;
			}
			crypto_kdone(q->pkq_krp);
			kfree(q);
			ixp_pk_cur = NULL;
		} else
			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
	}

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
		return;
	}

	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {

		list_del(&q->pkq_list);
		ixp_pk_cur = q;

		spin_unlock_irqrestore(&ixp_pkq_lock, flags);

		status = ixCryptoAccPkeEauPerform(
				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
				&q->pkq_op,
				ixp_kperform_cb,
				&q->pkq_result);

		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
			return; /* this callback will be re-entered when the operation completes */
		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
		} else {
			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
					__FUNCTION__, status);
		}
		q->pkq_krp->krp_status = ERANGE; /* could do better */
		crypto_kdone(q->pkq_krp);
		kfree(q);
		spin_lock_irqsave(&ixp_pkq_lock, flags);
	}
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
}


static int
ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
	struct ixp_pkq *q;
	int rc = 0;
	unsigned long flags;

	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
			krp->krp_param[IXP_PARAM_RES].crp_nbits);


	if (krp->krp_op != CRK_MOD_EXP) {
		krp->krp_status = EOPNOTSUPP;
		goto err;
	}

	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		krp->krp_status = ENOMEM;
		goto err;
	}

	/*
	 * The PKE engine does not appear to zero the output buffer
	 * appropriately, so we need to do it all here.
	 */
	memset(q, 0, sizeof(*q));

	q->pkq_krp = krp;
	INIT_LIST_HEAD(&q->pkq_list);

	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
			q->pkq_ibuf0))
		rc = 1;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
				&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
		rc = 2;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
				&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
		rc = 3;

	if (rc) {
		kfree(q);
		krp->krp_status = ERANGE;
		goto err;
	}

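	/* the PKE result length is expressed in 32-bit words, rounded up */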
	q->pkq_result.pData           = q->pkq_obuf;
	q->pkq_result.dataLen         =
			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	list_add_tail(&q->pkq_list, &ixp_pkq);
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);

	if (!ixp_pk_cur)
		ixp_kperform_cb(0, NULL, 0, 0);
	return (0);

err:
	crypto_kdone(krp);
	return (0);
}



#ifdef CONFIG_OCF_RANDOMHARVEST
/*
 * We run the random number generator output through SHA so that it
 * is FIPS compliant.
 */

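/* completion flag for ixp_hash_cb: set to 1 on success, to -status on failure */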
static volatile int sha_done = 0;
static unsigned char sha_digest[20];

static void
ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
{
	dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
	if (sha_digest != digest)
		printk("digest error\n");
	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		sha_done = 1;
	else
		sha_done = -status;
}

static int
ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
{
	IxCryptoAccStatus status;
	int i, n, rc;

	dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
	memset(buf, 0, maxwords * sizeof(*buf));
	status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
		dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
				__FUNCTION__, status);
		return 0;
	}

	/*
	 * run the random data through SHA to make it look more random
	 */

	n = sizeof(sha_digest); /* process digest bytes at a time */

	rc = 0;
	for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
		if ((maxwords - i) * sizeof(*buf) < n)
			n = (maxwords - i) * sizeof(*buf);
		sha_done = 0;
		status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
				(UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
		if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
			return -EIO;
		}
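		/* yield until the hash completion callback fires */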
		while (!sha_done)
			schedule();
		if (sha_done < 0) {
			dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
			return 0;
		}
		memcpy(&buf[i], sha_digest, n);
		rc += n / sizeof(*buf);
	}

	return rc;
}
#endif /* CONFIG_OCF_RANDOMHARVEST */

#endif /* __ixp46X */



/*
 * our driver startup and shutdown routines
 */

static int
ixp_init(void)
{
	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);

	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
		printk("ixCryptoAccInit failed, assuming already initialised!\n");

	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
				SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
				, NULL
#endif
				  );
	if (!qcache) {
		printk("failed to create Qcache\n");
		return -ENOENT;
	}

	memset(&ixpdev, 0, sizeof(ixpdev));
	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);

	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
				CRYPTOCAP_F_HARDWARE);
	if (ixp_id < 0)
		panic("IXP/OCF crypto device cannot initialize!");

#define	REGISTER(alg) \
	crypto_register(ixp_id,alg,0,0)

	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
#endif
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
#undef REGISTER

#ifdef __ixp46X
	spin_lock_init(&ixp_pkq_lock);
	/*
	 * we do not enable the go fast options here as they can potentially
	 * allow timing based attacks
	 *
	 * http://www.openssl.org/news/secadv_20030219.txt
	 */
	ixCryptoAccPkeEauExpConfig(0, 0);
	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
#ifdef CONFIG_OCF_RANDOMHARVEST
	crypto_rregister(ixp_id, ixp_read_random, NULL);
#endif
#endif

	return 0;
}

static void
ixp_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	crypto_unregister_all(ixp_id);
	ixp_id = -1;
	kmem_cache_destroy(qcache);
	qcache = NULL;
}

module_init(ixp_init);
module_exit(ixp_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
