/* arch/cris/arch-v32/drivers/cryptocop.c
 * (from netgear-WNDR4500v2-V1.0.0.60_1.0.38 / linux-2.6 source tree)
 */
1/* $Id: cryptocop.c,v 1.1.1.1 2007-08-03 18:51:41 $
2 *
3 * Stream co-processor driver for the ETRAX FS
4 *
5 *    Copyright (C) 2003-2005  Axis Communications AB
6 */
7
8#include <linux/init.h>
9#include <linux/sched.h>
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/string.h>
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/spinlock.h>
16#include <linux/stddef.h>
17
18#include <asm/uaccess.h>
19#include <asm/io.h>
20#include <asm/atomic.h>
21
22#include <linux/list.h>
23#include <linux/interrupt.h>
24
25#include <asm/signal.h>
26#include <asm/irq.h>
27
28#include <asm/arch/dma.h>
29#include <asm/arch/hwregs/dma.h>
30#include <asm/arch/hwregs/reg_map.h>
31#include <asm/arch/hwregs/reg_rdwr.h>
32#include <asm/arch/hwregs/intr_vect_defs.h>
33
34#include <asm/arch/hwregs/strcop.h>
35#include <asm/arch/hwregs/strcop_defs.h>
36#include <asm/arch/cryptocop.h>
37
38
39
40#define DESCR_ALLOC_PAD  (31)
41
/* One driver-managed DMA descriptor.  The hardware descriptor lives inside
 * dma_descr_buf at a 32-byte aligned address; see alloc_cdesc(). */
struct cryptocop_dma_desc {
	char *free_buf; /* If non-null will be kfreed in free_cdesc() */
	dma_descr_data *dma_descr; /* Aligned pointer into dma_descr_buf below. */

	/* Backing storage for the hardware descriptor; DESCR_ALLOC_PAD (31)
	 * extra bytes guarantee a 32-byte aligned address exists inside. */
	unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];

	unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */
	struct cryptocop_dma_desc *next;
};
51
52
/* Internal (driver-side) state of one operation.  The structure is placed
 * inside a larger kmalloc() block (alloc_ptr) so that ctx_out/ctx_in end up
 * 32-byte aligned -- see cryptocop_setup_dma_list(). */
struct cryptocop_int_operation{
	void                        *alloc_ptr; /* Start of the underlying allocation. */
	cryptocop_session_id        sid; /* Session this operation belongs to. */

	/* DMA context descriptors for the out and in channels. */
	dma_descr_context           ctx_out;
	dma_descr_context           ctx_in;

	/* DMA descriptors allocated by driver. */
	struct cryptocop_dma_desc   *cdesc_out;
	struct cryptocop_dma_desc   *cdesc_in;

	/* Strcop config to use. */
	cryptocop_3des_mode         tdes_mode;
	cryptocop_csum_type         csum_mode;

	/* DMA descrs provided by consumer. */
	dma_descr_data              *ddesc_out;
	dma_descr_data              *ddesc_in;
};
72
73
/* Per-transform bookkeeping used while building the DMA lists for one
 * operation (see cryptocop_setup_dma_list() and friends). */
struct cryptocop_tfrm_ctx {
	cryptocop_tfrm_id tid;
	unsigned int blocklength; /* Transform block size in bytes. */

	/* Bytes of DMA input preceding this transform's data (discarded for
	 * mem2mem; see append_input_descriptors()). */
	unsigned int start_ix;

	struct cryptocop_tfrm_cfg *tcfg; /* Consumer config; NULL if transform unused. */
	struct cryptocop_transform_ctx *tctx; /* Session-level transform state. */

	unsigned char previous_src;
	unsigned char current_src;

	/* Values to use in metadata out. */
	unsigned char hash_conf;
	unsigned char hash_mode;
	unsigned char ciph_conf;
	unsigned char cbcmode;
	unsigned char decrypt;

	unsigned int requires_padding:1; /* Hashes/checksums need end padding. */
	unsigned int strict_block_length:1; /* Ciphers consume whole blocks only. */
	unsigned int active:1;
	unsigned int done:1;
	size_t consumed; /* Bytes fed to the unit so far (drives pad creation). */
	size_t produced; /* Bytes of output the unit will deliver. */

	/* Pad (input) descriptors to put in the DMA out list when the transform
	 * output is put on the DMA in list. */
	struct cryptocop_dma_desc *pad_descs;

	struct cryptocop_tfrm_ctx *prev_src;
	struct cryptocop_tfrm_ctx *curr_src;

	/* Mapping to HW. */
	unsigned char unit_no; /* strcop unit selector (src_dma, src_des, src_aes, ...). */
};
110
111
/* Node in a singly linked list of session ids; presumably tracks the
 * sessions owned by one open file handle -- see cryptocop_open()/
 * cryptocop_release() (not visible in this chunk). */
struct cryptocop_private{
	cryptocop_session_id sid;
	struct cryptocop_private *next;
};
116
/* Session bookkeeping: per-transform state plus the session objects kept in
   the cryptocop_sessions list. */
118
/* Per-session state for one configured transform. */
struct cryptocop_transform_ctx{
	struct cryptocop_transform_init init; /* Setup parameters (algorithm, key, modes). */
	/* Cached AES decryption key, computed lazily in setup_key_dl_desc(). */
	unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
	unsigned int dec_key_set:1; /* 1 once dec_key has been filled in. */

	struct cryptocop_transform_ctx *next;
};
126
127
/* One crypto session: an id plus the list of its transform contexts.
 * Sessions are linked into the global cryptocop_sessions list. */
struct cryptocop_session{
	cryptocop_session_id sid;

	struct cryptocop_transform_ctx *tfrm_ctx;

	struct cryptocop_session *next;
};
135
/* Priority levels for jobs sent to the cryptocop.  Checksum operations from
   kernel have highest priority since TCPIP stack processing must not
   be a bottleneck. */
typedef enum {
	cryptocop_prio_kernel_csum = 0,
	cryptocop_prio_kernel = 1,
	cryptocop_prio_user = 2,
	cryptocop_prio_no_prios = 3 /* Number of levels; sizes cryptocop_job_queues[]. */
} cryptocop_queue_priority;
145
/* One job queue per priority level. */
struct cryptocop_prio_queue{
	struct list_head jobs; /* Queued cryptocop_prio_job nodes. */
	cryptocop_queue_priority prio;
};
150
/* A queued job: the consumer operation plus the driver's internal form. */
struct cryptocop_prio_job{
	struct list_head node; /* Links into a cryptocop_prio_queue.jobs list. */
	cryptocop_queue_priority prio;

	struct cryptocop_operation *oper; /* Consumer-supplied operation. */
	struct cryptocop_int_operation *iop; /* Driver-internal representation. */
};
158
/* Callback context for ioctl-submitted jobs; 'processed' presumably flags
 * completion for the waiter on cryptocop_ioc_process_wq -- confirm against
 * the ioctl implementation (not visible in this chunk). */
struct ioctl_job_cb_ctx {
	unsigned int processed:1;
};
162
163
/* List of active sessions, protected by cryptocop_sessions_lock. */
static struct cryptocop_session *cryptocop_sessions = NULL;
spinlock_t cryptocop_sessions_lock;

/* Next Session ID to assign. */
static cryptocop_session_id next_sid = 1;

/* Pad for checksum. */
static const char csum_zero_pad[1] = {0x00};

/* Trash buffer for mem2mem operations. */
#define MEM2MEM_DISCARD_BUF_LENGTH  (512)
static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];

/* Descriptor pool.  alloc_cdesc() serves GFP_ATOMIC requests from this
 * static pool; descr_pool_lock guards the free list and its count. */
#define CRYPTOCOP_DESCRIPTOR_POOL_SIZE   (100)
static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
static struct cryptocop_dma_desc *descr_pool_free_list;
static int descr_pool_no_free;
static spinlock_t descr_pool_lock;

/* Lock to stop cryptocop to start processing of a new operation. The holder
   of this lock MUST call cryptocop_start_job() after it is unlocked. */
spinlock_t cryptocop_process_lock;

/* One queue per priority level; see cryptocop_queue_priority. */
static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
static spinlock_t cryptocop_job_queue_lock;
/* Job currently owned by the hardware, if any. */
static struct cryptocop_prio_job *cryptocop_running_job = NULL;
static spinlock_t running_job_lock;

/* The interrupt handler appends completed jobs to this list. The scheduled
 * tasklet removes them upon sending the response to the crypto consumer. */
static struct list_head cryptocop_completed_jobs;
static spinlock_t cryptocop_completed_jobs_lock;

/* Wait queue for processes blocking on ioctl job completion. */
DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
200
/** Local functions. **/

/* File operation entry points (see cryptocop_fops). */
static int cryptocop_open(struct inode *, struct file *);

static int cryptocop_release(struct inode *, struct file *);

static int cryptocop_ioctl(struct inode *inode, struct file *file,
			   unsigned int cmd, unsigned long arg);

/* Job queue handling. */
static void cryptocop_start_job(void);

static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation);
static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation);

static int cryptocop_job_queue_init(void);
static void cryptocop_job_queue_close(void);

/* Hash length-padding helpers; used by create_pad_descriptor(). */
static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);

static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);

/* Session/transform lookup and validation. */
static int transform_ok(struct cryptocop_transform_init *tinit);

static struct cryptocop_session *get_session(cryptocop_session_id sid);

static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid);

static void delete_internal_operation(struct cryptocop_int_operation *iop);

/* Precomputes the AES decrypt key from the encryption key
 * (see the use in setup_key_dl_desc()). */
static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned  char *key, unsigned int keylength);

/* Module init/exit. */
static int init_stream_coprocessor(void);

static void __exit exit_stream_coprocessor(void);
235
/*#define LDEBUG*/
#ifdef LDEBUG
/* Debug build: DEBUG/DEBUG_API expand their statement argument in place. */
#define DEBUG(s) s
#define DEBUG_API(s) s
static void print_cryptocop_operation(struct cryptocop_operation *cop);
static void print_dma_descriptors(struct cryptocop_int_operation *iop);
static void print_strcop_crypto_op(struct strcop_crypto_op *cop);
static void print_lock_status(void);
static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op);
/* No trailing semicolon: the do-while(0) form must consume the caller's ';'
 * so that 'if (x) assert(y); else ...' parses correctly. */
#define assert(s) do{if (!(s)) panic(#s);} while(0)
#else
#define DEBUG(s)
#define DEBUG_API(s)
#define assert(s)
#endif
251
252
/* Transform constants (all lengths in bytes). */
#define DES_BLOCK_LENGTH   (8)
#define AES_BLOCK_LENGTH   (16)
#define MD5_BLOCK_LENGTH   (64)
#define SHA1_BLOCK_LENGTH  (64)
#define CSUM_BLOCK_LENGTH  (2)
#define MD5_STATE_LENGTH   (16)
#define SHA1_STATE_LENGTH  (20)

/* The device number. */
#define CRYPTOCOP_MAJOR    (254)
#define CRYPTOCOP_MINOR    (0)
265
266
267
268const struct file_operations cryptocop_fops = {
269	owner: THIS_MODULE,
270	open: cryptocop_open,
271	release: cryptocop_release,
272	ioctl: cryptocop_ioctl
273};
274
275
276static void free_cdesc(struct cryptocop_dma_desc *cdesc)
277{
278	DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
279	kfree(cdesc->free_buf);
280
281	if (cdesc->from_pool) {
282		unsigned long int flags;
283		spin_lock_irqsave(&descr_pool_lock, flags);
284		cdesc->next = descr_pool_free_list;
285		descr_pool_free_list = cdesc;
286		++descr_pool_no_free;
287		spin_unlock_irqrestore(&descr_pool_lock, flags);
288	} else {
289		kfree(cdesc);
290	}
291}
292
293
/* Allocate a DMA descriptor.  GFP_ATOMIC callers must not sleep, so they
 * are served from the static descriptor pool; others use kmalloc().
 * Returns NULL if the pool is empty or kmalloc() fails. */
static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
{
	int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
	struct cryptocop_dma_desc *cdesc;

	if (use_pool) {
		unsigned long int flags;
		spin_lock_irqsave(&descr_pool_lock, flags);
		if (!descr_pool_free_list) {
			spin_unlock_irqrestore(&descr_pool_lock, flags);
			DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
			return NULL;
		}
		cdesc = descr_pool_free_list;
		descr_pool_free_list = descr_pool_free_list->next;
		--descr_pool_no_free;
		spin_unlock_irqrestore(&descr_pool_lock, flags);
		cdesc->from_pool = 1;
	} else {
		cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
		if (!cdesc) {
			DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
			return NULL;
		}
		cdesc->from_pool = 0;
	}
	/* Point dma_descr at a 32-byte aligned address inside dma_descr_buf;
	 * DESCR_ALLOC_PAD (31) guarantees such an address exists.  The same
	 * ~0x1F alignment mask is used for the internal-operation contexts in
	 * cryptocop_setup_dma_list(). */
	cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);

	cdesc->next = NULL;

	/* Reset all descriptor fields -- pool descriptors are recycled. */
	cdesc->free_buf = NULL;
	cdesc->dma_descr->out_eop = 0;
	cdesc->dma_descr->in_eop = 0;
	cdesc->dma_descr->intr = 0;
	cdesc->dma_descr->eol = 0;
	cdesc->dma_descr->wait = 0;
	cdesc->dma_descr->buf = NULL;
	cdesc->dma_descr->after = NULL;

	DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
	return cdesc;
}
336
337
338static void setup_descr_chain(struct cryptocop_dma_desc *cd)
339{
340	DEBUG(printk("setup_descr_chain: entering\n"));
341	while (cd) {
342		if (cd->next) {
343			cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr);
344		} else {
345			cd->dma_descr->next = NULL;
346		}
347		cd = cd->next;
348	}
349	DEBUG(printk("setup_descr_chain: exit\n"));
350}
351
352
353/* Create a pad descriptor for the transform.
354 * Return -1 for error, 0 if pad created. */
355static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
356{
357	struct cryptocop_dma_desc        *cdesc = NULL;
358	int                              error = 0;
359	struct strcop_meta_out           mo = {
360		.ciphsel = src_none,
361		.hashsel = src_none,
362		.csumsel = src_none
363	};
364	char                             *pad;
365	size_t                           plen;
366
367	DEBUG(printk("create_pad_descriptor: start.\n"));
368	/* Setup pad descriptor. */
369
370	DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
371	cdesc = alloc_cdesc(alloc_flag);
372	if (!cdesc){
373		DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));
374		goto error_cleanup;
375	}
376	switch (tc->unit_no) {
377	case src_md5:
378		error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
379		if (error){
380			DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
381			goto error_cleanup;
382		}
383		cdesc->free_buf = pad;
384		mo.hashsel = src_dma;
385		mo.hashconf = tc->hash_conf;
386		mo.hashmode = tc->hash_mode;
387		break;
388	case src_sha1:
389		error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
390		if (error){
391			DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
392			goto error_cleanup;
393		}
394		cdesc->free_buf = pad;
395		mo.hashsel = src_dma;
396		mo.hashconf = tc->hash_conf;
397		mo.hashmode = tc->hash_mode;
398		break;
399	case src_csum:
400		if (tc->consumed % tc->blocklength){
401			pad = (char*)csum_zero_pad;
402			plen = 1;
403		} else {
404			pad = (char*)cdesc; /* Use any pointer. */
405			plen = 0;
406		}
407		mo.csumsel = src_dma;
408		break;
409	}
410	cdesc->dma_descr->wait = 1;
411	cdesc->dma_descr->out_eop = 1; /* Since this is a pad output is pushed.  EOP is ok here since the padded unit is the only one active. */
412	cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
413	cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;
414
415	cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
416	*pad_desc = cdesc;
417
418	return 0;
419
420 error_cleanup:
421	if (cdesc) free_cdesc(cdesc);
422	return -1;
423}
424
425
/* Build a DMA out-list descriptor that downloads the cipher key to the
 * co-processor.  For AES decryption the decrypt key is derived once per
 * transform context and cached in tctx->dec_key.
 * Returns 0 on success, -ENOMEM on descriptor allocation failure. */
static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
{
	struct cryptocop_dma_desc  *key_desc = alloc_cdesc(alloc_flag);
	struct strcop_meta_out     mo = {0};

	DEBUG(printk("setup_key_dl_desc\n"));

	if (!key_desc) {
		DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));
		return -ENOMEM;
	}

	/* Download key. */
	if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
		/* Precook the AES decrypt key. */
		if (!tc->tctx->dec_key_set){
			get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
			tc->tctx->dec_key_set = 1;
		}
		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8; /* keylen is in bits */
	} else {
		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
	}
	/* Setup metadata. */
	mo.dlkey = 1;
	/* For a key download the decrypt/hashmode bits appear to be reused as
	 * a key-length selector (64/128/192/256 bits) rather than carrying
	 * their usual meaning -- confirm against the strcop register spec. */
	switch (tc->tctx->init.keylen) {
	case 64:
		mo.decrypt = 0;
		mo.hashmode = 0;
		break;
	case 128:
		mo.decrypt = 0;
		mo.hashmode = 1;
		break;
	case 192:
		mo.decrypt = 1;
		mo.hashmode = 0;
		break;
	case 256:
		mo.decrypt = 1;
		mo.hashmode = 1;
		break;
	default:
		/* NOTE(review): other key lengths silently encode as 64-bit
		 * (both bits zero) -- verify keylen is validated upstream,
		 * e.g. in transform_ok(). */
		break;
	}
	mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
	key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);

	key_desc->dma_descr->out_eop = 1;
	key_desc->dma_descr->wait = 1;
	key_desc->dma_descr->intr = 0;

	*kd = key_desc;
	return 0;
}
483
484static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
485{
486	struct cryptocop_dma_desc  *iv_desc = alloc_cdesc(alloc_flag);
487	struct strcop_meta_out     mo = {0};
488
489	DEBUG(printk("setup_cipher_iv_desc\n"));
490
491	if (!iv_desc) {
492		DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));
493		return -ENOMEM;
494	}
495	/* Download IV. */
496	iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
497	iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;
498
499	/* Setup metadata. */
500	mo.hashsel = mo.csumsel = src_none;
501	mo.ciphsel = src_dma;
502	mo.ciphconf = tc->ciph_conf;
503	mo.cbcmode = tc->cbcmode;
504
505	iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
506
507	iv_desc->dma_descr->out_eop = 0;
508	iv_desc->dma_descr->wait = 1;
509	iv_desc->dma_descr->intr = 0;
510
511	*id = iv_desc;
512	return 0;
513}
514
515/* Map the ouput length of the transform to operation output starting on the inject index. */
516static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
517{
518	int                        err = 0;
519	struct cryptocop_dma_desc  head = {0};
520	struct cryptocop_dma_desc  *outdesc = &head;
521	size_t                     iov_offset = 0;
522	size_t                     out_ix = 0;
523	int                        outiov_ix = 0;
524	struct strcop_meta_in      mi = {0};
525
526	size_t                     out_length = tc->produced;
527	int                        rem_length;
528	int                        dlength;
529
530	assert(out_length != 0);
531	if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
532		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
533		return -EINVAL;
534	}
535	/* Traverse the out iovec until the result inject index is reached. */
536	while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
537		out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
538		outiov_ix++;
539	}
540	if (outiov_ix >= operation->tfrm_op.outcount){
541		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
542		return -EINVAL;
543	}
544	iov_offset = tc->tcfg->inject_ix - out_ix;
545	mi.dmasel = tc->unit_no;
546
547	/* Setup the output descriptors. */
548	while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
549		outdesc->next = alloc_cdesc(alloc_flag);
550		if (!outdesc->next) {
551			DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
552			err = -ENOMEM;
553			goto error_cleanup;
554		}
555		outdesc = outdesc->next;
556		rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
557		dlength = (out_length < rem_length) ? out_length : rem_length;
558
559		DEBUG(printk("create_input_descriptors:\n"
560			     "outiov_ix=%d, rem_length=%d, dlength=%d\n"
561			     "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
562			     "outcount=%d, outiov_ix=%d\n",
563			     outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));
564
565		outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
566		outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
567		outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
568
569		out_length -= dlength;
570		iov_offset += dlength;
571		if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
572			iov_offset = 0;
573			++outiov_ix;
574		}
575	}
576	if (out_length > 0){
577		DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length));
578		err = -EINVAL;
579		goto error_cleanup;
580	}
581	/* Set sync in last descriptor. */
582	mi.sync = 1;
583	outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
584
585	*id = head.next;
586	return 0;
587
588 error_cleanup:
589	while (head.next) {
590		outdesc = head.next->next;
591		free_cdesc(head.next);
592		head.next = outdesc;
593	}
594	return err;
595}
596
597
598static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
599{
600	while (desc_len != 0) {
601		struct cryptocop_dma_desc  *cdesc;
602		int                        rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
603		int                        dlength = (desc_len < rem_length) ? desc_len : rem_length;
604
605		cdesc = alloc_cdesc(alloc_flag);
606		if (!cdesc) {
607			DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));
608			return -ENOMEM;
609		}
610		(*current_out_cdesc)->next = cdesc;
611		(*current_out_cdesc) = cdesc;
612
613		cdesc->free_buf = NULL;
614
615		cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
616		cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;
617
618		desc_len -= dlength;
619		*iniov_offset += dlength;
620		assert(desc_len >= 0);
621		if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
622			*iniov_offset = 0;
623			++(*iniov_ix);
624			if (*iniov_ix > operation->tfrm_op.incount) {
625				DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));
626				return  -EINVAL;
627			}
628		}
629		cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
630	} /* while (desc_len != 0) */
631	/* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */
632	(*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */
633
634	return 0;
635}
636
637
/* Flush a finished transform's pending work onto the DMA lists: append any
 * pad descriptors to the out list, then build and append the descriptors
 * that receive the transform's output on the in list.  For mem2mem, input
 * bytes preceding the requested data (tc->start_ix) are first discarded
 * into mem2mem_discard_buf.  No-op if the transform is unused (tcfg NULL).
 * Returns 0 on success or a negative errno. */
static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
{
	DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
	if (tc->tcfg) {
		int                        failed = 0;
		struct cryptocop_dma_desc  *idescs = NULL;
		DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));
		if (tc->pad_descs) {
			DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
			while (tc->pad_descs) {
				DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
				(*current_out_cdesc)->next = tc->pad_descs;
				tc->pad_descs = tc->pad_descs->next;
				(*current_out_cdesc) = (*current_out_cdesc)->next;
			}
		}

		/* Setup and append output descriptors to DMA in list. */
		if (tc->unit_no == src_dma){
			/* mem2mem.  Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */
			struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
			unsigned int start_ix = tc->start_ix;
			while (start_ix){
				/* Discard in chunks of at most the trash buffer size. */
				unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;
				(*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
				if (!(*current_in_cdesc)->next){
					DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
					return -ENOMEM;
				}
				(*current_in_cdesc) = (*current_in_cdesc)->next;
				(*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
				(*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
				(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
				start_ix -= desclen;
			}
			/* Re-issue the metadata with sync set on the last discard descriptor. */
			mi.sync = 1;
			(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
		}

		failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
		if (failed){
			DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
			return failed;
		}
		DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
		while (idescs) {
			DEBUG(printk("append descriptor 0x%p\n", idescs));
			(*current_in_cdesc)->next = idescs;
			idescs = idescs->next;
			(*current_in_cdesc) = (*current_in_cdesc)->next;
		}
	}
	return 0;
}
692
693
694
695static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag)
696{
697	struct cryptocop_session *sess;
698	struct cryptocop_transform_ctx *tctx;
699
700	struct cryptocop_tfrm_ctx digest_ctx = {
701		.previous_src = src_none,
702		.current_src = src_none,
703		.start_ix = 0,
704		.requires_padding = 1,
705		.strict_block_length = 0,
706		.hash_conf = 0,
707		.hash_mode = 0,
708		.ciph_conf = 0,
709		.cbcmode = 0,
710		.decrypt = 0,
711		.consumed = 0,
712		.produced = 0,
713		.pad_descs = NULL,
714		.active = 0,
715		.done = 0,
716		.prev_src = NULL,
717		.curr_src = NULL,
718		.tcfg = NULL};
719	struct cryptocop_tfrm_ctx cipher_ctx = {
720		.previous_src = src_none,
721		.current_src = src_none,
722		.start_ix = 0,
723		.requires_padding = 0,
724		.strict_block_length = 1,
725		.hash_conf = 0,
726		.hash_mode = 0,
727		.ciph_conf = 0,
728		.cbcmode = 0,
729		.decrypt = 0,
730		.consumed = 0,
731		.produced = 0,
732		.pad_descs = NULL,
733		.active = 0,
734		.done = 0,
735		.prev_src = NULL,
736		.curr_src = NULL,
737		.tcfg = NULL};
738	struct cryptocop_tfrm_ctx csum_ctx = {
739		.previous_src = src_none,
740		.current_src = src_none,
741		.start_ix = 0,
742		.blocklength = 2,
743		.requires_padding = 1,
744		.strict_block_length = 0,
745		.hash_conf = 0,
746		.hash_mode = 0,
747		.ciph_conf = 0,
748		.cbcmode = 0,
749		.decrypt = 0,
750		.consumed = 0,
751		.produced = 0,
752		.pad_descs = NULL,
753		.active = 0,
754		.done = 0,
755		.tcfg = NULL,
756		.prev_src = NULL,
757		.curr_src = NULL,
758		.unit_no = src_csum};
759	struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg;
760
761	unsigned int indata_ix = 0;
762
763	/* iovec accounting. */
764	int iniov_ix = 0;
765	int iniov_offset = 0;
766
767	/* Operation descriptor cfg traversal pointer. */
768	struct cryptocop_desc *odsc;
769
770	int failed = 0;
771	/* List heads for allocated descriptors. */
772	struct cryptocop_dma_desc out_cdesc_head = {0};
773	struct cryptocop_dma_desc in_cdesc_head = {0};
774
775	struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head;
776	struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head;
777
778	struct cryptocop_tfrm_ctx *output_tc = NULL;
779	void                      *iop_alloc_ptr;
780
781	assert(operation != NULL);
782	assert(int_op != NULL);
783
784	DEBUG(printk("cryptocop_setup_dma_list: start\n"));
785	DEBUG(print_cryptocop_operation(operation));
786
787	sess = get_session(operation->sid);
788	if (!sess) {
789		DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
790		failed = -EINVAL;
791		goto error_cleanup;
792	}
793	iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
794	if (!iop_alloc_ptr) {
795		DEBUG_API(printk("cryptocop_setup_dma_list:  kmalloc cryptocop_int_operation\n"));
796		failed = -ENOMEM;
797		goto error_cleanup;
798	}
799	(*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
800	DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation)));
801	(*int_op)->alloc_ptr = iop_alloc_ptr;
802	DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr));
803
804	(*int_op)->sid = operation->sid;
805	(*int_op)->cdesc_out = NULL;
806	(*int_op)->cdesc_in = NULL;
807	(*int_op)->tdes_mode = cryptocop_3des_ede;
808	(*int_op)->csum_mode = cryptocop_csum_le;
809	(*int_op)->ddesc_out = NULL;
810	(*int_op)->ddesc_in = NULL;
811
812	/* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
813	if (!tcfg) {
814		DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
815		failed = -EINVAL;
816		goto error_cleanup;
817	}
818	while (tcfg) {
819		tctx = get_transform_ctx(sess, tcfg->tid);
820		if (!tctx) {
821			DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid));
822			failed = -EINVAL;
823			goto error_cleanup;
824		}
825		if (tcfg->inject_ix > operation->tfrm_op.outlen){
826			DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen));
827			failed = -EINVAL;
828			goto error_cleanup;
829		}
830		switch (tctx->init.alg){
831		case cryptocop_alg_mem2mem:
832			if (cipher_ctx.tcfg != NULL){
833				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
834				failed = -EINVAL;
835				goto error_cleanup;
836			}
837			/* mem2mem is handled as a NULL cipher. */
838			cipher_ctx.cbcmode = 0;
839			cipher_ctx.decrypt = 0;
840			cipher_ctx.blocklength = 1;
841			cipher_ctx.ciph_conf = 0;
842			cipher_ctx.unit_no = src_dma;
843			cipher_ctx.tcfg = tcfg;
844			cipher_ctx.tctx = tctx;
845			break;
846		case cryptocop_alg_des:
847		case cryptocop_alg_3des:
848		case cryptocop_alg_aes:
849			/* cipher */
850			if (cipher_ctx.tcfg != NULL){
851				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
852				failed = -EINVAL;
853				goto error_cleanup;
854			}
855			cipher_ctx.tcfg = tcfg;
856			cipher_ctx.tctx = tctx;
857			if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){
858				cipher_ctx.decrypt = 1;
859			}
860			switch (tctx->init.cipher_mode) {
861			case cryptocop_cipher_mode_ecb:
862				cipher_ctx.cbcmode = 0;
863				break;
864			case cryptocop_cipher_mode_cbc:
865				cipher_ctx.cbcmode = 1;
866				break;
867			default:
868				DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode));
869				failed = -EINVAL;
870				goto error_cleanup;
871			}
872			DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode));
873			switch (tctx->init.alg){
874			case cryptocop_alg_des:
875				cipher_ctx.ciph_conf = 0;
876				cipher_ctx.unit_no = src_des;
877				cipher_ctx.blocklength = DES_BLOCK_LENGTH;
878				break;
879			case cryptocop_alg_3des:
880				cipher_ctx.ciph_conf = 1;
881				cipher_ctx.unit_no = src_des;
882				cipher_ctx.blocklength = DES_BLOCK_LENGTH;
883				break;
884			case cryptocop_alg_aes:
885				cipher_ctx.ciph_conf = 2;
886				cipher_ctx.unit_no = src_aes;
887				cipher_ctx.blocklength = AES_BLOCK_LENGTH;
888				break;
889			default:
890				panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg);
891			}
892			(*int_op)->tdes_mode = tctx->init.tdes_mode;
893			break;
894		case cryptocop_alg_md5:
895		case cryptocop_alg_sha1:
896			/* digest */
897			if (digest_ctx.tcfg != NULL){
898				DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n"));
899				failed = -EINVAL;
900				goto error_cleanup;
901			}
902			digest_ctx.tcfg = tcfg;
903			digest_ctx.tctx = tctx;
904			digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */
905			switch (tctx->init.alg){
906			case cryptocop_alg_md5:
907				digest_ctx.blocklength = MD5_BLOCK_LENGTH;
908				digest_ctx.unit_no = src_md5;
909				digest_ctx.hash_conf = 1; /* 1 => MD-5 */
910				break;
911			case cryptocop_alg_sha1:
912				digest_ctx.blocklength = SHA1_BLOCK_LENGTH;
913				digest_ctx.unit_no = src_sha1;
914				digest_ctx.hash_conf = 0; /* 0 => SHA-1 */
915				break;
916			default:
917				panic("cryptocop_setup_dma_list: impossible digest algorithm\n");
918			}
919			break;
920		case cryptocop_alg_csum:
921			/* digest */
922			if (csum_ctx.tcfg != NULL){
923				DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n"));
924				failed = -EINVAL;
925				goto error_cleanup;
926			}
927			(*int_op)->csum_mode = tctx->init.csum_mode;
928			csum_ctx.tcfg = tcfg;
929			csum_ctx.tctx = tctx;
930			break;
931		default:
932			/* no algorithm. */
933			DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid));
934			failed = -EINVAL;
935			goto error_cleanup;
936		}
937		tcfg = tcfg->next;
938	}
939	/* Download key if a cipher is used. */
940	if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){
941		struct cryptocop_dma_desc  *key_desc = NULL;
942
943		failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag);
944		if (failed) {
945			DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n"));
946			goto error_cleanup;
947		}
948		current_out_cdesc->next = key_desc;
949		current_out_cdesc = key_desc;
950		indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf);
951
952		/* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */
953		if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) {
954			struct cryptocop_dma_desc  *iv_desc = NULL;
955
956			DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n"));
957
958			failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag);
959			if (failed) {
960				DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n"));
961				goto error_cleanup;
962			}
963			current_out_cdesc->next = iv_desc;
964			current_out_cdesc = iv_desc;
965			indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf);
966		}
967	}
968
969	/* Process descriptors. */
970	odsc = operation->tfrm_op.desc;
971	while (odsc) {
972		struct cryptocop_desc_cfg   *dcfg = odsc->cfg;
973		struct strcop_meta_out      meta_out = {0};
974		size_t                      desc_len = odsc->length;
975		int                         active_count, eop_needed_count;
976
977		output_tc = NULL;
978
979		DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n"));
980
981		while (dcfg) {
982			struct cryptocop_tfrm_ctx  *tc = NULL;
983
984			DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n"));
985			/* Get the local context for the transform and mark it as the output unit if it produces output. */
986			if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){
987				tc = &digest_ctx;
988			} else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){
989				tc = &cipher_ctx;
990			} else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){
991				tc = &csum_ctx;
992			}
993			if (!tc) {
994				DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid));
995				failed = -EINVAL;
996				goto error_cleanup;
997			}
998			if (tc->done) {
999				DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid));
1000				failed = -EINVAL;
1001				goto error_cleanup;
1002			}
1003			if (!tc->active) {
1004				tc->start_ix = indata_ix;
1005				tc->active = 1;
1006			}
1007
1008			tc->previous_src = tc->current_src;
1009			tc->prev_src = tc->curr_src;
1010			/* Map source unit id to DMA source config. */
1011			switch (dcfg->src){
1012			case cryptocop_source_dma:
1013				tc->current_src = src_dma;
1014				break;
1015			case cryptocop_source_des:
1016				tc->current_src = src_des;
1017				break;
1018			case cryptocop_source_3des:
1019				tc->current_src = src_des;
1020				break;
1021			case cryptocop_source_aes:
1022				tc->current_src = src_aes;
1023				break;
1024			case cryptocop_source_md5:
1025			case cryptocop_source_sha1:
1026			case cryptocop_source_csum:
1027			case cryptocop_source_none:
1028			default:
1029				/* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units.
1030				 */
1031				DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src));
1032				failed = -EINVAL;
1033				goto error_cleanup;
1034			}
1035			if (tc->current_src != src_dma) {
1036				/* Find the unit we are sourcing from. */
1037				if (digest_ctx.unit_no == tc->current_src){
1038					tc->curr_src = &digest_ctx;
1039				} else if (cipher_ctx.unit_no == tc->current_src){
1040					tc->curr_src = &cipher_ctx;
1041				} else if (csum_ctx.unit_no == tc->current_src){
1042					tc->curr_src = &csum_ctx;
1043				}
1044				if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){
1045					DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no));
1046					failed = -EINVAL;
1047					goto error_cleanup;
1048				}
1049			} else {
1050				tc->curr_src = NULL;
1051			}
1052
1053			/* Detect source switch. */
1054			DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_srv=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src));
1055			if (tc->active && (tc->current_src != tc->previous_src)) {
1056				/* Only allow source switch when both the old source unit and the new one have
1057				 * no pending data to process (i.e. the consumed length must be a multiple of the
1058				 * transform blocklength). */
1059				/* Note: if the src == NULL we are actually sourcing from DMA out. */
1060				if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) ||
1061				    ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength)))
1062				{
1063					DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN));
1064					failed = -EINVAL;
1065					goto error_cleanup;
1066				}
1067			}
1068			/* Detect unit deactivation. */
1069			if (dcfg->last) {
1070				/* Length check of this is handled below. */
1071				tc->done = 1;
1072			}
1073			dcfg = dcfg->next;
1074		} /* while (dcfg) */
1075		DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n"));
1076
1077		if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){
1078			DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no));
1079			failed = -EINVAL;
1080			goto error_cleanup;
1081		}
1082		if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){
1083			DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no));
1084			failed = -EINVAL;
1085			goto error_cleanup;
1086		}
1087		if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){
1088			DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", csum_ctx.curr_src->unit_no));
1089			failed = -EINVAL;
1090			goto error_cleanup;
1091		}
1092
1093		/* Update consumed and produced lengths.
1094
1095		   The consumed length accounting here is actually cheating.  If a unit source from DMA (or any
1096		   other unit that process data in blocks of one octet) it is correct, but if it source from a
1097		   block processing unit, i.e. a cipher, it will be temporarily incorrect at some times.  However
1098		   since it is only allowed--by the HW--to change source to or from a block processing unit at times where that
1099		   unit has processed an exact multiple of its block length the end result will be correct.
1100		   Beware that if the source change restriction change this code will need to be (much) reworked.
1101		*/
1102		DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len));
1103
1104		if (csum_ctx.active) {
1105			csum_ctx.consumed += desc_len;
1106			if (csum_ctx.done) {
1107				csum_ctx.produced = 2;
1108			}
1109			DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength));
1110		}
1111		if (digest_ctx.active) {
1112			digest_ctx.consumed += desc_len;
1113			if (digest_ctx.done) {
1114				if (digest_ctx.unit_no == src_md5) {
1115					digest_ctx.produced = MD5_STATE_LENGTH;
1116				} else {
1117					digest_ctx.produced = SHA1_STATE_LENGTH;
1118				}
1119			}
1120			DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength));
1121		}
1122		if (cipher_ctx.active) {
1123			/* Ciphers are allowed only to source from DMA out.  That is filtered above. */
1124			assert(cipher_ctx.current_src == src_dma);
1125			cipher_ctx.consumed += desc_len;
1126			cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength);
1127			if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){
1128				cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC iv. */
1129			}
1130			DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength));
1131		}
1132
1133		/* Setup the DMA out descriptors. */
1134		/* Configure the metadata. */
1135		active_count = 0;
1136		eop_needed_count = 0;
1137		if (cipher_ctx.active) {
1138			++active_count;
1139			if (cipher_ctx.unit_no == src_dma){
1140				/* mem2mem */
1141				meta_out.ciphsel = src_none;
1142			} else {
1143				meta_out.ciphsel = cipher_ctx.current_src;
1144			}
1145			meta_out.ciphconf = cipher_ctx.ciph_conf;
1146			meta_out.cbcmode = cipher_ctx.cbcmode;
1147			meta_out.decrypt = cipher_ctx.decrypt;
1148			DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt));
1149			if (cipher_ctx.done) ++eop_needed_count;
1150		} else {
1151			meta_out.ciphsel = src_none;
1152		}
1153
1154		if (digest_ctx.active) {
1155			++active_count;
1156			meta_out.hashsel = digest_ctx.current_src;
1157			meta_out.hashconf = digest_ctx.hash_conf;
1158			meta_out.hashmode = 0; /* Explicit mode is not used here. */
1159			DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode));
1160			if (digest_ctx.done) {
1161				assert(digest_ctx.pad_descs == NULL);
1162				failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag);
1163				if (failed) {
1164					DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n"));
1165					goto error_cleanup;
1166				}
1167			}
1168		} else {
1169			meta_out.hashsel = src_none;
1170		}
1171
1172		if (csum_ctx.active) {
1173			++active_count;
1174			meta_out.csumsel = csum_ctx.current_src;
1175			if (csum_ctx.done) {
1176				assert(csum_ctx.pad_descs == NULL);
1177				failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag);
1178				if (failed) {
1179					DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n"));
1180					goto error_cleanup;
1181				}
1182			}
1183		} else {
1184			meta_out.csumsel = src_none;
1185		}
1186		DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count));
1187		/* Setup DMA out descriptors for the indata. */
1188		failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, &current_out_cdesc, &meta_out, alloc_flag);
1189		if (failed) {
1190			DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed));
1191			goto error_cleanup;
1192		}
	/* Setup out EOP.  If there are active units that are not done here they cannot get an EOP
	 * so we must set up a zero length descriptor to DMA to signal EOP only to done units.
	 * If there is a pad descriptor EOP for the padded unit will be EOPed by it.
	 */
1197		assert(active_count >= eop_needed_count);
1198		assert((eop_needed_count == 0) || (eop_needed_count == 1));
1199		if (eop_needed_count) {
			/* This means that the bulk operation (cipher/m2m) is terminated. */
1201			if (active_count > 1) {
1202				/* Use zero length EOP descriptor. */
1203				struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag);
1204				struct strcop_meta_out    ed_mo = {0};
1205				if (!ed) {
1206					DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n"));
1207					failed = -ENOMEM;
1208					goto error_cleanup;
1209				}
1210
1211				assert(cipher_ctx.active && cipher_ctx.done);
1212
1213				if (cipher_ctx.unit_no == src_dma){
1214					/* mem2mem */
1215					ed_mo.ciphsel = src_none;
1216				} else {
1217					ed_mo.ciphsel = cipher_ctx.current_src;
1218				}
1219				ed_mo.ciphconf = cipher_ctx.ciph_conf;
1220				ed_mo.cbcmode = cipher_ctx.cbcmode;
1221				ed_mo.decrypt = cipher_ctx.decrypt;
1222
1223				ed->free_buf = NULL;
1224				ed->dma_descr->wait = 1;
1225				ed->dma_descr->out_eop = 1;
1226
1227				ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */
1228				ed->dma_descr->after = ed->dma_descr->buf;
1229				ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo);
1230				current_out_cdesc->next = ed;
1231				current_out_cdesc = ed;
1232			} else {
1233				/* Set EOP in the current out descriptor since the only active module is
1234				 * the one needing the EOP. */
1235
1236				current_out_cdesc->dma_descr->out_eop = 1;
1237			}
1238		}
1239
1240		if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0;
1241		if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0;
1242		if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0;
1243		indata_ix += odsc->length;
1244		odsc = odsc->next;
1245	} /* while (odsc) */ /* Process descriptors. */
1246	DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n"));
1247	if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){
1248		DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n"));
1249		failed = -EINVAL;
1250		goto error_cleanup;
1251	}
1252	if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){
1253		DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n"));
1254		failed = -EINVAL;
1255		goto error_cleanup;
1256	}
1257	if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){
1258		DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n"));
1259		failed = -EINVAL;
1260		goto error_cleanup;
1261	}
1262
1263	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &cipher_ctx, alloc_flag);
1264	if (failed){
1265		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1266		goto error_cleanup;
1267	}
1268	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &digest_ctx, alloc_flag);
1269	if (failed){
1270		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1271		goto error_cleanup;
1272	}
1273	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &csum_ctx, alloc_flag);
1274	if (failed){
1275		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1276		goto error_cleanup;
1277	}
1278
1279	DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op));
1280	(*int_op)->cdesc_out = out_cdesc_head.next;
1281	(*int_op)->cdesc_in = in_cdesc_head.next;
1282	DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in));
1283
1284	setup_descr_chain(out_cdesc_head.next);
1285	setup_descr_chain(in_cdesc_head.next);
1286
	/* Last but not least: mark the last DMA in descriptor for INTR and EOL and the
	 * last DMA out descriptor for EOL.
	 */
1290	current_in_cdesc->dma_descr->intr = 1;
1291	current_in_cdesc->dma_descr->eol = 1;
1292	current_out_cdesc->dma_descr->eol = 1;
1293
1294	/* Setup DMA contexts. */
1295	(*int_op)->ctx_out.next = NULL;
1296	(*int_op)->ctx_out.eol = 1;
1297	(*int_op)->ctx_out.intr = 0;
1298	(*int_op)->ctx_out.store_mode = 0;
1299	(*int_op)->ctx_out.en = 0;
1300	(*int_op)->ctx_out.dis = 0;
1301	(*int_op)->ctx_out.md0 = 0;
1302	(*int_op)->ctx_out.md1 = 0;
1303	(*int_op)->ctx_out.md2 = 0;
1304	(*int_op)->ctx_out.md3 = 0;
1305	(*int_op)->ctx_out.md4 = 0;
1306	(*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr);
1307	(*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already physical address. */
1308
1309	(*int_op)->ctx_in.next = NULL;
1310	(*int_op)->ctx_in.eol = 1;
1311	(*int_op)->ctx_in.intr = 0;
1312	(*int_op)->ctx_in.store_mode = 0;
1313	(*int_op)->ctx_in.en = 0;
1314	(*int_op)->ctx_in.dis = 0;
1315	(*int_op)->ctx_in.md0 = 0;
1316	(*int_op)->ctx_in.md1 = 0;
1317	(*int_op)->ctx_in.md2 = 0;
1318	(*int_op)->ctx_in.md3 = 0;
1319	(*int_op)->ctx_in.md4 = 0;
1320
1321	(*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr);
1322	(*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already physical address. */
1323
1324	DEBUG(printk("cryptocop_setup_dma_list: done\n"));
1325	return 0;
1326
1327error_cleanup:
1328	{
1329		/* Free all allocated resources. */
1330		struct cryptocop_dma_desc *tmp_cdesc;
1331		while (digest_ctx.pad_descs){
1332			tmp_cdesc = digest_ctx.pad_descs->next;
1333			free_cdesc(digest_ctx.pad_descs);
1334			digest_ctx.pad_descs = tmp_cdesc;
1335		}
1336		while (csum_ctx.pad_descs){
1337			tmp_cdesc = csum_ctx.pad_descs->next;
1338			free_cdesc(csum_ctx.pad_descs);
1339			csum_ctx.pad_descs = tmp_cdesc;
1340		}
1341		assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */
1342
1343		if (*int_op != NULL) delete_internal_operation(*int_op);
1344	}
1345	DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed));
1346	return failed;
1347}
1348
1349
1350static void delete_internal_operation(struct cryptocop_int_operation *iop)
1351{
1352	void                      *ptr = iop->alloc_ptr;
1353	struct cryptocop_dma_desc *cd = iop->cdesc_out;
1354	struct cryptocop_dma_desc *next;
1355
1356	DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr));
1357
1358	while (cd) {
1359		next = cd->next;
1360		free_cdesc(cd);
1361		cd = next;
1362	}
1363	cd = iop->cdesc_in;
1364	while (cd) {
1365		next = cd->next;
1366		free_cdesc(cd);
1367		cd = next;
1368	}
1369	kfree(ptr);
1370}
1371
1372#define MD5_MIN_PAD_LENGTH (9)
1373#define MD5_PAD_LENGTH_FIELD_LENGTH (8)
1374
1375static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
1376{
1377	size_t                  padlen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH);
1378	unsigned char           *p;
1379	int                     i;
1380	unsigned long long int  bit_length = hashed_length << 3;
1381
1382	if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
1383
1384	p = kmalloc(padlen, alloc_flag);
1385	if (!pad) return -ENOMEM;
1386
1387	*p = 0x80;
1388	memset(p+1, 0, padlen - 1);
1389
1390	DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1391
1392	i = padlen - MD5_PAD_LENGTH_FIELD_LENGTH;
1393	while (bit_length != 0){
1394		p[i++] = bit_length % 0x100;
1395		bit_length >>= 8;
1396	}
1397
1398	*pad = (char*)p;
1399	*pad_length = padlen;
1400
1401	return 0;
1402}
1403
1404#define SHA1_MIN_PAD_LENGTH (9)
1405#define SHA1_PAD_LENGTH_FIELD_LENGTH (8)
1406
1407static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
1408{
1409	size_t                  padlen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH);
1410	unsigned char           *p;
1411	int                     i;
1412	unsigned long long int  bit_length = hashed_length << 3;
1413
1414	if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
1415
1416	p = kmalloc(padlen, alloc_flag);
1417	if (!pad) return -ENOMEM;
1418
1419	*p = 0x80;
1420	memset(p+1, 0, padlen - 1);
1421
1422	DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1423
1424	i = padlen - 1;
1425	while (bit_length != 0){
1426		p[i--] = bit_length % 0x100;
1427		bit_length >>= 8;
1428	}
1429
1430	*pad = (char*)p;
1431	*pad_length = padlen;
1432
1433	return 0;
1434}
1435
1436
/* Validate a single transform init: algorithm known, mode valid for the
 * algorithm, and key length legal.  Returns 0 if acceptable, -EINVAL
 * otherwise. */
static int transform_ok(struct cryptocop_transform_init *tinit)
{
	switch (tinit->alg){
	case cryptocop_alg_csum:
		switch (tinit->csum_mode){
		case cryptocop_csum_le:
		case cryptocop_csum_be:
			break;
		default:
			DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n"));
			return -EINVAL;
		}
		/* fall through -- csum must also have a zero key length. */
	case cryptocop_alg_mem2mem:
	case cryptocop_alg_md5:
	case cryptocop_alg_sha1:
		if (tinit->keylen != 0) {
			DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen));
			return -EINVAL; /* This check is a bit strict. */
		}
		break;
	case cryptocop_alg_des:
		if (tinit->keylen != 64) {
			DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen));
			return -EINVAL;
		}
		break;
	case cryptocop_alg_3des:
		if (tinit->keylen != 192) {
			DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen));
			return -EINVAL;
		}
		break;
	case cryptocop_alg_aes:
		if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) {
			DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen));
			return -EINVAL;
		}
		break;
	case cryptocop_no_alg:
	default:
		DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg));
		return -EINVAL;
	}

	/* Ciphers additionally require a supported cipher mode (ECB or CBC). */
	switch (tinit->alg){
	case cryptocop_alg_des:
	case cryptocop_alg_3des:
	case cryptocop_alg_aes:
		if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL;
		/* fall through */
	default:
		 break;
	}
	return 0;
}
1491
1492
/* Create a new session from a list of transform inits.
 *
 * Validates every transform, rejects duplicate transform ids, copies the
 * inits into a contiguous tfrm_ctx array linked as a list, assigns a new
 * session id and prepends the session to the global session list.
 *
 * Returns 0 with *sid set on success, -EINVAL on bad/missing transforms
 * or -ENOMEM on allocation failure.  alloc_flag is passed to kmalloc, so
 * the caller controls the allocation context (GFP_KERNEL/GFP_ATOMIC). */
int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag)
{
	struct cryptocop_session         *sess;
	struct cryptocop_transform_init  *tfrm_in = tinit;
	struct cryptocop_transform_init  *tmp_in;
	int                              no_tfrms = 0;
	int                              i;
	unsigned long int                flags;

	init_stream_coprocessor(); /* For safety if we are called early */

	/* First pass: count and validate all transforms before allocating. */
	while (tfrm_in){
		int err;
		++no_tfrms;
		if ((err = transform_ok(tfrm_in))) {
			DEBUG_API(printk("cryptocop_new_session, bad transform\n"));
			return err;
		}
		tfrm_in = tfrm_in->next;
	}
	if (0 == no_tfrms) {
		DEBUG_API(printk("cryptocop_new_session, no transforms specified\n"));
		return -EINVAL;
	}

	sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag);
	if (!sess){
		DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n"));
		return -ENOMEM;
	}

	sess->tfrm_ctx = kmalloc(no_tfrms * sizeof(struct cryptocop_transform_ctx), alloc_flag);
	if (!sess->tfrm_ctx) {
		DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n"));
		kfree(sess);
		return -ENOMEM;
	}

	/* Second pass: copy each init into the context array, checking each
	 * transform id against all later ones for duplicates (O(n^2), but
	 * transform lists are short). */
	tfrm_in = tinit;
	for (i = 0; i < no_tfrms; i++){
		tmp_in = tfrm_in->next;
		while (tmp_in){
			if (tmp_in->tid == tfrm_in->tid) {
				DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n"));
				kfree(sess->tfrm_ctx);
				kfree(sess);
				return -EINVAL;
			}
			tmp_in = tmp_in->next;
		}
		memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init));
		sess->tfrm_ctx[i].dec_key_set = 0;
		/* Chain the array elements; the last link is fixed up below. */
		sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1;

		tfrm_in = tfrm_in->next;
	}
	sess->tfrm_ctx[i-1].next = NULL;

	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess->sid = next_sid;
	next_sid++;
	/* TODO If we are really paranoid we should do duplicate check to handle sid wraparound.
	 *      OTOH 2^64 is a really large number of session. */
	if (next_sid == 0) next_sid = 1;

	/* Prepend to session list. */
	sess->next = cryptocop_sessions;
	cryptocop_sessions = sess;
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
	*sid = sess->sid;
	return 0;
}
1565
1566
/* Tear down a session: unlink it from the global session list, cancel any
 * of its jobs still queued (completing them with -EAGAIN via their
 * callbacks), scrub the keying material and free the session memory.
 * Returns 0, or -EINVAL if no session with that id exists. */
int cryptocop_free_session(cryptocop_session_id sid)
{
	struct cryptocop_transform_ctx    *tc;
	struct cryptocop_session          *sess = NULL;
	struct cryptocop_session          *psess = NULL;
	unsigned long int                 flags;
	int                               i;
	LIST_HEAD(remove_list);
	struct list_head                  *node, *tmp;
	struct cryptocop_prio_job         *pj;

	DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid));

	/* Find the session and unlink it while holding the session lock. */
	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess = cryptocop_sessions;
	while (sess && sess->sid != sid){
		psess = sess;
		sess = sess->next;
	}
	if (sess){
		if (psess){
			psess->next = sess->next;
		} else {
			cryptocop_sessions = sess->next;
		}
	}
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);

	if (!sess) return -EINVAL;

	/* Remove queued jobs. */
	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);

	/* Move this session's jobs to a private list so the callbacks can be
	 * invoked below without holding the job queue lock. */
	for (i = 0; i < cryptocop_prio_no_prios; i++){
		if (!list_empty(&(cryptocop_job_queues[i].jobs))){
			list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
				pj = list_entry(node, struct cryptocop_prio_job, node);
				if (pj->oper->sid == sid) {
					list_move_tail(node, &remove_list);
				}
			}
		}
	}
	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);

	list_for_each_safe(node, tmp, &remove_list) {
		list_del(node);
		pj = list_entry(node, struct cryptocop_prio_job, node);
		pj->oper->operation_status = -EAGAIN;  /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */
		DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop));
		pj->oper->cb(pj->oper, pj->oper->cb_data);
		delete_internal_operation(pj->iop);
		kfree(pj);
	}

	tc = sess->tfrm_ctx;
	/* Erase keying data. */
	/* NOTE(review): a plain memset before kfree may be optimized away by
	 * newer compilers; memzero_explicit would be safer — confirm for the
	 * toolchain used here. */
	while (tc){
		DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid));
		memset(tc->init.key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
		memset(tc->dec_key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
		tc = tc->next;
	}
	kfree(sess->tfrm_ctx);
	kfree(sess);

	return 0;
}
1635
1636static struct cryptocop_session *get_session(cryptocop_session_id sid)
1637{
1638	struct cryptocop_session    *sess;
1639	unsigned long int           flags;
1640
1641	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1642	sess = cryptocop_sessions;
1643	while (sess && (sess->sid != sid)){
1644		sess = sess->next;
1645	}
1646	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1647
1648	return sess;
1649}
1650
1651static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid)
1652{
1653	struct cryptocop_transform_ctx *tc = sess->tfrm_ctx;
1654
1655	DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid));
1656	assert(sess != NULL);
1657	while (tc && tc->init.tid != tid){
1658		DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next));
1659		tc = tc->next;
1660	}
1661	DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc));
1662	return tc;
1663}
1664
1665
1666
1667/* The AES s-transform matrix (s-box). */
static const u8 aes_sbox[256] = {
	/* 256-entry forward s-box, indexed by input octet value (decimal). */
	99,  124, 119, 123, 242, 107, 111, 197, 48,  1,   103, 43,  254, 215, 171, 118,
	202, 130, 201, 125, 250, 89,  71,  240, 173, 212, 162, 175, 156, 164, 114, 192,
	183, 253, 147, 38,  54,  63,  247, 204, 52,  165, 229, 241, 113, 216, 49,  21,
	4,   199, 35,  195, 24,  150, 5,   154, 7,   18,  128, 226, 235, 39,  178, 117,
	9,   131, 44,  26,  27,  110, 90,  160, 82,  59,  214, 179, 41,  227, 47,  132,
	83,  209, 0,   237, 32,  252, 177, 91,  106, 203, 190, 57,  74,  76,  88,  207,
	208, 239, 170, 251, 67,  77,  51,  133, 69,  249, 2,   127, 80,  60,  159, 168,
	81,  163, 64,  143, 146, 157, 56,  245, 188, 182, 218, 33,  16,  255, 243, 210,
	205, 12,  19,  236, 95,  151, 68,  23,  196, 167, 126, 61,  100, 93,  25,  115,
	96,  129, 79,  220, 34,  42,  144, 136, 70,  238, 184, 20,  222, 94,  11,  219,
	224, 50,  58,  10,  73,  6,   36,  92,  194, 211, 172, 98,  145, 149, 228, 121,
	231, 200, 55,  109, 141, 213, 78,  169, 108, 86,  244, 234, 101, 122, 174, 8,
	186, 120, 37,  46,  28,  166, 180, 198, 232, 221, 116, 31,  75,  189, 139, 138,
	112, 62,  181, 102, 72,  3,   246, 14,  97,  53,  87,  185, 134, 193, 29,  158,
	225, 248, 152, 17,  105, 217, 142, 148, 155, 30,  135, 233, 206, 85,  40,  223,
	140, 161, 137, 13,  191, 230, 66,  104, 65,  153, 45,  15,  176, 84,  187, 22
};
1686
/* AES has a 32 bit round constant for each round in the
 * key schedule.  round_constant[i] is really Rcon[i+1] in FIPS 197.
 */
1690static u32 round_constant[11] = {
1691	0x01000000, 0x02000000, 0x04000000, 0x08000000,
1692	0x10000000, 0x20000000, 0x40000000, 0x80000000,
1693	0x1B000000, 0x36000000, 0x6C000000
1694};
1695
/* Apply the s-box to each of the four octets in w. */
1697static u32 aes_ks_subword(const u32 w)
1698{
1699	u8 bytes[4];
1700
1701	*(u32*)(&bytes[0]) = w;
1702	bytes[0] = aes_sbox[bytes[0]];
1703	bytes[1] = aes_sbox[bytes[1]];
1704	bytes[2] = aes_sbox[bytes[2]];
1705	bytes[3] = aes_sbox[bytes[3]];
1706	return *(u32*)(&bytes[0]);
1707}
1708
1709/* The encrypt (forward) Rijndael key schedule algorithm pseudo code:
1710 * (Note that AES words are 32 bit long)
1711 *
1712 * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){
1713 * word temp
1714 * i = 0
1715 * while (i < Nk) {
1716 *   w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3])
1717 *   i = i + 1
1718 * }
1719 * i = Nk
1720 *
1721 * while (i < (Nb * (Nr + 1))) {
1722 *   temp = w[i - 1]
1723 *   if ((i mod Nk) == 0) {
1724 *     temp = SubWord(RotWord(temp)) xor Rcon[i/Nk]
1725 *   }
1726 *   else if ((Nk > 6) && ((i mod Nk) == 4)) {
1727 *     temp = SubWord(temp)
1728 *   }
1729 *   w[i] = w[i - Nk] xor temp
1730 * }
1731 * RotWord(t) does a 8 bit cyclic shift left on a 32 bit word.
1732 * SubWord(t) applies the AES s-box individually to each octet
1733 * in a 32 bit word.
1734 *
1735 * For AES Nk can have the values 4, 6, and 8 (corresponding to
1736 * values for Nr of 10, 12, and 14).  Nb is always 4.
1737 *
1738 * To construct w[i], w[i - 1] and w[i - Nk] must be
1739 * available.  Consequently we must keep a state of the last Nk words
1740 * to be able to create the last round keys.
1741 */
/* Run the forward AES key schedule (see pseudo code above) and write the
 * final round keys into dec_key.  key is the big-endian byte-oriented
 * cipher key of keylength bits (128, 192 or 256); any other length
 * panics.  Uses an nk-word ring buffer instead of the full w[] array,
 * which is sufficient since w[i] only depends on w[i-1] and w[i-nk]. */
static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned  char *key, unsigned int keylength)
{
	u32 temp;
	u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */
	u8  w_last_ix;
	int i;
	u8  nr, nk;

	switch (keylength){
	case 128:
		nk = 4;
		nr = 10;
		break;
	case 192:
		nk = 6;
		nr = 12;
		break;
	case 256:
		nk = 8;
		nr = 14;
		break;
	default:
		panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n");
	};

	/* Need to do host byte order correction here since key is byte oriented and the
	 * kx algorithm is word (u32) oriented. */
	for (i = 0; i < nk; i+=1) {
		w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]);
	}

	i = (int)nk;
	w_last_ix = i - 1;
	while (i < (4 * (nr + 2))) {
		temp = w_ring[w_last_ix];
		if (!(i % nk)) {
			/* RotWord(temp): 8 bit cyclic left shift of the word. */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i/nk - 1];
		} else if ((nk > 6) && ((i % nk) == 4)) {
			/* Extra SubWord step only for 256 bit keys (nk == 8). */
			temp = aes_ks_subword(temp);
		}
		w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */
		temp ^= w_ring[w_last_ix];
		w_ring[w_last_ix] = temp;

		/* We need the round keys for round Nr+1 and Nr+2 (round key
		 * Nr+2 is the round key beyond the last one used when
		 * encrypting).  Rounds are numbered starting from 0, Nr=10
		 * implies 11 rounds are used in encryption/decryption.
		 */
		if (i >= (4 * nr)) {
			/* Need to do host byte order correction here, the key
			 * is byte oriented. */
			*(u32*)dec_key = cpu_to_be32(temp);
			dec_key += 4;
		}
		++i;
	}
}
1803
1804
1805/**** Job/operation management. ****/
1806
1807int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation)
1808{
1809	return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation);
1810}
1811
1812int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation)
1813{
1814	return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation);
1815}
1816
1817int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation)
1818{
1819	return cryptocop_job_queue_insert(cryptocop_prio_user, operation);
1820}
1821
1822static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation)
1823{
1824	int                           ret;
1825	struct cryptocop_prio_job     *pj = NULL;
1826	unsigned long int             flags;
1827
1828	DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation));
1829
1830	if (!operation || !operation->cb){
1831		DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation));
1832		return -EINVAL;
1833	}
1834
1835	if ((ret = cryptocop_job_setup(&pj, operation)) != 0){
1836		DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n"));
1837		return ret;
1838	}
1839	assert(pj != NULL);
1840
1841	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
1842	list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs);
1843	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
1844
1845	/* Make sure a job is running */
1846	cryptocop_start_job();
1847	return 0;
1848}
1849
1850static void cryptocop_do_tasklet(unsigned long unused);
1851DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0);
1852
1853static void cryptocop_do_tasklet(unsigned long unused)
1854{
1855	struct list_head             *node;
1856	struct cryptocop_prio_job    *pj = NULL;
1857	unsigned long                flags;
1858
1859	DEBUG(printk("cryptocop_do_tasklet: entering\n"));
1860
1861	do {
1862		spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
1863		if (!list_empty(&cryptocop_completed_jobs)){
1864			node = cryptocop_completed_jobs.next;
1865			list_del(node);
1866			pj = list_entry(node, struct cryptocop_prio_job, node);
1867		} else {
1868			pj = NULL;
1869		}
1870		spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
1871		if (pj) {
1872			assert(pj->oper != NULL);
1873
1874			/* Notify consumer of operation completeness. */
1875			DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
1876
1877			pj->oper->operation_status = 0; /* Job is completed. */
1878			pj->oper->cb(pj->oper, pj->oper->cb_data);
1879			delete_internal_operation(pj->iop);
1880			kfree(pj);
1881		}
1882	} while (pj != NULL);
1883
1884	DEBUG(printk("cryptocop_do_tasklet: exiting\n"));
1885}
1886
/* Interrupt handler for the input DMA channel (DMA9), invoked when the
 * running job's in-DMA signals completion.  Detaches the finished job,
 * opportunistically starts the next queued job, then completes the
 * finished job either directly (fast_callback) or via the tasklet.
 */
static irqreturn_t
dma_done_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct cryptocop_prio_job *done_job;
	reg_dma_rw_ack_intr ack_intr = {
		.data = 1,
	};

	/* Ack the data descriptor interrupt before touching driver state. */
	REG_WR (dma, regi_dma9, rw_ack_intr, ack_intr);

	DEBUG(printk("cryptocop DMA done\n"));

	spin_lock(&running_job_lock);
	if (cryptocop_running_job == NULL){
		/* Spurious interrupt: no job was running. */
		printk("stream co-processor got interrupt when not busy\n");
		spin_unlock(&running_job_lock);
		return IRQ_HANDLED;
	}
	done_job = cryptocop_running_job;
	cryptocop_running_job = NULL;
	spin_unlock(&running_job_lock);

	/* Start processing a job. */
	if (!spin_trylock(&cryptocop_process_lock)){
		/* Someone else holds the process lock and will start the next
		 * job; do not spin on it in interrupt context. */
		DEBUG(printk("cryptocop irq handler, not starting a job\n"));
	} else {
		cryptocop_start_job();
		spin_unlock(&cryptocop_process_lock);
	}

	done_job->oper->operation_status = 0; /* Job is completed. */
	if (done_job->oper->fast_callback){
		/* This operation wants callback from interrupt. */
		done_job->oper->cb(done_job->oper, done_job->oper->cb_data);
		delete_internal_operation(done_job->iop);
		kfree(done_job);
	} else {
		/* Defer the consumer callback to cryptocop_do_tasklet. */
		spin_lock(&cryptocop_completed_jobs_lock);
		list_add_tail(&(done_job->node), &cryptocop_completed_jobs);
		spin_unlock(&cryptocop_completed_jobs_lock);
		tasklet_schedule(&cryptocop_tasklet);
	}

	DEBUG(printk("cryptocop leave irq handler\n"));
	return IRQ_HANDLED;
}
1933
1934
/* Setup interrupts and DMA channels.  Claims the DMA9 interrupt and DMA
 * channels 8 (output) and 9 (input), resets and enables the co-processor
 * and configures both DMA channels.  Panics if the IRQ cannot be
 * acquired.  Always returns 0.
 */
static int init_cryptocop(void)
{
	unsigned long          flags;
	reg_intr_vect_rw_mask  intr_mask;
	reg_dma_rw_cfg         dma_cfg = {.en = 1};
	reg_dma_rw_intr_mask   intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */
	reg_dma_rw_ack_intr    ack_intr = {.data = 1,.in_eop = 1 };
	reg_strcop_rw_cfg      strcop_cfg = {
		.ipend = regk_strcop_little,
		.td1 = regk_strcop_e,
		.td2 = regk_strcop_d,
		.td3 = regk_strcop_e,
		.ignore_sync = 0,
		.en = 1
	};

	if (request_irq(DMA9_INTR_VECT, dma_done_interrupt, 0, "stream co-processor DMA", NULL)) panic("request_irq stream co-processor irq dma9");

	/* Channel 8 is the output DMA, channel 9 the input DMA. */
	(void)crisv32_request_dma(8, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp);
	(void)crisv32_request_dma(9, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp);

	local_irq_save(flags);

	/* Reset and enable the cryptocop. */
	strcop_cfg.en = 0;
	REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
	strcop_cfg.en = 1;
	REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);

	/* Enable DMA9 interrupt */
	intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
	intr_mask.dma9 = 1;
	REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);

	/* Enable DMAs. */
	REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */
	REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */

	/* Set up wordsize = 4 for DMAs. */
	DMA_WR_CMD (regi_dma8, regk_dma_set_w_size4);
	DMA_WR_CMD (regi_dma9, regk_dma_set_w_size4);

	/* Enable interrupts. */
	REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in);

	/* Clear intr ack. */
	REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr);

	local_irq_restore(flags);

	return 0;
}
1988
/* Free used cryptocop hw resources (interrupt and DMA channels).
 * Mirror of init_cryptocop(): masks and acks interrupts, disables both
 * DMA channels and releases the IRQ and DMA channel reservations.
 */
static void release_cryptocop(void)
{
	unsigned long          flags;
	reg_intr_vect_rw_mask  intr_mask;
	reg_dma_rw_cfg         dma_cfg = {.en = 0};
	reg_dma_rw_intr_mask   intr_mask_in = {0};
	reg_dma_rw_ack_intr    ack_intr = {.data = 1,.in_eop = 1 };

	local_irq_save(flags);

	/* Clear intr ack. */
	REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr);

	/* Disable DMA9 interrupt */
	intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
	intr_mask.dma9 = 0;
	REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);

	/* Disable DMAs. */
	REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */
	REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */

	/* Disable interrupts. */
	REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in);

	local_irq_restore(flags);

	free_irq(DMA9_INTR_VECT, NULL);

	(void)crisv32_free_dma(8);
	(void)crisv32_free_dma(9);
}
2022
2023
2024/* Init job queue. */
2025static int cryptocop_job_queue_init(void)
2026{
2027	int i;
2028
2029	INIT_LIST_HEAD(&cryptocop_completed_jobs);
2030
2031	for (i = 0; i < cryptocop_prio_no_prios; i++){
2032		cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i;
2033		INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs);
2034	}
2035	return 0;
2036}
2037
2038
/* Tear down all job state on module close: flush every pending queue,
 * abort the running job (stopping the DMAs and the co-processor) and
 * drain the completed list.  Every removed job's consumer callback is
 * invoked with operation_status = -EINTR before the job is freed.
 */
static void cryptocop_job_queue_close(void)
{
	struct list_head               *node, *tmp;
	struct cryptocop_prio_job      *pj = NULL;
	unsigned long int              process_flags, flags;
	int                            i;


	/* Stop strcop from getting an operation to process while we are closing the
	   module. */
	spin_lock_irqsave(&cryptocop_process_lock, process_flags);

	/* Empty the job queue. */
	for (i = 0; i < cryptocop_prio_no_prios; i++){
		if (!list_empty(&(cryptocop_job_queues[i].jobs))){
			list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
				pj = list_entry(node, struct cryptocop_prio_job, node);
				list_del(node);

				/* Call callback to notify consumer of job removal. */
				DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
				pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
				pj->oper->cb(pj->oper, pj->oper->cb_data);

				delete_internal_operation(pj->iop);
				kfree(pj);
			}
		}
	}
	spin_unlock_irqrestore(&cryptocop_process_lock, process_flags);

	/* Remove the running job, if any. */
	spin_lock_irqsave(&running_job_lock, flags);
	if (cryptocop_running_job){
		reg_strcop_rw_cfg rw_cfg;
		reg_dma_rw_cfg    dma_out_cfg, dma_in_cfg;

		/* Stop DMA. */
		dma_out_cfg = REG_RD(dma, regi_dma8, rw_cfg);
		dma_out_cfg.en = regk_dma_no;
		REG_WR(dma, regi_dma8, rw_cfg, dma_out_cfg);

		dma_in_cfg = REG_RD(dma, regi_dma9, rw_cfg);
		dma_in_cfg.en = regk_dma_no;
		REG_WR(dma, regi_dma9, rw_cfg, dma_in_cfg);

		/* Disable the cryptocop. */
		rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
		rw_cfg.en = 0;
		REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);

		pj = cryptocop_running_job;
		cryptocop_running_job = NULL;

		/* Call callback to notify consumer of job removal. */
		DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
		pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
		pj->oper->cb(pj->oper, pj->oper->cb_data);

		delete_internal_operation(pj->iop);
		kfree(pj);
	}
	spin_unlock_irqrestore(&running_job_lock, flags);

	/* Remove completed jobs, if any. */
	spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);

	list_for_each_safe(node, tmp, &cryptocop_completed_jobs) {
		pj = list_entry(node, struct cryptocop_prio_job, node);
		list_del(node);
		/* Call callback to notify consumer of job removal. */
		DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
		pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
		pj->oper->cb(pj->oper, pj->oper->cb_data);

		delete_internal_operation(pj->iop);
		kfree(pj);
	}
	spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
}
2119
2120
2121static void cryptocop_start_job(void)
2122{
2123	int                          i;
2124	struct cryptocop_prio_job    *pj;
2125	unsigned long int            flags;
2126	unsigned long int            running_job_flags;
2127	reg_strcop_rw_cfg            rw_cfg = {.en = 1, .ignore_sync = 0};
2128
2129	DEBUG(printk("cryptocop_start_job: entering\n"));
2130
2131	spin_lock_irqsave(&running_job_lock, running_job_flags);
2132	if (cryptocop_running_job != NULL){
2133		/* Already running. */
2134		DEBUG(printk("cryptocop_start_job: already running, exit\n"));
2135		spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2136		return;
2137	}
2138	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
2139
2140	/* Check the queues in priority order. */
2141	for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++);
2142	if (i == cryptocop_prio_no_prios) {
2143		spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2144		spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2145		DEBUG(printk("cryptocop_start_job: no jobs to run\n"));
2146		return; /* No jobs to run */
2147	}
2148	DEBUG(printk("starting job for prio %d\n", i));
2149
2150	/* TODO: Do not starve lower priority jobs.  Let in a lower
2151	 * prio job for every N-th processed higher prio job or some
2152	 * other scheduling policy.  This could reasonably be
2153	 * tweakable since the optimal balance would depend on the
2154	 * type of load on the system. */
2155
2156	/* Pull the DMA lists from the job and start the DMA client. */
2157	pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node);
2158	list_del(&pj->node);
2159	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2160	cryptocop_running_job = pj;
2161
2162	/* Set config register (3DES and CSUM modes). */
2163	switch (pj->iop->tdes_mode){
2164	case cryptocop_3des_eee:
2165		rw_cfg.td1 = regk_strcop_e;
2166		rw_cfg.td2 = regk_strcop_e;
2167		rw_cfg.td3 = regk_strcop_e;
2168		break;
2169	case cryptocop_3des_eed:
2170		rw_cfg.td1 = regk_strcop_e;
2171		rw_cfg.td2 = regk_strcop_e;
2172		rw_cfg.td3 = regk_strcop_d;
2173		break;
2174	case cryptocop_3des_ede:
2175		rw_cfg.td1 = regk_strcop_e;
2176		rw_cfg.td2 = regk_strcop_d;
2177		rw_cfg.td3 = regk_strcop_e;
2178		break;
2179	case cryptocop_3des_edd:
2180		rw_cfg.td1 = regk_strcop_e;
2181		rw_cfg.td2 = regk_strcop_d;
2182		rw_cfg.td3 = regk_strcop_d;
2183		break;
2184	case cryptocop_3des_dee:
2185		rw_cfg.td1 = regk_strcop_d;
2186		rw_cfg.td2 = regk_strcop_e;
2187		rw_cfg.td3 = regk_strcop_e;
2188		break;
2189	case cryptocop_3des_ded:
2190		rw_cfg.td1 = regk_strcop_d;
2191		rw_cfg.td2 = regk_strcop_e;
2192		rw_cfg.td3 = regk_strcop_d;
2193		break;
2194	case cryptocop_3des_dde:
2195		rw_cfg.td1 = regk_strcop_d;
2196		rw_cfg.td2 = regk_strcop_d;
2197		rw_cfg.td3 = regk_strcop_e;
2198		break;
2199	case cryptocop_3des_ddd:
2200		rw_cfg.td1 = regk_strcop_d;
2201		rw_cfg.td2 = regk_strcop_d;
2202		rw_cfg.td3 = regk_strcop_d;
2203		break;
2204	default:
2205		DEBUG(printk("cryptocop_setup_dma_list: bad 3DES mode\n"));
2206	}
2207	switch (pj->iop->csum_mode){
2208	case cryptocop_csum_le:
2209		rw_cfg.ipend = regk_strcop_little;
2210		break;
2211	case cryptocop_csum_be:
2212		rw_cfg.ipend = regk_strcop_big;
2213		break;
2214	default:
2215		DEBUG(printk("cryptocop_setup_dma_list: bad checksum mode\n"));
2216	}
2217	REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2218
2219	DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n"
2220		     "ctx_in: 0x%p, phys: 0x%p\n"
2221		     "ctx_out: 0x%p, phys: 0x%p\n",
2222		     pj,
2223		     &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in),
2224		     &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out)));
2225
2226	/* Start input DMA. */
2227	DMA_START_CONTEXT(regi_dma9, virt_to_phys(&pj->iop->ctx_in));
2228
2229	/* Start output DMA. */
2230	DMA_START_CONTEXT(regi_dma8, virt_to_phys(&pj->iop->ctx_out));
2231
2232	spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2233	DEBUG(printk("cryptocop_start_job: exiting\n"));
2234}
2235
2236
/* Allocate and initialise the internal job representation (*pj) for an
 * operation.  For use_dmalists operations the consumer supplies ready
 * DMA descriptor lists and only the DMA contexts are set up here;
 * otherwise the lists are built by cryptocop_setup_dma_list().
 * Uses GFP_ATOMIC when called from interrupt context.
 * Returns 0 on success, -ENOMEM or -EINVAL on failure (with *pj freed).
 */
static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation)
{
	int  err;
	int  alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL;
	void *iop_alloc_ptr = NULL;

	*pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag);
	if (!*pj) return -ENOMEM;

	DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation));

	(*pj)->oper = operation;
	DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n",  (*pj)->oper->cb, (*pj)->oper->cb_data));

	if (operation->use_dmalists) {
		DEBUG(print_user_dma_lists(&operation->list_op));
		if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){
			DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n"));
			kfree(*pj);
			return -EINVAL;
		}
		/* Over-allocate by DESCR_ALLOC_PAD so the struct can be placed
		 * such that the ctx_out member lands on a 32-byte boundary
		 * (see the address computation below). */
		iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
		if (!iop_alloc_ptr) {
			DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n"));
			kfree(*pj);
			return -ENOMEM;
		}
		/* Round the address of ctx_out down to 32-byte alignment, then
		 * subtract the member offset back out to get the struct base;
		 * the original kmalloc pointer is kept in alloc_ptr for kfree. */
		(*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
		DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation)));
		(*pj)->iop->alloc_ptr = iop_alloc_ptr;
		(*pj)->iop->sid = operation->sid;
		(*pj)->iop->cdesc_out = NULL;
		(*pj)->iop->cdesc_in = NULL;
		(*pj)->iop->tdes_mode = operation->list_op.tdes_mode;
		(*pj)->iop->csum_mode = operation->list_op.csum_mode;
		(*pj)->iop->ddesc_out = operation->list_op.outlist;
		(*pj)->iop->ddesc_in = operation->list_op.inlist;

		/* Setup DMA contexts. */
		(*pj)->iop->ctx_out.next = NULL;
		(*pj)->iop->ctx_out.eol = 1;
		(*pj)->iop->ctx_out.saved_data = operation->list_op.outlist;
		(*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf;

		(*pj)->iop->ctx_in.next = NULL;
		(*pj)->iop->ctx_in.eol = 1;
		(*pj)->iop->ctx_in.saved_data = operation->list_op.inlist;
		(*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf;
	} else {
		if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) {
			DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err));
			kfree(*pj);
			return err;
		}
	}
	DEBUG(print_dma_descriptors((*pj)->iop));

	DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n"));

	return 0;
}
2298
2299
2300static int cryptocop_open(struct inode *inode, struct file *filp)
2301{
2302	int p = iminor(inode);
2303
2304	if (p != CRYPTOCOP_MINOR) return -EINVAL;
2305
2306	filp->private_data = NULL;
2307	return 0;
2308}
2309
2310
2311static int cryptocop_release(struct inode *inode, struct file *filp)
2312{
2313	struct cryptocop_private *dev = filp->private_data;
2314	struct cryptocop_private *dev_next;
2315
2316	while (dev){
2317		dev_next = dev->next;
2318		if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) {
2319			(void)cryptocop_free_session(dev->sid);
2320		}
2321		kfree(dev);
2322		dev = dev_next;
2323	}
2324
2325	return 0;
2326}
2327
2328
2329static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp,
2330					 unsigned int cmd, unsigned long arg)
2331{
2332	struct cryptocop_private  *dev = filp->private_data;
2333	struct cryptocop_private  *prev_dev = NULL;
2334	struct strcop_session_op  *sess_op = (struct strcop_session_op *)arg;
2335	struct strcop_session_op  sop;
2336	int                       err;
2337
2338	DEBUG(printk("cryptocop_ioctl_close_session\n"));
2339
2340	if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op)))
2341		return -EFAULT;
2342	err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2343	if (err) return -EFAULT;
2344
2345	while (dev && (dev->sid != sop.ses_id)) {
2346		prev_dev = dev;
2347		dev = dev->next;
2348	}
2349	if (dev){
2350		if (prev_dev){
2351			prev_dev->next = dev->next;
2352		} else {
2353			filp->private_data = dev->next;
2354		}
2355		err = cryptocop_free_session(dev->sid);
2356		if (err) return -EFAULT;
2357	} else {
2358		DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id));
2359		return -EINVAL;
2360	}
2361	return 0;
2362}
2363
2364
2365static void ioctl_process_job_callback(struct cryptocop_operation *op, void*cb_data)
2366{
2367	struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data;
2368
2369	DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data));
2370
2371	jc->processed = 1;
2372	wake_up(&cryptocop_ioc_process_wq);
2373}
2374
2375
2376#define CRYPTOCOP_IOCTL_CIPHER_TID  (1)
2377#define CRYPTOCOP_IOCTL_DIGEST_TID  (2)
2378#define CRYPTOCOP_IOCTL_CSUM_TID    (3)
2379
/* Return the input-data index of the first transform configuration
 * change, i.e. the smallest start offset among the active transforms.
 *
 * NOTE(review): ch_ix starts at 0, so when do_cipher is unset the
 * digest/csum start offsets can never win the '<' comparisons and 0 is
 * returned even if those transforms start later in the data.  Mapping
 * from index 0 is merely conservative for the caller, but the asymmetry
 * with the cipher case looks unintended -- confirm before changing.
 */
static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op)
{
	size_t ch_ix = 0;

	if (crp_op->do_cipher) ch_ix = crp_op->cipher_start;
	if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start;
	if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start;

	DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix));
	return ch_ix;
}
2391
2392
2393static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix)
2394{
2395	size_t ch_ix = INT_MAX;
2396	size_t tmp_ix = 0;
2397
2398	if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){
2399		if (crp_op->cipher_start > ix) {
2400			ch_ix = crp_op->cipher_start;
2401		} else {
2402			ch_ix = crp_op->cipher_start + crp_op->cipher_len;
2403		}
2404	}
2405	if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){
2406		if (crp_op->digest_start > ix) {
2407			tmp_ix = crp_op->digest_start;
2408		} else {
2409			tmp_ix = crp_op->digest_start + crp_op->digest_len;
2410		}
2411		if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2412	}
2413	if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){
2414		if (crp_op->csum_start > ix) {
2415			tmp_ix = crp_op->csum_start;
2416		} else {
2417			tmp_ix = crp_op->csum_start + crp_op->csum_len;
2418		}
2419		if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2420	}
2421	if (ch_ix == INT_MAX) ch_ix = ix;
2422	DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix, ch_ix));
2423	return ch_ix;
2424}
2425
2426
/* Map map_length bytes from the pages starting on *pageix and *pageoffset to iovecs starting on *iovix.
 * *iovix, *pageix and *pageoffset are advanced past what was consumed so
 * successive calls continue where the previous one stopped.
 * Return -1 for ok, 0 for fail (iovec or page array exhausted) --
 * note the inverted convention relative to the usual 0-on-success. */
static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length )
{
	int tmplen;

	assert(iov != NULL);
	assert(iovix != NULL);
	assert(pages != NULL);
	assert(pageix != NULL);
	assert(pageoffset != NULL);

	DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));

	while (map_length > 0){
		DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
		if (*iovix >= iovlen){
			DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen));
			return 0;
		}
		if (*pageix >= nopages){
			DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages));
			return 0;
		}
		iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset;
		tmplen = PAGE_SIZE - *pageoffset;
		if (tmplen < map_length){
			/* Remainder of this page consumed; continue on the next page. */
			(*pageoffset) = 0;
			(*pageix)++;
		} else {
			/* Request fits within the current page. */
			tmplen = map_length;
			(*pageoffset) += map_length;
		}
		DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix));
		iov[*iovix].iov_len = tmplen;
		map_length -= tmplen;
		(*iovix)++;
	}
	DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix));
	return -1;
}
2468
2469
2470
2471static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2472{
2473	int                             i;
2474	struct cryptocop_private        *dev = filp->private_data;
2475	struct strcop_crypto_op         *crp_oper = (struct strcop_crypto_op *)arg;
2476	struct strcop_crypto_op         oper = {0};
2477	int                             err = 0;
2478	struct cryptocop_operation      *cop = NULL;
2479
2480	struct ioctl_job_cb_ctx         *jc = NULL;
2481
2482	struct page                     **inpages = NULL;
2483	struct page                     **outpages = NULL;
2484	int                             noinpages = 0;
2485	int                             nooutpages = 0;
2486
2487	struct cryptocop_desc           descs[5]; /* Max 5 descriptors are needed, there are three transforms that
2488						   * can get connected/disconnected on different places in the indata. */
2489	struct cryptocop_desc_cfg       dcfgs[5*3];
2490	int                             desc_ix = 0;
2491	int                             dcfg_ix = 0;
2492	struct cryptocop_tfrm_cfg       ciph_tcfg = {0};
2493	struct cryptocop_tfrm_cfg       digest_tcfg = {0};
2494	struct cryptocop_tfrm_cfg       csum_tcfg = {0};
2495
2496	unsigned char                   *digest_result = NULL;
2497	int                             digest_length = 0;
2498	int                             cblocklen = 0;
2499	unsigned char                   csum_result[CSUM_BLOCK_LENGTH];
2500	struct cryptocop_session        *sess;
2501
2502	int    iovlen = 0;
2503	int    iovix = 0;
2504	int    pageix = 0;
2505	int    pageoffset = 0;
2506
2507	size_t prev_ix = 0;
2508	size_t next_ix;
2509
2510	int    cipher_active, digest_active, csum_active;
2511	int    end_digest, end_csum;
2512	int    digest_done = 0;
2513	int    cipher_done = 0;
2514	int    csum_done = 0;
2515
2516	DEBUG(printk("cryptocop_ioctl_process\n"));
2517
2518	if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){
2519		DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n"));
2520		return -EFAULT;
2521	}
2522	if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) {
2523		DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n"));
2524		return -EFAULT;
2525	}
2526	DEBUG(print_strcop_crypto_op(&oper));
2527
2528	while (dev && dev->sid != oper.ses_id) dev = dev->next;
2529	if (!dev){
2530		DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id));
2531		return -EINVAL;
2532	}
2533
2534	/* Check buffers. */
2535	if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){
2536		DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n"));
2537		return -EINVAL;
2538	}
2539
2540	if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){
2541		DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n"));
2542		return -EFAULT;
2543	}
2544	if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){
2545		DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n"));
2546		return -EFAULT;
2547	}
2548
2549	cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL);
2550	if (!cop) {
2551		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2552		return -ENOMEM;
2553	}
2554	jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL);
2555	if (!jc) {
2556		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2557		err = -ENOMEM;
2558		goto error_cleanup;
2559	}
2560	jc->processed = 0;
2561
2562	cop->cb_data = jc;
2563	cop->cb = ioctl_process_job_callback;
2564	cop->operation_status = 0;
2565	cop->use_dmalists = 0;
2566	cop->in_interrupt = 0;
2567	cop->fast_callback = 0;
2568	cop->tfrm_op.tfrm_cfg = NULL;
2569	cop->tfrm_op.desc = NULL;
2570	cop->tfrm_op.indata = NULL;
2571	cop->tfrm_op.incount = 0;
2572	cop->tfrm_op.inlen = 0;
2573	cop->tfrm_op.outdata = NULL;
2574	cop->tfrm_op.outcount = 0;
2575	cop->tfrm_op.outlen = 0;
2576
2577	sess = get_session(oper.ses_id);
2578	if (!sess){
2579		DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n"));
2580		kfree(cop);
2581		kfree(jc);
2582		return -EINVAL;
2583	}
2584
2585	if (oper.do_cipher) {
2586		unsigned int                    cipher_outlen = 0;
2587		struct cryptocop_transform_ctx  *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID);
2588		if (!tc) {
2589			DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n"));
2590			err = -EINVAL;
2591			goto error_cleanup;
2592		}
2593		ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2594		ciph_tcfg.inject_ix = 0;
2595		ciph_tcfg.flags = 0;
2596		if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){
2597			DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n"));
2598			kfree(cop);
2599			kfree(jc);
2600			return -EINVAL;
2601		}
2602		cblocklen = tc->init.alg == cryptocop_alg_aes ? AES_BLOCK_LENGTH : DES_BLOCK_LENGTH;
2603		if (oper.cipher_len % cblocklen) {
2604			kfree(cop);
2605			kfree(jc);
2606			DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n"));
2607			return -EINVAL;
2608		}
2609		cipher_outlen = oper.cipher_len;
2610		if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){
2611			if (oper.cipher_explicit) {
2612				ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV;
2613				memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen);
2614			} else {
2615				cipher_outlen = oper.cipher_len - cblocklen;
2616			}
2617		} else {
2618			if (oper.cipher_explicit){
2619				kfree(cop);
2620				kfree(jc);
2621				DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n"));
2622				return -EINVAL;
2623			}
2624		}
2625		if (oper.cipher_outlen != cipher_outlen) {
2626			kfree(cop);
2627			kfree(jc);
2628			DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen));
2629			return -EINVAL;
2630		}
2631
2632		if (oper.decrypt){
2633			ciph_tcfg.flags |= CRYPTOCOP_DECRYPT;
2634		} else {
2635			ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT;
2636		}
2637		ciph_tcfg.next = cop->tfrm_op.tfrm_cfg;
2638		cop->tfrm_op.tfrm_cfg = &ciph_tcfg;
2639	}
2640	if (oper.do_digest){
2641		struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID);
2642		if (!tc) {
2643			DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n"));
2644			err = -EINVAL;
2645			goto error_cleanup;
2646		}
2647		digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20;
2648		digest_result = kmalloc(digest_length, GFP_KERNEL);
2649		if (!digest_result) {
2650			DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n"));
2651			err = -EINVAL;
2652			goto error_cleanup;
2653		}
2654		DEBUG(memset(digest_result, 0xff, digest_length));
2655
2656		digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2657		digest_tcfg.inject_ix = 0;
2658		ciph_tcfg.inject_ix += digest_length;
2659		if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){
2660			DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n"));
2661			err = -EINVAL;
2662			goto error_cleanup;
2663		}
2664
2665		digest_tcfg.next = cop->tfrm_op.tfrm_cfg;
2666		cop->tfrm_op.tfrm_cfg = &digest_tcfg;
2667	}
2668	if (oper.do_csum){
2669		csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID;
2670		csum_tcfg.inject_ix = digest_length;
2671		ciph_tcfg.inject_ix += 2;
2672
2673		if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){
2674			DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n"));
2675			kfree(cop);
2676			kfree(jc);
2677			return -EINVAL;
2678		}
2679
2680		csum_tcfg.next = cop->tfrm_op.tfrm_cfg;
2681		cop->tfrm_op.tfrm_cfg = &csum_tcfg;
2682	}
2683
2684	prev_ix = first_cfg_change_ix(&oper);
2685	if (prev_ix > oper.inlen) {
2686		DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n"));
2687		nooutpages = noinpages = 0;
2688		err = -EINVAL;
2689		goto error_cleanup;
2690	}
2691	DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen));
2692
2693	/* Map user pages for in and out data of the operation. */
2694	noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT;
2695	DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages));
2696	inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL);
2697	if (!inpages){
2698		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n"));
2699		nooutpages = noinpages = 0;
2700		err = -ENOMEM;
2701		goto error_cleanup;
2702	}
2703	if (oper.do_cipher){
2704		nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT;
2705		DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages));
2706		outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL);
2707		if (!outpages){
2708			DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n"));
2709			nooutpages = noinpages = 0;
2710			err = -ENOMEM;
2711			goto error_cleanup;
2712		}
2713	}
2714
2715	/* Acquire the mm page semaphore. */
2716	down_read(&current->mm->mmap_sem);
2717
2718	err = get_user_pages(current,
2719			     current->mm,
2720			     (unsigned long int)(oper.indata + prev_ix),
2721			     noinpages,
2722			     0,  /* read access only for in data */
2723			     0, /* no force */
2724			     inpages,
2725			     NULL);
2726
2727	if (err < 0) {
2728		up_read(&current->mm->mmap_sem);
2729		nooutpages = noinpages = 0;
2730		DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
2731		goto error_cleanup;
2732	}
2733	noinpages = err;
2734	if (oper.do_cipher){
2735		err = get_user_pages(current,
2736				     current->mm,
2737				     (unsigned long int)oper.cipher_outdata,
2738				     nooutpages,
2739				     1, /* write access for out data */
2740				     0, /* no force */
2741				     outpages,
2742				     NULL);
2743		up_read(&current->mm->mmap_sem);
2744		if (err < 0) {
2745			nooutpages = 0;
2746			DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
2747			goto error_cleanup;
2748		}
2749		nooutpages = err;
2750	} else {
2751		up_read(&current->mm->mmap_sem);
2752	}
2753
2754	/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
2755	 * csum output and splits when units are (dis-)connected. */
2756	cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL);
2757	cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL);
2758	if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) {
2759		DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n"));
2760		err = -ENOMEM;
2761		goto error_cleanup;
2762	}
2763
2764	cop->tfrm_op.inlen = oper.inlen - prev_ix;
2765	cop->tfrm_op.outlen = 0;
2766	if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen;
2767	if (oper.do_digest) cop->tfrm_op.outlen += digest_length;
2768	if (oper.do_csum) cop->tfrm_op.outlen += 2;
2769
2770	/* Setup the in iovecs. */
2771	cop->tfrm_op.incount = noinpages;
2772	if (noinpages > 1){
2773		size_t tmplen = cop->tfrm_op.inlen;
2774
2775		cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2776		cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2777		tmplen -= cop->tfrm_op.indata[0].iov_len;
2778		for (i = 1; i<noinpages; i++){
2779			cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE;
2780			cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]);
2781			tmplen -= PAGE_SIZE;
2782		}
2783	} else {
2784		cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix;
2785		cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2786	}
2787
2788	iovlen = nooutpages + 6;
2789	pageoffset = oper.do_cipher ? ((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0;
2790
2791	next_ix = next_cfg_change_ix(&oper, prev_ix);
2792	if (prev_ix == next_ix){
2793		DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n"));
2794		err = -EINVAL;  /* This should be impossible barring bugs. */
2795		goto error_cleanup;
2796	}
2797	while (prev_ix != next_ix){
2798		end_digest = end_csum = cipher_active = digest_active = csum_active = 0;
2799		descs[desc_ix].cfg = NULL;
2800		descs[desc_ix].length = next_ix - prev_ix;
2801
2802		if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) {
2803			dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2804			dcfgs[dcfg_ix].src = cryptocop_source_dma;
2805			cipher_active = 1;
2806
2807			if (next_ix == (oper.cipher_start + oper.cipher_len)){
2808				cipher_done = 1;
2809				dcfgs[dcfg_ix].last = 1;
2810			} else {
2811				dcfgs[dcfg_ix].last = 0;
2812			}
2813			dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2814			descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2815			++dcfg_ix;
2816		}
2817		if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) {
2818			digest_active = 1;
2819			dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2820			dcfgs[dcfg_ix].src = cryptocop_source_dma;
2821			if (next_ix == (oper.digest_start + oper.digest_len)){
2822				assert(!digest_done);
2823				digest_done = 1;
2824				dcfgs[dcfg_ix].last = 1;
2825			} else {
2826				dcfgs[dcfg_ix].last = 0;
2827			}
2828			dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2829			descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2830			++dcfg_ix;
2831		}
2832		if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){
2833			csum_active = 1;
2834			dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID;
2835			dcfgs[dcfg_ix].src = cryptocop_source_dma;
2836			if (next_ix == (oper.csum_start + oper.csum_len)){
2837				csum_done = 1;
2838				dcfgs[dcfg_ix].last = 1;
2839			} else {
2840				dcfgs[dcfg_ix].last = 0;
2841			}
2842			dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2843			descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2844			++dcfg_ix;
2845		}
2846		if (!descs[desc_ix].cfg){
2847			DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix));
2848			err = -EINVAL;
2849			goto error_cleanup;
2850		}
2851		descs[desc_ix].next = &(descs[desc_ix]) + 1;
2852		++desc_ix;
2853		prev_ix = next_ix;
2854		next_ix = next_cfg_change_ix(&oper, prev_ix);
2855	}
2856	if (desc_ix > 0){
2857		descs[desc_ix-1].next = NULL;
2858	} else {
2859		descs[0].next = NULL;
2860	}
2861	if (oper.do_digest) {
2862		DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix));
2863		/* Add outdata iovec, length == <length of type of digest> */
2864		cop->tfrm_op.outdata[iovix].iov_base = digest_result;
2865		cop->tfrm_op.outdata[iovix].iov_len = digest_length;
2866		++iovix;
2867	}
2868	if (oper.do_csum) {
2869		/* Add outdata iovec, length == 2, the length of csum. */
2870		DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix));
2871		/* Add outdata iovec, length == <length of type of digest> */
2872		cop->tfrm_op.outdata[iovix].iov_base = csum_result;
2873		cop->tfrm_op.outdata[iovix].iov_len = 2;
2874		++iovix;
2875	}
2876	if (oper.do_cipher) {
2877		if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){
2878			DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n"));
2879			err = -ENOSYS; /* This should be impossible barring bugs. */
2880			goto error_cleanup;
2881		}
2882	}
2883	DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix));
2884	cop->tfrm_op.outcount = iovix;
2885	assert(iovix <= (nooutpages + 6));
2886
2887	cop->sid = oper.ses_id;
2888	cop->tfrm_op.desc = &descs[0];
2889
2890	DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data));
2891
2892	if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) {
2893		DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err));
2894		err = -EINVAL;
2895		goto error_cleanup;
2896	}
2897
2898	DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n"));
2899
2900	wait_event(cryptocop_ioc_process_wq, (jc->processed != 0));
2901	DEBUG(printk("cryptocop_ioctl_process: end wait for result\n"));
2902        if (!jc->processed){
2903		printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n");
2904		err = -EIO;
2905		goto error_cleanup;
2906	}
2907
2908	/* Job process done.  Cipher output should already be correct in job so no post processing of outdata. */
2909	DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status));
2910	if (cop->operation_status == 0){
2911		if (oper.do_digest){
2912			DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length));
2913			err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length);
2914			if (0 != err){
2915				DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err));
2916				err = -EFAULT;
2917				goto error_cleanup;
2918			}
2919		}
2920		if (oper.do_csum){
2921			DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n"));
2922			err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2);
2923			if (0 != err){
2924				DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err));
2925				err = -EFAULT;
2926				goto error_cleanup;
2927			}
2928		}
2929		err = 0;
2930	} else {
2931		DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status));
2932		err = cop->operation_status;
2933	}
2934
2935 error_cleanup:
2936	/* Release page caches. */
2937	for (i = 0; i < noinpages; i++){
2938		put_page(inpages[i]);
2939	}
2940	for (i = 0; i < nooutpages; i++){
2941		int spdl_err;
2942		/* Mark output pages dirty. */
2943		spdl_err = set_page_dirty_lock(outpages[i]);
2944		DEBUG(if (spdl_err < 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
2945	}
2946	for (i = 0; i < nooutpages; i++){
2947		put_page(outpages[i]);
2948	}
2949
2950	kfree(digest_result);
2951	kfree(inpages);
2952	kfree(outpages);
2953	if (cop){
2954		kfree(cop->tfrm_op.indata);
2955		kfree(cop->tfrm_op.outdata);
2956		kfree(cop);
2957	}
2958	kfree(jc);
2959
2960	DEBUG(print_lock_status());
2961
2962	return err;
2963}
2964
2965
2966static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2967{
2968	cryptocop_session_id             sid;
2969	int                              err;
2970	struct cryptocop_private         *dev;
2971	struct strcop_session_op         *sess_op = (struct strcop_session_op *)arg;
2972	struct strcop_session_op         sop;
2973	struct cryptocop_transform_init  *tis = NULL;
2974	struct cryptocop_transform_init  ti_cipher = {0};
2975	struct cryptocop_transform_init  ti_digest = {0};
2976	struct cryptocop_transform_init  ti_csum = {0};
2977
2978	if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op)))
2979		return -EFAULT;
2980	err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2981	if (err) return -EFAULT;
2982	if (sop.cipher != cryptocop_cipher_none) {
2983		if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT;
2984	}
2985	DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n"));
2986
2987	DEBUG(printk("\tcipher:%d\n"
2988		     "\tcipher_mode:%d\n"
2989		     "\tdigest:%d\n"
2990		     "\tcsum:%d\n",
2991		     (int)sop.cipher,
2992		     (int)sop.cmode,
2993		     (int)sop.digest,
2994		     (int)sop.csum));
2995
2996	if (sop.cipher != cryptocop_cipher_none){
2997		/* Init the cipher. */
2998		switch (sop.cipher){
2999		case cryptocop_cipher_des:
3000			ti_cipher.alg = cryptocop_alg_des;
3001			break;
3002		case cryptocop_cipher_3des:
3003			ti_cipher.alg = cryptocop_alg_3des;
3004			break;
3005		case cryptocop_cipher_aes:
3006			ti_cipher.alg = cryptocop_alg_aes;
3007			break;
3008		default:
3009			DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher));
3010			return -EINVAL;
3011		};
3012		DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg));
3013		copy_from_user(ti_cipher.key, sop.key, sop.keylen/8);
3014		ti_cipher.keylen = sop.keylen;
3015		switch (sop.cmode){
3016		case cryptocop_cipher_mode_cbc:
3017		case cryptocop_cipher_mode_ecb:
3018			ti_cipher.cipher_mode = sop.cmode;
3019			break;
3020		default:
3021			DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode));
3022			return -EINVAL;
3023		}
3024		DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher.cipher_mode));
3025		switch (sop.des3_mode){
3026		case cryptocop_3des_eee:
3027		case cryptocop_3des_eed:
3028		case cryptocop_3des_ede:
3029		case cryptocop_3des_edd:
3030		case cryptocop_3des_dee:
3031		case cryptocop_3des_ded:
3032		case cryptocop_3des_dde:
3033		case cryptocop_3des_ddd:
3034			ti_cipher.tdes_mode = sop.des3_mode;
3035			break;
3036		default:
3037			DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode));
3038			return -EINVAL;
3039		}
3040		ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
3041		ti_cipher.next = tis;
3042		tis = &ti_cipher;
3043	} /* if (sop.cipher != cryptocop_cipher_none) */
3044	if (sop.digest != cryptocop_digest_none){
3045		DEBUG(printk("setting digest transform\n"));
3046		switch (sop.digest){
3047		case cryptocop_digest_md5:
3048			ti_digest.alg = cryptocop_alg_md5;
3049			break;
3050		case cryptocop_digest_sha1:
3051			ti_digest.alg = cryptocop_alg_sha1;
3052			break;
3053		default:
3054			DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest));
3055			return -EINVAL;
3056		}
3057		ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
3058		ti_digest.next = tis;
3059		tis = &ti_digest;
3060	} /* if (sop.digest != cryptocop_digest_none) */
3061	if (sop.csum != cryptocop_csum_none){
3062		DEBUG(printk("setting csum transform\n"));
3063		switch (sop.csum){
3064		case cryptocop_csum_le:
3065		case cryptocop_csum_be:
3066			ti_csum.csum_mode = sop.csum;
3067			break;
3068		default:
3069			DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum));
3070			return -EINVAL;
3071		}
3072		ti_csum.alg = cryptocop_alg_csum;
3073		ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID;
3074		ti_csum.next = tis;
3075		tis = &ti_csum;
3076	} /* (sop.csum != cryptocop_csum_none) */
3077	dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL);
3078	if (!dev){
3079		DEBUG_API(printk("create session, alloc dev\n"));
3080		return -ENOMEM;
3081	}
3082
3083	err = cryptocop_new_session(&sid, tis, GFP_KERNEL);
3084	DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);});
3085
3086	if (err) {
3087		kfree(dev);
3088		return err;
3089	}
3090	sess_op->ses_id = sid;
3091	dev->sid = sid;
3092	dev->next = filp->private_data;
3093	filp->private_data = dev;
3094
3095	return 0;
3096}
3097
3098static int cryptocop_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
3099{
3100	int err = 0;
3101	if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) {
3102		DEBUG_API(printk("cryptocop_ioctl: wrong type\n"));
3103		return -ENOTTY;
3104	}
3105	if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){
3106		return -ENOTTY;
3107	}
3108	/* Access check of the argument.  Some commands, e.g. create session and process op,
3109	   needs additional checks.  Those are handled in the command handling functions. */
3110	if (_IOC_DIR(cmd) & _IOC_READ)
3111		err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
3112	else if (_IOC_DIR(cmd) & _IOC_WRITE)
3113		err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
3114	if (err) return -EFAULT;
3115
3116	switch (cmd) {
3117	case CRYPTOCOP_IO_CREATE_SESSION:
3118		return cryptocop_ioctl_create_session(inode, filp, cmd, arg);
3119	case CRYPTOCOP_IO_CLOSE_SESSION:
3120		return cryptocop_ioctl_close_session(inode, filp, cmd, arg);
3121	case CRYPTOCOP_IO_PROCESS_OP:
3122		return cryptocop_ioctl_process(inode, filp, cmd, arg);
3123	default:
3124		DEBUG_API(printk("cryptocop_ioctl: unknown command\n"));
3125		return -ENOTTY;
3126	}
3127	return 0;
3128}
3129
3130
3131#ifdef LDEBUG
3132static void print_dma_descriptors(struct cryptocop_int_operation *iop)
3133{
3134	struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out;
3135	struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in;
3136	int                       i;
3137
3138	printk("print_dma_descriptors start\n");
3139
3140	printk("iop:\n");
3141	printk("\tsid: 0x%lld\n", iop->sid);
3142
3143	printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
3144	printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
3145	printk("\tddesc_out: 0x%p\n", iop->ddesc_out);
3146	printk("\tddesc_in: 0x%p\n", iop->ddesc_in);
3147
3148	printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out));
3149	printk("\tnext: 0x%p\n"
3150	       "\tsaved_data: 0x%p\n"
3151	       "\tsaved_data_buf: 0x%p\n",
3152	       iop->ctx_out.next,
3153	       iop->ctx_out.saved_data,
3154	       iop->ctx_out.saved_data_buf);
3155
3156	printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in));
3157	printk("\tnext: 0x%p\n"
3158	       "\tsaved_data: 0x%p\n"
3159	       "\tsaved_data_buf: 0x%p\n",
3160	       iop->ctx_in.next,
3161	       iop->ctx_in.saved_data,
3162	       iop->ctx_in.saved_data_buf);
3163
3164	i = 0;
3165	while (cdesc_out) {
3166		dma_descr_data *td;
3167		printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr);
3168		printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr));
3169		td = cdesc_out->dma_descr;
3170		printk("\n\tbuf: 0x%p\n"
3171		       "\tafter: 0x%p\n"
3172		       "\tmd: 0x%04x\n"
3173		       "\tnext: 0x%p\n",
3174		       td->buf,
3175		       td->after,
3176		       td->md,
3177		       td->next);
3178		printk("flags:\n"
3179		       "\twait:\t%d\n"
3180		       "\teol:\t%d\n"
3181		       "\touteop:\t%d\n"
3182		       "\tineop:\t%d\n"
3183		       "\tintr:\t%d\n",
3184		       td->wait,
3185		       td->eol,
3186		       td->out_eop,
3187		       td->in_eop,
3188		       td->intr);
3189		cdesc_out = cdesc_out->next;
3190		i++;
3191	}
3192	i = 0;
3193	while (cdesc_in) {
3194		dma_descr_data *td;
3195		printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr);
3196		printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr));
3197		td = cdesc_in->dma_descr;
3198		printk("\n\tbuf: 0x%p\n"
3199		       "\tafter: 0x%p\n"
3200		       "\tmd: 0x%04x\n"
3201		       "\tnext: 0x%p\n",
3202		       td->buf,
3203		       td->after,
3204		       td->md,
3205		       td->next);
3206		printk("flags:\n"
3207		       "\twait:\t%d\n"
3208		       "\teol:\t%d\n"
3209		       "\touteop:\t%d\n"
3210		       "\tineop:\t%d\n"
3211		       "\tintr:\t%d\n",
3212		       td->wait,
3213		       td->eol,
3214		       td->out_eop,
3215		       td->in_eop,
3216		       td->intr);
3217		cdesc_in = cdesc_in->next;
3218		i++;
3219	}
3220
3221	printk("print_dma_descriptors end\n");
3222}
3223
3224
/* LDEBUG-only helper: dump every field of a user-facing strcop_crypto_op.
 * Note: the "outdata"/"outlen" labels actually print cipher_outdata and
 * cipher_outlen -- there are no other out fields in the struct. */
static void print_strcop_crypto_op(struct strcop_crypto_op *cop)
{
	printk("print_strcop_crypto_op, 0x%p\n", cop);

	/* Indata. */
	printk("indata=0x%p\n"
	       "inlen=%d\n"
	       "do_cipher=%d\n"
	       "decrypt=%d\n"
	       "cipher_explicit=%d\n"
	       "cipher_start=%d\n"
	       "cipher_len=%d\n"
	       "outdata=0x%p\n"
	       "outlen=%d\n",
	       cop->indata,
	       cop->inlen,
	       cop->do_cipher,
	       cop->decrypt,
	       cop->cipher_explicit,
	       cop->cipher_start,
	       cop->cipher_len,
	       cop->cipher_outdata,
	       cop->cipher_outlen);

	/* Digest configuration (flag plus the covered [start, start+len) range). */
	printk("do_digest=%d\n"
	       "digest_start=%d\n"
	       "digest_len=%d\n",
	       cop->do_digest,
	       cop->digest_start,
	       cop->digest_len);

	/* Checksum configuration (flag plus the covered [start, start+len) range). */
	printk("do_csum=%d\n"
	       "csum_start=%d\n"
	       "csum_len=%d\n",
	       cop->do_csum,
	       cop->csum_start,
	       cop->csum_len);
}
3263
3264static void print_cryptocop_operation(struct cryptocop_operation *cop)
3265{
3266	struct cryptocop_desc      *d;
3267	struct cryptocop_tfrm_cfg  *tc;
3268	struct cryptocop_desc_cfg  *dc;
3269	int                        i;
3270
3271	printk("print_cryptocop_operation, cop=0x%p\n\n", cop);
3272	printk("sid: %lld\n", cop->sid);
3273	printk("operation_status=%d\n"
3274	       "use_dmalists=%d\n"
3275	       "in_interrupt=%d\n"
3276	       "fast_callback=%d\n",
3277	       cop->operation_status,
3278	       cop->use_dmalists,
3279	       cop->in_interrupt,
3280	       cop->fast_callback);
3281
3282	if (cop->use_dmalists){
3283		print_user_dma_lists(&cop->list_op);
3284	} else {
3285		printk("cop->tfrm_op\n"
3286		       "tfrm_cfg=0x%p\n"
3287		       "desc=0x%p\n"
3288		       "indata=0x%p\n"
3289		       "incount=%d\n"
3290		       "inlen=%d\n"
3291		       "outdata=0x%p\n"
3292		       "outcount=%d\n"
3293		       "outlen=%d\n\n",
3294		       cop->tfrm_op.tfrm_cfg,
3295		       cop->tfrm_op.desc,
3296		       cop->tfrm_op.indata,
3297		       cop->tfrm_op.incount,
3298		       cop->tfrm_op.inlen,
3299		       cop->tfrm_op.outdata,
3300		       cop->tfrm_op.outcount,
3301		       cop->tfrm_op.outlen);
3302
3303		tc = cop->tfrm_op.tfrm_cfg;
3304		while (tc){
3305			printk("tfrm_cfg, 0x%p\n"
3306			       "tid=%d\n"
3307			       "flags=%d\n"
3308			       "inject_ix=%d\n"
3309			       "next=0x%p\n",
3310			       tc,
3311			       tc->tid,
3312			       tc->flags,
3313			       tc->inject_ix,
3314			       tc->next);
3315			tc = tc->next;
3316		}
3317		d = cop->tfrm_op.desc;
3318		while (d){
3319			printk("\n======================desc, 0x%p\n"
3320			       "length=%d\n"
3321			       "cfg=0x%p\n"
3322			       "next=0x%p\n",
3323			       d,
3324			       d->length,
3325			       d->cfg,
3326			       d->next);
3327			dc = d->cfg;
3328			while (dc){
3329				printk("=========desc_cfg, 0x%p\n"
3330				       "tid=%d\n"
3331				       "src=%d\n"
3332				       "last=%d\n"
3333				       "next=0x%p\n",
3334				       dc,
3335				       dc->tid,
3336				       dc->src,
3337				       dc->last,
3338				       dc->next);
3339				dc = dc->next;
3340			}
3341			d = d->next;
3342		}
3343		printk("\n====iniov\n");
3344		for (i = 0; i < cop->tfrm_op.incount; i++){
3345			printk("indata[%d]\n"
3346			       "base=0x%p\n"
3347			       "len=%d\n",
3348			       i,
3349			       cop->tfrm_op.indata[i].iov_base,
3350			       cop->tfrm_op.indata[i].iov_len);
3351		}
3352		printk("\n====outiov\n");
3353		for (i = 0; i < cop->tfrm_op.outcount; i++){
3354			printk("outdata[%d]\n"
3355			       "base=0x%p\n"
3356			       "len=%d\n",
3357			       i,
3358			       cop->tfrm_op.outdata[i].iov_base,
3359			       cop->tfrm_op.outdata[i].iov_len);
3360		}
3361	}
3362	printk("------------end print_cryptocop_operation\n");
3363}
3364
3365
3366static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op)
3367{
3368	dma_descr_data *dd;
3369	int i;
3370
3371	printk("print_user_dma_lists, dma_op=0x%p\n", dma_op);
3372
3373	printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf));
3374	printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf));
3375
3376	printk("##############outlist\n");
3377	dd = phys_to_virt((unsigned long int)dma_op->outlist);
3378	i = 0;
3379	while (dd != NULL) {
3380		printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3381		printk("\n\tbuf: 0x%p\n"
3382		       "\tafter: 0x%p\n"
3383		       "\tmd: 0x%04x\n"
3384		       "\tnext: 0x%p\n",
3385		       dd->buf,
3386		       dd->after,
3387		       dd->md,
3388		       dd->next);
3389		printk("flags:\n"
3390		       "\twait:\t%d\n"
3391		       "\teol:\t%d\n"
3392		       "\touteop:\t%d\n"
3393		       "\tineop:\t%d\n"
3394		       "\tintr:\t%d\n",
3395		       dd->wait,
3396		       dd->eol,
3397		       dd->out_eop,
3398		       dd->in_eop,
3399		       dd->intr);
3400		if (dd->eol)
3401			dd = NULL;
3402		else
3403			dd = phys_to_virt((unsigned long int)dd->next);
3404		++i;
3405	}
3406
3407	printk("##############inlist\n");
3408	dd = phys_to_virt((unsigned long int)dma_op->inlist);
3409	i = 0;
3410	while (dd != NULL) {
3411		printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3412		printk("\n\tbuf: 0x%p\n"
3413		       "\tafter: 0x%p\n"
3414		       "\tmd: 0x%04x\n"
3415		       "\tnext: 0x%p\n",
3416		       dd->buf,
3417		       dd->after,
3418		       dd->md,
3419		       dd->next);
3420		printk("flags:\n"
3421		       "\twait:\t%d\n"
3422		       "\teol:\t%d\n"
3423		       "\touteop:\t%d\n"
3424		       "\tineop:\t%d\n"
3425		       "\tintr:\t%d\n",
3426		       dd->wait,
3427		       dd->eol,
3428		       dd->out_eop,
3429		       dd->in_eop,
3430		       dd->intr);
3431		if (dd->eol)
3432			dd = NULL;
3433		else
3434			dd = phys_to_virt((unsigned long int)dd->next);
3435		++i;
3436	}
3437}
3438
3439
3440static void print_lock_status(void)
3441{
3442	printk("**********************print_lock_status\n");
3443	printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock));
3444	printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock));
3445	printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock));
3446	printk("cryptocop_sessions_lock %d\n", spin_is_locked(cryptocop_sessions_lock));
3447	printk("running_job_lock %d\n", spin_is_locked(running_job_lock));
3448	printk("cryptocop_process_lock %d\n", spin_is_locked(cryptocop_process_lock));
3449}
3450#endif /* LDEBUG */
3451
3452
3453static const char cryptocop_name[] = "ETRAX FS stream co-processor";
3454
3455static int init_stream_coprocessor(void)
3456{
3457	int err;
3458	int i;
3459	static int initialized = 0;
3460
3461	if (initialized)
3462		return 0;
3463
3464	initialized = 1;
3465
3466	printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n");
3467
3468	err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops);
3469	if (err < 0) {
3470		printk(KERN_ERR "stream co-processor: could not get major number.\n");
3471		return err;
3472	}
3473
3474	err = init_cryptocop();
3475	if (err) {
3476		(void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3477		return err;
3478	}
3479	err = cryptocop_job_queue_init();
3480	if (err) {
3481		release_cryptocop();
3482		(void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3483		return err;
3484	}
3485	/* Init the descriptor pool. */
3486	for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) {
3487		descr_pool[i].from_pool = 1;
3488		descr_pool[i].next = &descr_pool[i + 1];
3489	}
3490	descr_pool[i].from_pool = 1;
3491	descr_pool[i].next = NULL;
3492	descr_pool_free_list = &descr_pool[0];
3493	descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE;
3494
3495	spin_lock_init(&cryptocop_completed_jobs_lock);
3496	spin_lock_init(&cryptocop_job_queue_lock);
3497	spin_lock_init(&descr_pool_lock);
3498	spin_lock_init(&cryptocop_sessions_lock);
3499	spin_lock_init(&running_job_lock);
3500	spin_lock_init(&cryptocop_process_lock);
3501
3502	cryptocop_sessions = NULL;
3503	next_sid = 1;
3504
3505	cryptocop_running_job = NULL;
3506
3507	printk("stream co-processor: init done.\n");
3508	return 0;
3509}
3510
/* Module unload: release the co-processor hardware, then tear down the
 * job queue machinery. */
static void __exit exit_stream_coprocessor(void)
{
	release_cryptocop();
	cryptocop_job_queue_close();
}

module_init(init_stream_coprocessor);
module_exit(exit_stream_coprocessor);
3519