/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

#define TALITOS_TIMEOUT 100000
#define TALITOS_MAX_DATA_LEN 65535

#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)

/* descriptor pointer entry */
struct talitos_ptr {
	__be16 len;	/* length */
	u8 j_extent;	/* jump to sg link table and/or extent */
	u8 eptr;	/* extended address */
	__be32 ptr;	/* address */
};

static const struct talitos_ptr zero_entry = {
	.len = 0,
	.j_extent = 0,
	.eptr = 0,
	.ptr = 0
};

/* descriptor */
struct talitos_desc {
	__be32 hdr;			/* header high bits */
	__be32 hdr_lo;			/* header low bits */
	struct talitos_ptr ptr[7];	/* ptr/len pair array */
};

/**
 * talitos_request - descriptor submission request
 * @desc: descriptor pointer (kernel virtual)
 * @dma_desc: descriptor's physical bus address
 * @callback: whom to call when descriptor processing is done
 * @context: caller context (optional)
 */
struct talitos_request {
	struct talitos_desc *desc;
	dma_addr_t dma_desc;
	void (*callback) (struct device *dev, struct talitos_desc *desc,
	                  void *context, int error);
	void *context;
};

/* per-channel fifo management */
struct talitos_channel {
	/* request fifo */
	struct talitos_request *fifo;

	/* number of requests pending in channel h/w fifo */
	atomic_t submit_count ____cacheline_aligned;

	/* request submission (head) lock */
	spinlock_t head_lock ____cacheline_aligned;
	/* index to next free descriptor request */
	int head;

	/* request release (tail) lock */
	spinlock_t tail_lock ____cacheline_aligned;
	/* index to next in-progress/done descriptor request */
	int tail;
};

struct talitos_private {
	struct device *dev;
	struct platform_device *ofdev;
	void __iomem *reg;
	int irq;

	/* SEC version geometry (from device tree node) */
	unsigned int num_channels;
	unsigned int chfifo_len;
	unsigned int exec_units;
	unsigned int desc_types;

	/* SEC Compatibility info */
	unsigned long features;

	/*
	 * length of the request fifo
	 * fifo_len is chfifo_len rounded up to next power of 2
	 * so we can use bitwise ops to wrap
	 */
	unsigned int fifo_len;

	struct talitos_channel *chan;

	/* channel to assign to the next incoming descriptor */
	atomic_t last_chan ____cacheline_aligned;

	/* request callback tasklet */
	struct tasklet_struct done_task;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* hwrng device */
	struct hwrng rng;
};
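
/*
 * For illustration: fifo_len is a power of two, so the head/tail indices
 * can wrap with a mask instead of a modulo.  E.g. a chfifo_len of 24
 * would be rounded up to fifo_len = 32, and then:
 *
 *	head = (head + 1) & (fifo_len - 1);	wraps ..., 30, 31, 0, 1, ...
 *
 * This is the index arithmetic used by talitos_submit() and
 * flush_channel() below.
 */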

/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
#define TALITOS_FTR_SHA224_HWINIT 0x00000004

static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	talitos_ptr->eptr = upper_32_bits(dma_addr);
}
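
/*
 * For illustration: under 36-bit addressing, a bus address such as
 * 0x2_1234_5678 splits across the two pointer fields set above
 * (eptr is a plain u8, so it takes the upper bits unswapped):
 *
 *	dma_addr_t dma = 0x212345678ULL;
 *	talitos_ptr->ptr  = cpu_to_be32(lower_32_bits(dma));	0x12345678
 *	talitos_ptr->eptr = upper_32_bits(dma);			0x2
 */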

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	talitos_ptr->len = cpu_to_be16(len);
	to_talitos_ptr(talitos_ptr, dma_addr);
	talitos_ptr->j_extent = extent;
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);

	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
		          TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags, ch;
	int head;

	/* select done notification */
	desc->hdr |= DESC_HDR_DONE_NOTIFY;

	/* emulate SEC's round-robin channel fifo polling scheme */
	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->reg + TALITOS_FF(ch),
		 upper_32_bits(request->dma_desc));
	out_be32(priv->reg + TALITOS_FF_LO(ch),
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}

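/*
 * A minimal caller sketch (hypothetical names, for illustration only):
 * the caller dma-maps its descriptor pointers, submits, and treats
 * -EINPROGRESS as success; the final status arrives in the callback,
 * which must check both the error argument and the DESC_HDR_DONE
 * feedback in desc->hdr.  -EAGAIN means the channel fifo was full and
 * the caller may retry.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{ ... }
 *
 *	ret = talitos_submit(dev, desc, my_done, my_ctx);
 *	if (ret != -EINPROGRESS)
 *		return ret;
 */
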
/*
 * process completed requests; if a request did not complete, hand its
 * callback the error status
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
static void talitos_done(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch;

	for (ch = 0; ch < priv->num_channels; ch++)
		flush_channel(dev, ch, 0, 0);

	/* At this point, all completed channels have been processed.
	 * Unmask done interrupts for channels completed later on.
	 */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
}

/*
 * locate current (offending) descriptor
 */
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->chan[ch].tail;
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));

	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->chan[ch].tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return NULL;
		}
	}

	return priv->chan[ch].fifo[tail].desc;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch,
			    struct talitos_desc *desc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->reg + TALITOS_CCCR(ch),
				  TALITOS_CCCR_CONT);
			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
		        "ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

static irqreturn_t talitos_interrupt(int irq, void *data)
{
	struct device *dev = data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 isr, isr_lo;

	isr = in_be32(priv->reg + TALITOS_ISR);
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
	/* Acknowledge interrupt */
	out_be32(priv->reg + TALITOS_ICR, isr);
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);

	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
		talitos_error((unsigned long)data, isr, isr_lo);
	else
		if (likely(isr & TALITOS_ISR_CHDONE)) {
			/* mask further done interrupts. */
			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
			/* done_task will unmask done interrupts at exit */
			tasklet_schedule(&priv->done_task);
		}

	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
}
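
/*
 * For illustration: ISR reports each channel as a done/error bit pair
 * (done in the even bit per the TALITOS_ISR_CHDONE mask usage above,
 * error in the odd bit), which is why talitos_error() tests
 * bit (2 * ch + 1):
 *
 *	ch 0: done = bit 0, error = bit 1
 *	ch 1: done = bit 2, error = bit 3
 *	...
 *	if (isr & (1 << (ch * 2 + 1)))	error pending on channel ch
 */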

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses: read both words of the fifo
	 * entry; the high word is discarded and the low word returned */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		64
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_BLOCK_SIZE    64

struct talitos_ctx {
	struct device *dev;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
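
/*
 * For illustration: the key blob parsed by aead_setkey() above is the
 * rtattr-encoded authenc() key format of this kernel generation, i.e. a
 * parameter header followed by the two keys back to back:
 *
 *	struct rtattr                    rta_type = CRYPTO_AUTHENC_KEYA_PARAM
 *	struct crypto_authenc_key_param  __be32 enckeylen
 *	u8 authkey[keylen - enckeylen]
 *	u8 enckey[enckeylen]
 *
 * e.g. authenc(hmac(sha1),cbc(aes)) with a 20-byte hmac key and a
 * 128-bit AES key gives authkeylen = 20 and enckeylen = 16, stored
 * contiguously in ctx->key.
 */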

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	int src_is_chained;
	int dst_is_chained;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  int chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = scatterwalk_sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_is_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_is_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_is_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dma_len) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) the last one or two entries' len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
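
/*
 * Worked example of the tail adjustment above: for cryptlen = 100 and
 * three mapped segments of 40 bytes each, the loop first emits lengths
 * {40, 40, 40}, leaving cryptlen = 100 - 120 = -20.  The final entry is
 * then shortened to give {40, 40, 20}, and is tagged with
 * DESC_PTR_LNKTBL_RETURN so the SEC stops walking the table there.
 */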

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u8 *giv, u64 seq,
		     void (*callback) (struct device *dev,
				       struct talitos_desc *desc,
				       void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       0, DMA_TO_DEVICE);
	/* hmac data */
	map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
			       sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
	/* cipher iv */
	map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
			       DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen, 0,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC appended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_is_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE,
					  edesc->dst_is_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       (edesc->src_nents + 1) *
			       sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  link_tbl_ptr);

		/* Add an entry to the link table for ICV data */
		link_tbl_ptr += sg_count - 1;
		link_tbl_ptr->j_extent = 0;
		sg_count++;
		link_tbl_ptr++;
		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		link_tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents + 2) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = 0;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = 1;
		sg = scatterwalk_sg_next(sg);
	}

	return sg_nents;
}

/**
 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 The number of bytes to skip before copying.
 *                       Note: skip + buflen should equal SG total size.
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
				    void *buf, size_t buflen, unsigned int skip)
{
	unsigned int offset = 0;
	unsigned int boffset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	size_t total_buffer = buflen + skip;

	sg_flags |= SG_MITER_FROM_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < total_buffer) {
		unsigned int len;
		unsigned int ignore;

		if ((offset + miter.length) > skip) {
			if (offset < skip) {
				/* Copy part of this segment */
				ignore = skip - offset;
				len = miter.length - ignore;
				if (boffset + len > buflen)
					len = buflen - boffset;
				memcpy(buf + boffset, miter.addr + ignore, len);
			} else {
				/* Copy all of this segment (up to buflen) */
				len = miter.length;
				if (boffset + len > buflen)
					len = buflen - boffset;
				memcpy(buf + boffset, miter.addr, len);
			}
			boffset += len;
		}
		offset += miter.length;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return boffset;
}
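
/*
 * Usage sketch (hypothetical values): to stash the final 12 ICV bytes of
 * a 112-byte request spread across a scatterlist, skip everything before
 * the tail:
 *
 *	sg_copy_end_to_buffer(sgl, nents, icv_buf, 12, 112 - 12);
 *
 * i.e. buflen + skip equals the total byte count of the list, as the
 * contract above requires.  ahash_process_req() below uses it the same
 * way to save a partial trailing block.
 */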

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 int hash_result,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 int icv_stashing,
						 u32 cryptoflags)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	int src_chained, dst_chained = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
	src_nents = (src_nents == 1) ? 0 : src_nents;

	if (hash_result) {
		dst_nents = 0;
	} else {
		if (dst == src) {
			dst_nents = src_nents;
		} else {
			dst_nents = sg_count(dst, cryptlen + authsize,
					     &dst_chained);
			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
		}
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2) *
				 sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->src_is_chained = src_chained;
	edesc->dst_is_chained = dst_chained;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
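
/*
 * For illustration, when a link table is needed the dma-mapped area
 * allocated above is laid out as:
 *
 *	link_tbl[0 .. src_nents]			input link table
 *	link_tbl[src_nents + 1 .. src_nents + dst_nents + 1]
 *							output link table
 *	link_tbl[src_nents + dst_nents + 2]		ICV data (authsize)
 *
 * which matches the "+ 1" and "+ 2" indexing used by ipsec_esp() and by
 * the icvdata pointers in the completion callbacks above.
 */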
1278
1279static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1280					      int icv_stashing)
1281{
1282	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1283	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1284
1285	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1286				   areq->cryptlen, ctx->authsize, icv_stashing,
1287				   areq->base.flags);
1288}
1289
1290static int aead_encrypt(struct aead_request *req)
1291{
1292	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1293	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1294	struct talitos_edesc *edesc;
1295
1296	/* allocate extended descriptor */
1297	edesc = aead_edesc_alloc(req, 0);
1298	if (IS_ERR(edesc))
1299		return PTR_ERR(edesc);
1300
1301	/* set encrypt */
1302	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1303
1304	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1305}
1306
1307static int aead_decrypt(struct aead_request *req)
1308{
1309	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1310	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1311	unsigned int authsize = ctx->authsize;
1312	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1313	struct talitos_edesc *edesc;
1314	struct scatterlist *sg;
1315	void *icvdata;
1316
1317	req->cryptlen -= authsize;
1318
1319	/* allocate extended descriptor */
1320	edesc = aead_edesc_alloc(req, 1);
1321	if (IS_ERR(edesc))
1322		return PTR_ERR(edesc);
1323
1324	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1325	    ((!edesc->src_nents && !edesc->dst_nents) ||
1326	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1327
1328		/* decrypt and check the ICV */
1329		edesc->desc.hdr = ctx->desc_hdr_template |
1330				  DESC_HDR_DIR_INBOUND |
1331				  DESC_HDR_MODE1_MDEU_CICV;
1332
1333		/* reset integrity check result bits */
1334		edesc->desc.hdr_lo = 0;
1335
1336		return ipsec_esp(edesc, req, NULL, 0,
1337				 ipsec_esp_decrypt_hwauth_done);
1338
1339	}
1340
1341	/* Have to check the ICV with software */
1342	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1343
1344	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1345	if (edesc->dma_len)
1346		icvdata = &edesc->link_tbl[edesc->src_nents +
1347					   edesc->dst_nents + 2];
1348	else
1349		icvdata = &edesc->link_tbl[0];
1350
1351	sg = sg_last(req->src, edesc->src_nents ? : 1);
1352
1353	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1354	       ctx->authsize);
1355
1356	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1357}
1358
1359static int aead_givencrypt(struct aead_givcrypt_request *req)
1360{
1361	struct aead_request *areq = &req->areq;
1362	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1363	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1364	struct talitos_edesc *edesc;
1365
1366	/* allocate extended descriptor */
1367	edesc = aead_edesc_alloc(areq, 0);
1368	if (IS_ERR(edesc))
1369		return PTR_ERR(edesc);
1370
1371	/* set encrypt */
1372	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1373
1374	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1375	/* avoid consecutive packets going out with same IV */
1376	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1377
1378	return ipsec_esp(edesc, areq, req->giv, req->seq,
1379			 ipsec_esp_encrypt_done);
1380}
1381
1382static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1383			     const u8 *key, unsigned int keylen)
1384{
1385	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1386	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
1387
1388	if (keylen > TALITOS_MAX_KEY_SIZE)
1389		goto badkey;
1390
1391	if (keylen < alg->min_keysize || keylen > alg->max_keysize)
1392		goto badkey;
1393
1394	memcpy(&ctx->key, key, keylen);
1395	ctx->keylen = keylen;
1396
1397	return 0;
1398
1399badkey:
1400	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1401	return -EINVAL;
1402}
1403
1404static void common_nonsnoop_unmap(struct device *dev,
1405				  struct talitos_edesc *edesc,
1406				  struct ablkcipher_request *areq)
1407{
1408	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1409	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1410	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1411
1412	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1413
1414	if (edesc->dma_len)
1415		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1416				 DMA_BIDIRECTIONAL);
1417}
1418
1419static void ablkcipher_done(struct device *dev,
1420			    struct talitos_desc *desc, void *context,
1421			    int err)
1422{
1423	struct ablkcipher_request *areq = context;
1424	struct talitos_edesc *edesc;
1425
1426	edesc = container_of(desc, struct talitos_edesc, desc);
1427
1428	common_nonsnoop_unmap(dev, edesc, areq);
1429
1430	kfree(edesc);
1431
1432	areq->base.complete(&areq->base, err);
1433}
1434
1435static int common_nonsnoop(struct talitos_edesc *edesc,
1436			   struct ablkcipher_request *areq,
1437			   u8 *giv,
1438			   void (*callback) (struct device *dev,
1439					     struct talitos_desc *desc,
1440					     void *context, int error))
1441{
1442	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1443	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1444	struct device *dev = ctx->dev;
1445	struct talitos_desc *desc = &edesc->desc;
1446	unsigned int cryptlen = areq->nbytes;
1447	unsigned int ivsize;
1448	int sg_count, ret;
1449
1450	/* first DWORD empty */
1451	desc->ptr[0].len = 0;
1452	to_talitos_ptr(&desc->ptr[0], 0);
1453	desc->ptr[0].j_extent = 0;
1454
1455	/* cipher iv */
1456	ivsize = crypto_ablkcipher_ivsize(cipher);
1457	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
1458			       DMA_TO_DEVICE);
1459
1460	/* cipher key */
1461	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1462			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1463
1464	/*
1465	 * cipher in
1466	 */
1467	desc->ptr[3].len = cpu_to_be16(cryptlen);
1468	desc->ptr[3].j_extent = 0;
1469
1470	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1471				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1472							   : DMA_TO_DEVICE,
1473				  edesc->src_is_chained);
1474
1475	if (sg_count == 1) {
1476		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1477	} else {
1478		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1479					  &edesc->link_tbl[0]);
1480		if (sg_count > 1) {
1481			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1482			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1483			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1484						   edesc->dma_len,
1485						   DMA_BIDIRECTIONAL);
1486		} else {
1487			/* Only one segment now, so no link tbl needed */
1488			to_talitos_ptr(&desc->ptr[3],
1489				       sg_dma_address(areq->src));
1490		}
1491	}
1492
1493	/* cipher out */
1494	desc->ptr[4].len = cpu_to_be16(cryptlen);
1495	desc->ptr[4].j_extent = 0;
1496
1497	if (areq->src != areq->dst)
1498		sg_count = talitos_map_sg(dev, areq->dst,
1499					  edesc->dst_nents ? : 1,
1500					  DMA_FROM_DEVICE,
1501					  edesc->dst_is_chained);
1502
1503	if (sg_count == 1) {
1504		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1505	} else {
1506		struct talitos_ptr *link_tbl_ptr =
1507			&edesc->link_tbl[edesc->src_nents + 1];
1508
1509		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1510					      (edesc->src_nents + 1) *
1511					      sizeof(struct talitos_ptr));
1512		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1513		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1514					  link_tbl_ptr);
1515		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1516					   edesc->dma_len, DMA_BIDIRECTIONAL);
1517	}
1518
1519	/* iv out */
1520	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1521			       DMA_FROM_DEVICE);
1522
1523	/* last DWORD empty */
1524	desc->ptr[6].len = 0;
1525	to_talitos_ptr(&desc->ptr[6], 0);
1526	desc->ptr[6].j_extent = 0;
1527
1528	ret = talitos_submit(dev, desc, callback, areq);
1529	if (ret != -EINPROGRESS) {
1530		common_nonsnoop_unmap(dev, edesc, areq);
1531		kfree(edesc);
1532	}
1533	return ret;
1534}
1535
1536static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1537						    areq)
1538{
1539	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1540	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1541
1542	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1543				   areq->nbytes, 0, 0, areq->base.flags);
1544}
1545
1546static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1547{
1548	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1549	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1550	struct talitos_edesc *edesc;
1551
1552	/* allocate extended descriptor */
1553	edesc = ablkcipher_edesc_alloc(areq);
1554	if (IS_ERR(edesc))
1555		return PTR_ERR(edesc);
1556
1557	/* set encrypt */
1558	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1559
1560	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1561}
1562
1563static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1564{
1565	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1566	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1567	struct talitos_edesc *edesc;
1568
1569	/* allocate extended descriptor */
1570	edesc = ablkcipher_edesc_alloc(areq);
1571	if (IS_ERR(edesc))
1572		return PTR_ERR(edesc);
1573
1574	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1575
1576	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1577}
1578
1579static void common_nonsnoop_hash_unmap(struct device *dev,
1580				       struct talitos_edesc *edesc,
1581				       struct ahash_request *areq)
1582{
1583	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1584
1585	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1586
1587	/* When using hashctx-in, must unmap it. */
1588	if (edesc->desc.ptr[1].len)
1589		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1590					 DMA_TO_DEVICE);
1591
1592	if (edesc->desc.ptr[2].len)
1593		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1594					 DMA_TO_DEVICE);
1595
1596	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1597
1598	if (edesc->dma_len)
1599		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1600				 DMA_BIDIRECTIONAL);
1601
1602}
1603
1604static void ahash_done(struct device *dev,
1605		       struct talitos_desc *desc, void *context,
1606		       int err)
1607{
1608	struct ahash_request *areq = context;
1609	struct talitos_edesc *edesc =
1610		 container_of(desc, struct talitos_edesc, desc);
1611	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1612
1613	if (!req_ctx->last && req_ctx->to_hash_later) {
1614		/* Position any partial block for next update/final/finup */
1615		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1616		req_ctx->nbuf = req_ctx->to_hash_later;
1617	}
1618	common_nonsnoop_hash_unmap(dev, edesc, areq);
1619
1620	kfree(edesc);
1621
1622	areq->base.complete(&areq->base, err);
1623}
1624
1625static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1626				struct ahash_request *areq, unsigned int length,
1627				void (*callback) (struct device *dev,
1628						  struct talitos_desc *desc,
1629						  void *context, int error))
1630{
1631	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1632	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1633	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1634	struct device *dev = ctx->dev;
1635	struct talitos_desc *desc = &edesc->desc;
1636	int sg_count, ret;
1637
1638	/* first DWORD empty */
1639	desc->ptr[0] = zero_entry;
1640
1641	/* hash context in */
1642	if (!req_ctx->first || req_ctx->swinit) {
1643		map_single_talitos_ptr(dev, &desc->ptr[1],
1644				       req_ctx->hw_context_size,
1645				       (char *)req_ctx->hw_context, 0,
1646				       DMA_TO_DEVICE);
1647		req_ctx->swinit = 0;
1648	} else {
1649		desc->ptr[1] = zero_entry;
1650		/* Indicate next op is not the first. */
1651		req_ctx->first = 0;
1652	}
1653
1654	/* HMAC key */
1655	if (ctx->keylen)
1656		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1657				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1658	else
1659		desc->ptr[2] = zero_entry;
1660
1661	/*
1662	 * data in
1663	 */
1664	desc->ptr[3].len = cpu_to_be16(length);
1665	desc->ptr[3].j_extent = 0;
1666
1667	sg_count = talitos_map_sg(dev, req_ctx->psrc,
1668				  edesc->src_nents ? : 1,
1669				  DMA_TO_DEVICE,
1670				  edesc->src_is_chained);
1671
1672	if (sg_count == 1) {
1673		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1674	} else {
1675		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1676					  &edesc->link_tbl[0]);
1677		if (sg_count > 1) {
1678			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1679			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1680			dma_sync_single_for_device(ctx->dev,
1681						   edesc->dma_link_tbl,
1682						   edesc->dma_len,
1683						   DMA_BIDIRECTIONAL);
1684		} else {
1685			/* Only one segment now, so no link tbl needed */
1686			to_talitos_ptr(&desc->ptr[3],
1687				       sg_dma_address(req_ctx->psrc));
1688		}
1689	}
1690
1691	/* fifth DWORD empty */
1692	desc->ptr[4] = zero_entry;
1693
1694	/* hash/HMAC out -or- hash context out */
1695	if (req_ctx->last)
1696		map_single_talitos_ptr(dev, &desc->ptr[5],
1697				       crypto_ahash_digestsize(tfm),
1698				       areq->result, 0, DMA_FROM_DEVICE);
1699	else
1700		map_single_talitos_ptr(dev, &desc->ptr[5],
1701				       req_ctx->hw_context_size,
1702				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1703
1704	/* last DWORD empty */
1705	desc->ptr[6] = zero_entry;
1706
1707	ret = talitos_submit(dev, desc, callback, areq);
1708	if (ret != -EINPROGRESS) {
1709		common_nonsnoop_hash_unmap(dev, edesc, areq);
1710		kfree(edesc);
1711	}
1712	return ret;
1713}
1714
1715static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1716					       unsigned int nbytes)
1717{
1718	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1719	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1720	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1721
1722	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1723				   nbytes, 0, 0, areq->base.flags);
1724}
1725
1726static int ahash_init(struct ahash_request *areq)
1727{
1728	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1729	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1730
1731	/* Initialize the context */
1732	req_ctx->nbuf = 0;
1733	req_ctx->first = 1; /* first indicates h/w must init its context */
1734	req_ctx->swinit = 0; /* assume h/w init of context */
1735	req_ctx->hw_context_size =
1736		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1737			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1738			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1739
1740	return 0;
1741}
1742
1743/*
1744 * on h/w without explicit sha224 support, we initialize h/w context
1745 * manually with sha224 constants, and tell it to run sha256.
1746 */
1747static int ahash_init_sha224_swinit(struct ahash_request *areq)
1748{
1749	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1750
1751	ahash_init(areq);
1752	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1753
1754	req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
1755	req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
1756	req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
1757	req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
1758	req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
1759	req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
1760	req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
1761	req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
1762
1763	/* init 64-bit count */
1764	req_ctx->hw_context[8] = 0;
1765	req_ctx->hw_context[9] = 0;
1766
1767	return 0;
1768}
1769
1770static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1771{
1772	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1773	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1774	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1775	struct talitos_edesc *edesc;
1776	unsigned int blocksize =
1777			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1778	unsigned int nbytes_to_hash;
1779	unsigned int to_hash_later;
1780	unsigned int nsg;
1781	int chained;
1782
1783	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1784		/* Buffer up to one whole block */
1785		sg_copy_to_buffer(areq->src,
1786				  sg_count(areq->src, nbytes, &chained),
1787				  req_ctx->buf + req_ctx->nbuf, nbytes);
1788		req_ctx->nbuf += nbytes;
1789		return 0;
1790	}
1791
1792	/* At least (blocksize + 1) bytes are available to hash */
1793	nbytes_to_hash = nbytes + req_ctx->nbuf;
1794	to_hash_later = nbytes_to_hash & (blocksize - 1);
1795
1796	if (req_ctx->last)
1797		to_hash_later = 0;
1798	else if (to_hash_later)
1799		/* There is a partial block. Hash the full block(s) now */
1800		nbytes_to_hash -= to_hash_later;
1801	else {
1802		/* Keep one block buffered */
1803		nbytes_to_hash -= blocksize;
1804		to_hash_later = blocksize;
1805	}
1806
1807	/* Chain in any previously buffered data */
1808	if (req_ctx->nbuf) {
1809		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1810		sg_init_table(req_ctx->bufsl, nsg);
1811		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1812		if (nsg > 1)
1813			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1814		req_ctx->psrc = req_ctx->bufsl;
1815	} else
1816		req_ctx->psrc = areq->src;
1817
1818	if (to_hash_later) {
1819		int nents = sg_count(areq->src, nbytes, &chained);
1820		sg_copy_end_to_buffer(areq->src, nents,
1821				      req_ctx->bufnext,
1822				      to_hash_later,
1823				      nbytes - to_hash_later);
1824	}
1825	req_ctx->to_hash_later = to_hash_later;
1826
1827	/* Allocate extended descriptor */
1828	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1829	if (IS_ERR(edesc))
1830		return PTR_ERR(edesc);
1831
1832	edesc->desc.hdr = ctx->desc_hdr_template;
1833
1834	/* On last one, request SEC to pad; otherwise continue */
1835	if (req_ctx->last)
1836		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1837	else
1838		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1839
1840	/* request SEC to INIT hash. */
1841	if (req_ctx->first && !req_ctx->swinit)
1842		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1843
	/*
	 * When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
1847	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1848		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1849
1850	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1851				    ahash_done);
1852}
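
/*
 * Worked example of the buffering in ahash_process_req(), assuming a
 * 64-byte blocksize: with nbuf = 10 bytes already buffered and an
 * update of nbytes = 100, nbytes_to_hash = 110 and to_hash_later =
 * 110 % 64 = 46, so one 64-byte block is hashed now and 46 bytes are
 * saved in bufnext for the next call.  If the total were an exact
 * multiple of the blocksize, one full block would still be held back
 * so that the final descriptor always has data to process.
 */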
1853
1854static int ahash_update(struct ahash_request *areq)
1855{
1856	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1857
1858	req_ctx->last = 0;
1859
1860	return ahash_process_req(areq, areq->nbytes);
1861}
1862
1863static int ahash_final(struct ahash_request *areq)
1864{
1865	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1866
1867	req_ctx->last = 1;
1868
1869	return ahash_process_req(areq, 0);
1870}
1871
1872static int ahash_finup(struct ahash_request *areq)
1873{
1874	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1875
1876	req_ctx->last = 1;
1877
1878	return ahash_process_req(areq, areq->nbytes);
1879}
1880
1881static int ahash_digest(struct ahash_request *areq)
1882{
1883	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1884	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1885
1886	ahash->init(areq);
1887	req_ctx->last = 1;
1888
1889	return ahash_process_req(areq, areq->nbytes);
1890}
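
/*
 * Callers reach the hooks above through the generic ahash API; a
 * minimal sketch (my_done_cb, my_ctx, sgl, digest and len are
 * caller-supplied, error handling omitted):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sgl, digest, len);
 *	crypto_ahash_digest(req);	(may return -EINPROGRESS)
 */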
1891
1892struct talitos_alg_template {
1893	u32 type;
1894	union {
1895		struct crypto_alg crypto;
1896		struct ahash_alg hash;
1897	} alg;
1898	__be32 desc_hdr_template;
1899};
1900
1901static struct talitos_alg_template driver_algs[] = {
1902	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1903	{	.type = CRYPTO_ALG_TYPE_AEAD,
1904		.alg.crypto = {
1905			.cra_name = "authenc(hmac(sha1),cbc(aes))",
1906			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1907			.cra_blocksize = AES_BLOCK_SIZE,
1908			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1909			.cra_type = &crypto_aead_type,
1910			.cra_aead = {
1911				.setkey = aead_setkey,
1912				.setauthsize = aead_setauthsize,
1913				.encrypt = aead_encrypt,
1914				.decrypt = aead_decrypt,
1915				.givencrypt = aead_givencrypt,
1916				.geniv = "<built-in>",
1917				.ivsize = AES_BLOCK_SIZE,
1918				.maxauthsize = SHA1_DIGEST_SIZE,
1919			}
1920		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1928	},
1929	{	.type = CRYPTO_ALG_TYPE_AEAD,
1930		.alg.crypto = {
1931			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1932			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1933			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1934			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1935			.cra_type = &crypto_aead_type,
1936			.cra_aead = {
1937				.setkey = aead_setkey,
1938				.setauthsize = aead_setauthsize,
1939				.encrypt = aead_encrypt,
1940				.decrypt = aead_decrypt,
1941				.givencrypt = aead_givencrypt,
1942				.geniv = "<built-in>",
1943				.ivsize = DES3_EDE_BLOCK_SIZE,
1944				.maxauthsize = SHA1_DIGEST_SIZE,
1945			}
1946		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1955	},
1956	{	.type = CRYPTO_ALG_TYPE_AEAD,
1957		.alg.crypto = {
1958			.cra_name = "authenc(hmac(sha256),cbc(aes))",
1959			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1960			.cra_blocksize = AES_BLOCK_SIZE,
1961			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1962			.cra_type = &crypto_aead_type,
1963			.cra_aead = {
1964				.setkey = aead_setkey,
1965				.setauthsize = aead_setauthsize,
1966				.encrypt = aead_encrypt,
1967				.decrypt = aead_decrypt,
1968				.givencrypt = aead_givencrypt,
1969				.geniv = "<built-in>",
1970				.ivsize = AES_BLOCK_SIZE,
1971				.maxauthsize = SHA256_DIGEST_SIZE,
1972			}
1973		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1981	},
1982	{	.type = CRYPTO_ALG_TYPE_AEAD,
1983		.alg.crypto = {
1984			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1985			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1986			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1987			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1988			.cra_type = &crypto_aead_type,
1989			.cra_aead = {
1990				.setkey = aead_setkey,
1991				.setauthsize = aead_setauthsize,
1992				.encrypt = aead_encrypt,
1993				.decrypt = aead_decrypt,
1994				.givencrypt = aead_givencrypt,
1995				.geniv = "<built-in>",
1996				.ivsize = DES3_EDE_BLOCK_SIZE,
1997				.maxauthsize = SHA256_DIGEST_SIZE,
1998			}
1999		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2008	},
2009	{	.type = CRYPTO_ALG_TYPE_AEAD,
2010		.alg.crypto = {
2011			.cra_name = "authenc(hmac(md5),cbc(aes))",
2012			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2013			.cra_blocksize = AES_BLOCK_SIZE,
2014			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2015			.cra_type = &crypto_aead_type,
2016			.cra_aead = {
2017				.setkey = aead_setkey,
2018				.setauthsize = aead_setauthsize,
2019				.encrypt = aead_encrypt,
2020				.decrypt = aead_decrypt,
2021				.givencrypt = aead_givencrypt,
2022				.geniv = "<built-in>",
2023				.ivsize = AES_BLOCK_SIZE,
2024				.maxauthsize = MD5_DIGEST_SIZE,
2025			}
2026		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2034	},
2035	{	.type = CRYPTO_ALG_TYPE_AEAD,
2036		.alg.crypto = {
2037			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2038			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2039			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2040			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2041			.cra_type = &crypto_aead_type,
2042			.cra_aead = {
2043				.setkey = aead_setkey,
2044				.setauthsize = aead_setauthsize,
2045				.encrypt = aead_encrypt,
2046				.decrypt = aead_decrypt,
2047				.givencrypt = aead_givencrypt,
2048				.geniv = "<built-in>",
2049				.ivsize = DES3_EDE_BLOCK_SIZE,
2050				.maxauthsize = MD5_DIGEST_SIZE,
2051			}
2052		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2061	},
2062	/* ABLKCIPHER algorithms. */
2063	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2064		.alg.crypto = {
2065			.cra_name = "cbc(aes)",
2066			.cra_driver_name = "cbc-aes-talitos",
2067			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
2070			.cra_type = &crypto_ablkcipher_type,
2071			.cra_ablkcipher = {
2072				.setkey = ablkcipher_setkey,
2073				.encrypt = ablkcipher_encrypt,
2074				.decrypt = ablkcipher_decrypt,
2075				.geniv = "eseqiv",
2076				.min_keysize = AES_MIN_KEY_SIZE,
2077				.max_keysize = AES_MAX_KEY_SIZE,
2078				.ivsize = AES_BLOCK_SIZE,
2079			}
2080		},
2081		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2082				     DESC_HDR_SEL0_AESU |
2083				     DESC_HDR_MODE0_AESU_CBC,
2084	},
2085	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2086		.alg.crypto = {
2087			.cra_name = "cbc(des3_ede)",
2088			.cra_driver_name = "cbc-3des-talitos",
2089			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
2092			.cra_type = &crypto_ablkcipher_type,
2093			.cra_ablkcipher = {
2094				.setkey = ablkcipher_setkey,
2095				.encrypt = ablkcipher_encrypt,
2096				.decrypt = ablkcipher_decrypt,
2097				.geniv = "eseqiv",
2098				.min_keysize = DES3_EDE_KEY_SIZE,
2099				.max_keysize = DES3_EDE_KEY_SIZE,
2100				.ivsize = DES3_EDE_BLOCK_SIZE,
2101			}
2102		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
2107	},
2108	/* AHASH algorithms. */
2109	{	.type = CRYPTO_ALG_TYPE_AHASH,
2110		.alg.hash = {
2111			.init = ahash_init,
2112			.update = ahash_update,
2113			.final = ahash_final,
2114			.finup = ahash_finup,
2115			.digest = ahash_digest,
2116			.halg.digestsize = MD5_DIGEST_SIZE,
2117			.halg.base = {
2118				.cra_name = "md5",
2119				.cra_driver_name = "md5-talitos",
2120				.cra_blocksize = MD5_BLOCK_SIZE,
2121				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2122					     CRYPTO_ALG_ASYNC,
2123				.cra_type = &crypto_ahash_type
2124			}
2125		},
2126		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2127				     DESC_HDR_SEL0_MDEUA |
2128				     DESC_HDR_MODE0_MDEU_MD5,
2129	},
2130	{	.type = CRYPTO_ALG_TYPE_AHASH,
2131		.alg.hash = {
2132			.init = ahash_init,
2133			.update = ahash_update,
2134			.final = ahash_final,
2135			.finup = ahash_finup,
2136			.digest = ahash_digest,
2137			.halg.digestsize = SHA1_DIGEST_SIZE,
2138			.halg.base = {
2139				.cra_name = "sha1",
2140				.cra_driver_name = "sha1-talitos",
2141				.cra_blocksize = SHA1_BLOCK_SIZE,
2142				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2143					     CRYPTO_ALG_ASYNC,
2144				.cra_type = &crypto_ahash_type
2145			}
2146		},
2147		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2148				     DESC_HDR_SEL0_MDEUA |
2149				     DESC_HDR_MODE0_MDEU_SHA1,
2150	},
2151	{	.type = CRYPTO_ALG_TYPE_AHASH,
2152		.alg.hash = {
2153			.init = ahash_init,
2154			.update = ahash_update,
2155			.final = ahash_final,
2156			.finup = ahash_finup,
2157			.digest = ahash_digest,
2158			.halg.digestsize = SHA224_DIGEST_SIZE,
2159			.halg.base = {
2160				.cra_name = "sha224",
2161				.cra_driver_name = "sha224-talitos",
2162				.cra_blocksize = SHA224_BLOCK_SIZE,
2163				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2164					     CRYPTO_ALG_ASYNC,
2165				.cra_type = &crypto_ahash_type
2166			}
2167		},
2168		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2169				     DESC_HDR_SEL0_MDEUA |
2170				     DESC_HDR_MODE0_MDEU_SHA224,
2171	},
2172	{	.type = CRYPTO_ALG_TYPE_AHASH,
2173		.alg.hash = {
2174			.init = ahash_init,
2175			.update = ahash_update,
2176			.final = ahash_final,
2177			.finup = ahash_finup,
2178			.digest = ahash_digest,
2179			.halg.digestsize = SHA256_DIGEST_SIZE,
2180			.halg.base = {
2181				.cra_name = "sha256",
2182				.cra_driver_name = "sha256-talitos",
2183				.cra_blocksize = SHA256_BLOCK_SIZE,
2184				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2185					     CRYPTO_ALG_ASYNC,
2186				.cra_type = &crypto_ahash_type
2187			}
2188		},
2189		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2190				     DESC_HDR_SEL0_MDEUA |
2191				     DESC_HDR_MODE0_MDEU_SHA256,
2192	},
2193	{	.type = CRYPTO_ALG_TYPE_AHASH,
2194		.alg.hash = {
2195			.init = ahash_init,
2196			.update = ahash_update,
2197			.final = ahash_final,
2198			.finup = ahash_finup,
2199			.digest = ahash_digest,
2200			.halg.digestsize = SHA384_DIGEST_SIZE,
2201			.halg.base = {
2202				.cra_name = "sha384",
2203				.cra_driver_name = "sha384-talitos",
2204				.cra_blocksize = SHA384_BLOCK_SIZE,
2205				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2206					     CRYPTO_ALG_ASYNC,
2207				.cra_type = &crypto_ahash_type
2208			}
2209		},
2210		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2211				     DESC_HDR_SEL0_MDEUB |
2212				     DESC_HDR_MODE0_MDEUB_SHA384,
2213	},
2214	{	.type = CRYPTO_ALG_TYPE_AHASH,
2215		.alg.hash = {
2216			.init = ahash_init,
2217			.update = ahash_update,
2218			.final = ahash_final,
2219			.finup = ahash_finup,
2220			.digest = ahash_digest,
2221			.halg.digestsize = SHA512_DIGEST_SIZE,
2222			.halg.base = {
2223				.cra_name = "sha512",
2224				.cra_driver_name = "sha512-talitos",
2225				.cra_blocksize = SHA512_BLOCK_SIZE,
2226				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2227					     CRYPTO_ALG_ASYNC,
2228				.cra_type = &crypto_ahash_type
2229			}
2230		},
2231		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2232				     DESC_HDR_SEL0_MDEUB |
2233				     DESC_HDR_MODE0_MDEUB_SHA512,
2234	},
2235};
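
/*
 * Each template above is registered in talitos_probe() only when
 * hw_supports() confirms the needed descriptor type and execution
 * units, so e.g. "cbc(aes)" is only offered when the AESU bit is set
 * in fsl,exec-units-mask.  A direct consumer would then do, roughly:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 */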
2236
2237struct talitos_crypto_alg {
2238	struct list_head entry;
2239	struct device *dev;
2240	struct talitos_alg_template algt;
2241};
2242
2243static int talitos_cra_init(struct crypto_tfm *tfm)
2244{
2245	struct crypto_alg *alg = tfm->__crt_alg;
2246	struct talitos_crypto_alg *talitos_alg;
2247	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2248
2249	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2250		talitos_alg = container_of(__crypto_ahash_alg(alg),
2251					   struct talitos_crypto_alg,
2252					   algt.alg.hash);
2253	else
2254		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2255					   algt.alg.crypto);
2256
2257	/* update context with ptr to dev */
2258	ctx->dev = talitos_alg->dev;
2259
2260	/* copy descriptor header template value */
2261	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2262
2263	return 0;
2264}
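
/*
 * The container_of() dance above exists because ahash algorithms are
 * registered as the ahash_alg embedded in the template, so the lookup
 * must go through __crypto_ahash_alg(), whereas AEAD/ablkcipher
 * algorithms register the embedded crypto_alg directly.
 */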
2265
2266static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2267{
2268	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2269
2270	talitos_cra_init(tfm);
2271
2272	/* random first IV */
2273	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2274
2275	return 0;
2276}
2277
2278static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2279{
2280	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2281
2282	talitos_cra_init(tfm);
2283
2284	ctx->keylen = 0;
2285	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2286				 sizeof(struct talitos_ahash_req_ctx));
2287
2288	return 0;
2289}
2290
/*
 * given the alg's descriptor header template, determine whether the
 * descriptor type and the required primary/secondary execution units
 * match the h/w capabilities described in the device tree node.
 */
2296static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2297{
2298	struct talitos_private *priv = dev_get_drvdata(dev);
2299	int ret;
2300
	ret = ((1 << DESC_TYPE(desc_hdr_template)) & priv->desc_types) &&
	      ((1 << PRIMARY_EU(desc_hdr_template)) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && ((1 << SECONDARY_EU(desc_hdr_template)) &
			      priv->exec_units);
2307
2308	return ret;
2309}
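
/*
 * Example: an ahash template naming MDEU-A as its primary unit is
 * supported only if the bit indices extracted by DESC_TYPE() and
 * PRIMARY_EU() are set in fsl,descriptor-types-mask and
 * fsl,exec-units-mask respectively; AEAD templates additionally name
 * a secondary (MDEU) unit, which is checked the same way.
 */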
2310
2311static int talitos_remove(struct platform_device *ofdev)
2312{
2313	struct device *dev = &ofdev->dev;
2314	struct talitos_private *priv = dev_get_drvdata(dev);
2315	struct talitos_crypto_alg *t_alg, *n;
2316	int i;
2317
2318	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2319		switch (t_alg->algt.type) {
2320		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2321		case CRYPTO_ALG_TYPE_AEAD:
2322			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2323			break;
2324		case CRYPTO_ALG_TYPE_AHASH:
2325			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2326			break;
2327		}
2328		list_del(&t_alg->entry);
2329		kfree(t_alg);
2330	}
2331
2332	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2333		talitos_unregister_rng(dev);
2334
	/* kfree() of a NULL pointer is a no-op */
	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
2338
2339	kfree(priv->chan);
2340
2341	if (priv->irq != NO_IRQ) {
2342		free_irq(priv->irq, dev);
2343		irq_dispose_mapping(priv->irq);
2344	}
2345
2346	tasklet_kill(&priv->done_task);
2347
2348	iounmap(priv->reg);
2349
2350	dev_set_drvdata(dev, NULL);
2351
2352	kfree(priv);
2353
2354	return 0;
2355}
2356
2357static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2358						    struct talitos_alg_template
2359						           *template)
2360{
2361	struct talitos_private *priv = dev_get_drvdata(dev);
2362	struct talitos_crypto_alg *t_alg;
2363	struct crypto_alg *alg;
2364
2365	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2366	if (!t_alg)
2367		return ERR_PTR(-ENOMEM);
2368
2369	t_alg->algt = *template;
2370
2371	switch (t_alg->algt.type) {
2372	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2373		alg = &t_alg->algt.alg.crypto;
2374		alg->cra_init = talitos_cra_init;
2375		break;
2376	case CRYPTO_ALG_TYPE_AEAD:
2377		alg = &t_alg->algt.alg.crypto;
2378		alg->cra_init = talitos_cra_init_aead;
2379		break;
2380	case CRYPTO_ALG_TYPE_AHASH:
2381		alg = &t_alg->algt.alg.hash.halg.base;
2382		alg->cra_init = talitos_cra_init_ahash;
2383		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2384		    !strcmp(alg->cra_name, "sha224")) {
2385			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2386			t_alg->algt.desc_hdr_template =
2387					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2388					DESC_HDR_SEL0_MDEUA |
2389					DESC_HDR_MODE0_MDEU_SHA256;
2390		}
2391		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}
2393
2394	alg->cra_module = THIS_MODULE;
2395	alg->cra_priority = TALITOS_CRA_PRIORITY;
2396	alg->cra_alignmask = 0;
2397	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2398
2399	t_alg->dev = dev;
2400
2401	return t_alg;
2402}
2403
2404static int talitos_probe(struct platform_device *ofdev,
2405			 const struct of_device_id *match)
2406{
2407	struct device *dev = &ofdev->dev;
2408	struct device_node *np = ofdev->dev.of_node;
2409	struct talitos_private *priv;
2410	const unsigned int *prop;
2411	int i, err;
2412
2413	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2414	if (!priv)
2415		return -ENOMEM;
2416
2417	dev_set_drvdata(dev, priv);
2418
2419	priv->ofdev = ofdev;
2420
2421	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
2422
2423	INIT_LIST_HEAD(&priv->alg_list);
2424
2425	priv->irq = irq_of_parse_and_map(np, 0);
2426
2427	if (priv->irq == NO_IRQ) {
2428		dev_err(dev, "failed to map irq\n");
2429		err = -EINVAL;
2430		goto err_out;
2431	}
2432
2433	/* get the irq line */
2434	err = request_irq(priv->irq, talitos_interrupt, 0,
2435			  dev_driver_string(dev), dev);
2436	if (err) {
2437		dev_err(dev, "failed to request irq %d\n", priv->irq);
2438		irq_dispose_mapping(priv->irq);
2439		priv->irq = NO_IRQ;
2440		goto err_out;
2441	}
2442
2443	priv->reg = of_iomap(np, 0);
2444	if (!priv->reg) {
2445		dev_err(dev, "failed to of_iomap\n");
2446		err = -ENOMEM;
2447		goto err_out;
2448	}
2449
2450	/* get SEC version capabilities from device tree */
2451	prop = of_get_property(np, "fsl,num-channels", NULL);
2452	if (prop)
2453		priv->num_channels = *prop;
2454
2455	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2456	if (prop)
2457		priv->chfifo_len = *prop;
2458
2459	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2460	if (prop)
2461		priv->exec_units = *prop;
2462
2463	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2464	if (prop)
2465		priv->desc_types = *prop;
2466
2467	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2468	    !priv->exec_units || !priv->desc_types) {
2469		dev_err(dev, "invalid property data in device tree node\n");
2470		err = -EINVAL;
2471		goto err_out;
2472	}
2473
2474	if (of_device_is_compatible(np, "fsl,sec3.0"))
2475		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2476
2477	if (of_device_is_compatible(np, "fsl,sec2.1"))
2478		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2479				  TALITOS_FTR_SHA224_HWINIT;
2480
	priv->chan = kcalloc(priv->num_channels,
			     sizeof(struct talitos_channel), GFP_KERNEL);
2483	if (!priv->chan) {
2484		dev_err(dev, "failed to allocate channel management space\n");
2485		err = -ENOMEM;
2486		goto err_out;
2487	}
2488
2489	for (i = 0; i < priv->num_channels; i++) {
2490		spin_lock_init(&priv->chan[i].head_lock);
2491		spin_lock_init(&priv->chan[i].tail_lock);
2492	}
2493
2494	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2495
2496	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kcalloc(priv->fifo_len,
					     sizeof(struct talitos_request),
					     GFP_KERNEL);
2499		if (!priv->chan[i].fifo) {
2500			dev_err(dev, "failed to allocate request fifo %d\n", i);
2501			err = -ENOMEM;
2502			goto err_out;
2503		}
2504	}
2505
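	/*
	 * Seeding submit_count at -(chfifo_len - 1) lets the submit path
	 * detect a full h/w fifo as the counter going positive, while
	 * completions decrement it again (a sketch of the invariant
	 * maintained by the submit/done paths earlier in this file, not
	 * new behaviour).
	 */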
2506	for (i = 0; i < priv->num_channels; i++)
2507		atomic_set(&priv->chan[i].submit_count,
2508			   -(priv->chfifo_len - 1));
2509
2510	dma_set_mask(dev, DMA_BIT_MASK(36));
2511
2512	/* reset and initialize the h/w */
2513	err = init_device(dev);
2514	if (err) {
2515		dev_err(dev, "failed to initialize device\n");
2516		goto err_out;
2517	}
2518
2519	/* register the RNG, if available */
2520	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2521		err = talitos_register_rng(dev);
2522		if (err) {
2523			dev_err(dev, "failed to register hwrng: %d\n", err);
2524			goto err_out;
		}
		dev_info(dev, "hwrng\n");
2527	}
2528
2529	/* register crypto algorithms the device supports */
2530	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2531		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2532			struct talitos_crypto_alg *t_alg;
2533			char *name = NULL;
2534
2535			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2536			if (IS_ERR(t_alg)) {
2537				err = PTR_ERR(t_alg);
2538				goto err_out;
2539			}
2540
2541			switch (t_alg->algt.type) {
2542			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2543			case CRYPTO_ALG_TYPE_AEAD:
2544				err = crypto_register_alg(
2545						&t_alg->algt.alg.crypto);
2546				name = t_alg->algt.alg.crypto.cra_driver_name;
2547				break;
2548			case CRYPTO_ALG_TYPE_AHASH:
2549				err = crypto_register_ahash(
2550						&t_alg->algt.alg.hash);
2551				name =
2552				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2553				break;
2554			}
2555			if (err) {
2556				dev_err(dev, "%s alg registration failed\n",
2557					name);
2558				kfree(t_alg);
2559			} else {
2560				list_add_tail(&t_alg->entry, &priv->alg_list);
2561				dev_info(dev, "%s\n", name);
2562			}
2563		}
2564	}
2565
2566	return 0;
2567
2568err_out:
2569	talitos_remove(ofdev);
2570
2571	return err;
2572}
2573
2574static const struct of_device_id talitos_match[] = {
2575	{
2576		.compatible = "fsl,sec2.0",
2577	},
2578	{},
2579};
2580MODULE_DEVICE_TABLE(of, talitos_match);
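
/*
 * A matching device tree node looks roughly like this (a sketch after
 * the fsl,sec2.0 binding in Documentation/powerpc/dts-bindings/fsl/sec.txt;
 * exact values are SoC-specific):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */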
2581
2582static struct of_platform_driver talitos_driver = {
2583	.driver = {
2584		.name = "talitos",
2585		.owner = THIS_MODULE,
2586		.of_match_table = talitos_match,
2587	},
2588	.probe = talitos_probe,
2589	.remove = talitos_remove,
2590};
2591
2592static int __init talitos_init(void)
2593{
2594	return of_register_platform_driver(&talitos_driver);
2595}
2596module_init(talitos_init);
2597
2598static void __exit talitos_exit(void)
2599{
2600	of_unregister_platform_driver(&talitos_driver);
2601}
2602module_exit(talitos_exit);
2603
2604MODULE_LICENSE("GPL");
2605MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2606MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
2607