• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/spi/
1/*
2 * Driver for Atmel AT32 and AT91 SPI Controllers
3 *
4 * Copyright (C) 2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/clk.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/delay.h>
17#include <linux/dma-mapping.h>
18#include <linux/err.h>
19#include <linux/interrupt.h>
20#include <linux/spi/spi.h>
21#include <linux/slab.h>
22
23#include <asm/io.h>
24#include <mach/board.h>
25#include <mach/gpio.h>
26#include <mach/cpu.h>
27
28#include "atmel_spi.h"
29
/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 */
struct atmel_spi {
	spinlock_t		lock;		/* protects queue and transfer state */

	void __iomem		*regs;		/* mapped controller registers */
	int			irq;
	struct clk		*clk;		/* "spi_clk" peripheral clock */
	struct platform_device	*pdev;
	struct spi_device	*stay;		/* device whose chipselect stays
						 * asserted between messages */

	u8			stopping;	/* set at remove; new work rejected */
	struct list_head	queue;		/* pending spi_message queue */
	struct spi_transfer	*current_transfer;	/* transfer the PDC is working on */
	unsigned long		current_remaining_bytes;
	struct spi_transfer	*next_transfer;		/* transfer preloaded in the PDC
							 * "next" (RNPR/TNPR) registers */
	unsigned long		next_remaining_bytes;

	void			*buffer;	/* coherent scratch/bounce buffer for
						 * transfers lacking rx or tx buf */
	dma_addr_t		buffer_dma;
};
54
/* Controller-specific per-slave state */
struct atmel_spi_device {
	unsigned int		npcs_pin;	/* GPIO number driving this chipselect */
	u32			csr;		/* precomputed SPI_CSRx register value */
};
60
61#define BUFFER_SIZE		PAGE_SIZE
62#define INVALID_DMA_ADDRESS	0xffffffff
63
64/*
65 * Version 2 of the SPI controller has
66 *  - CR.LASTXFER
67 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
68 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
69 *  - SPI_CSRx.CSAAT
70 *  - SPI_CSRx.SBCR allows faster clocking
71 *
72 * We can determine the controller version by reading the VERSION
73 * register, but I haven't checked that it exists on all chips, and
74 * this is cheaper anyway.
75 */
static bool atmel_spi_is_v2(void)
{
	/* at91rm9200 is the only supported chip with the v1 core */
	return !cpu_is_at91rm9200();
}
80
81
/*
 * Assert the chipselect for @spi and program the controller's mode
 * registers for it.  Chipselects are driven via GPIO, but MR/CSR must
 * still be set up first so the clock idles at the right polarity
 * before the CS line toggles.
 */
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	if (atmel_spi_is_v2()) {
		/*
		 * Always use CSR0. This ensures that the clock
		 * switches to the correct idle polarity before we
		 * toggle the CS.
		 */
		spi_writel(as, CSR0, asd->csr);
		/* PCS=0x0e selects CSR0 while leaving NPCS lines idle */
		spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS)
				| SPI_BIT(MSTR));
		mr = spi_readl(as, MR);		/* read back only for the debug print */
		gpio_set_value(asd->npcs_pin, active);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		/* MR.PCS is active-low: clear only this device's bit */
		mr = spi_readl(as, MR);
		mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
		/* on v1, chipselect 0 is not driven as a GPIO here
		 * (see the matching test in cs_deactivate) */
		if (spi->chip_select != 0)
			gpio_set_value(asd->npcs_pin, active);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (high)" : "",
			mr);
}
123
/*
 * Deassert the chipselect for @spi, leaving any other device's CS
 * untouched.
 */
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	/* PCS is active-low: a cleared bit means this CS is selected */
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);	/* 0xf == no chip selected */
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			asd->npcs_pin, active ? " (low)" : "",
			mr);

	/* on v1 controllers, chipselect 0 is not driven as a GPIO */
	if (atmel_spi_is_v2() || spi->chip_select != 0)
		gpio_set_value(asd->npcs_pin, !active);
}
146
147static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
148					struct spi_transfer *xfer)
149{
150	return msg->transfers.prev == &xfer->transfer_list;
151}
152
153static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
154{
155	return xfer->delay_usecs == 0 && !xfer->cs_change;
156}
157
/*
 * Work out the DMA addresses and length for one PDC segment of @xfer.
 *
 * On entry *plen holds the bytes still outstanding; on exit it is the
 * length of this segment.  When the caller supplied no rx or tx buffer,
 * the shared coherent scratch buffer stands in, which caps the segment
 * at BUFFER_SIZE; scratch tx data is zeroed before each segment.
 */
static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		/* resume where the previous segment left off */
		*rx_dma = xfer->rx_dma + xfer->len - *plen;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}
	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - *plen;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		/* send zeroes while we only care about receiving */
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}
188
/*
 * Submit next transfer for DMA.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_transfer	*xfer;
	u32			len, remaining;
	u32			ieval;
	dma_addr_t		tx_dma, rx_dma;

	/* Pick the transfer to load into the "current" PDC registers:
	 * the first transfer of a fresh message, or the one following
	 * the current transfer.  If neither applies, the hardware has
	 * already promoted the preloaded "next" transfer for us.
	 */
	if (!as->current_transfer)
		xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
	else if (!as->next_transfer)
		xfer = list_entry(as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);
	else
		xfer = NULL;

	if (xfer) {
		/* stop the PDC while reprogramming it */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

		len = xfer->len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		remaining = xfer->len - len;

		spi_writel(as, RPR, rx_dma);
		spi_writel(as, TPR, tx_dma);

		/* PDC counters count words, not bytes, above 8 bpw */
		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RCR, len);
		spi_writel(as, TCR, len);

		dev_dbg(&msg->spi->dev,
			"  start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	} else {
		/* hardware consumed the preloaded "next" transfer */
		xfer = as->next_transfer;
		remaining = as->next_remaining_bytes;
	}

	as->current_transfer = xfer;
	as->current_remaining_bytes = remaining;

	/* Preload the "next" PDC registers: either the remainder of this
	 * transfer (scratch-buffer segmenting), or a chainable follower.
	 */
	if (remaining > 0)
		len = remaining;
	else if (!atmel_spi_xfer_is_last(msg, xfer)
			&& atmel_spi_xfer_can_be_chained(xfer)) {
		xfer = list_entry(xfer->transfer_list.next,
				struct spi_transfer, transfer_list);
		len = xfer->len;
	} else
		xfer = NULL;

	as->next_transfer = xfer;

	if (xfer) {
		u32	total;

		total = len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->next_remaining_bytes = total - len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
		ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	} else {
		/* nothing chained: also interrupt when both buffers drain */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for ENDTX in one transfer and
	 * then starts waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, ieval);
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
289
/*
 * Start the message at the head of the driver queue.
 * Called with as->lock held and no transfer in flight; the queue must
 * not be empty.
 */
static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_device	*spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->dev.parent, "start message %p for %s\n",
			msg, dev_name(&spi->dev));

	/* select chip if it's not still active */
	if (as->stay) {
		/* a previous message left its CS asserted (cs_change) */
		if (as->stay != spi) {
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	atmel_spi_next_xfer(master, msg);
}
316
/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
324static int
325atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
326{
327	struct device	*dev = &as->pdev->dev;
328
329	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
330	if (xfer->tx_buf) {
331		xfer->tx_dma = dma_map_single(dev,
332				(void *) xfer->tx_buf, xfer->len,
333				DMA_TO_DEVICE);
334		if (dma_mapping_error(dev, xfer->tx_dma))
335			return -ENOMEM;
336	}
337	if (xfer->rx_buf) {
338		xfer->rx_dma = dma_map_single(dev,
339				xfer->rx_buf, xfer->len,
340				DMA_FROM_DEVICE);
341		if (dma_mapping_error(dev, xfer->rx_dma)) {
342			if (xfer->tx_buf)
343				dma_unmap_single(dev,
344						xfer->tx_dma, xfer->len,
345						DMA_TO_DEVICE);
346			return -ENOMEM;
347		}
348	}
349	return 0;
350}
351
352static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
353				     struct spi_transfer *xfer)
354{
355	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
356		dma_unmap_single(master->dev.parent, xfer->tx_dma,
357				 xfer->len, DMA_TO_DEVICE);
358	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
359		dma_unmap_single(master->dev.parent, xfer->rx_dma,
360				 xfer->len, DMA_FROM_DEVICE);
361}
362
/*
 * Complete @msg with @status and advance the queue.  If @stay is set
 * and no error occurred, the chipselect is left asserted and the device
 * remembered in as->stay for the next message.
 *
 * Called with as->lock held and irqs blocked; the lock is dropped
 * around the caller's completion callback.
 */
static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int status, int stay)
{
	if (!stay || status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	/* the callback may sleep-free resources or resubmit; don't hold
	 * our lock across it */
	spin_unlock(&as->lock);
	msg->complete(msg->context);
	spin_lock(&as->lock);

	as->current_transfer = NULL;
	as->next_transfer = NULL;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	else
		atmel_spi_next_message(master);
}
392
/*
 * Interrupt handler: services receive-overrun errors and end-of-DMA
 * (ENDRX/RXBUFF) events, completing messages or kicking off the next
 * transfer as appropriate.
 */
static irqreturn_t
atmel_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_transfer	*xfer;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	spin_lock(&as->lock);

	xfer = as->current_transfer;
	msg = list_entry(as->queue.next, struct spi_message, queue);

	/* only act on interrupt sources we actually enabled */
	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		int timeout;

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_len will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * First, stop the transfer and unmap the DMA buffers.
		 */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
		if (!msg->is_dma_mapped)
			atmel_spi_dma_unmap_xfer(master, xfer);

		/* REVISIT: udelay in irq is unfriendly */
		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
			 spi_readl(as, TCR), spi_readl(as, RCR));

		/*
		 * Clean up DMA registers and make sure the data
		 * registers are empty.
		 */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		spi_writel(as, RCR, 0);
		spi_writel(as, TCR, 0);
		for (timeout = 1000; timeout; timeout--)
			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
				break;
		if (!timeout)
			dev_warn(master->dev.parent,
				 "timeout waiting for TXEMPTY");
		while (spi_readl(as, SR) & SPI_BIT(RDRF))
			spi_readl(as, RDR);

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		atmel_spi_msg_done(master, as, msg, -EIO, 0);
	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		if (as->current_remaining_bytes == 0) {
			/* the whole transfer has completed */
			msg->actual_length += xfer->len;

			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

			/* REVISIT: udelay in irq is unfriendly */
			if (xfer->delay_usecs)
				udelay(xfer->delay_usecs);

			if (atmel_spi_xfer_is_last(msg, xfer)) {
				/* report completed message */
				atmel_spi_msg_done(master, as, msg, 0,
						xfer->cs_change);
			} else {
				if (xfer->cs_change) {
					cs_deactivate(as, msg->spi);
					udelay(1);
					cs_activate(as, msg->spi);
				}

				atmel_spi_next_xfer(master, msg);
			}
		} else {
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			atmel_spi_next_xfer(master, msg);
		}
	}

	spin_unlock(&as->lock);

	return ret;
}
504
505static int atmel_spi_setup(struct spi_device *spi)
506{
507	struct atmel_spi	*as;
508	struct atmel_spi_device	*asd;
509	u32			scbr, csr;
510	unsigned int		bits = spi->bits_per_word;
511	unsigned long		bus_hz;
512	unsigned int		npcs_pin;
513	int			ret;
514
515	as = spi_master_get_devdata(spi->master);
516
517	if (as->stopping)
518		return -ESHUTDOWN;
519
520	if (spi->chip_select > spi->master->num_chipselect) {
521		dev_dbg(&spi->dev,
522				"setup: invalid chipselect %u (%u defined)\n",
523				spi->chip_select, spi->master->num_chipselect);
524		return -EINVAL;
525	}
526
527	if (bits < 8 || bits > 16) {
528		dev_dbg(&spi->dev,
529				"setup: invalid bits_per_word %u (8 to 16)\n",
530				bits);
531		return -EINVAL;
532	}
533
534	/* see notes above re chipselect */
535	if (!atmel_spi_is_v2()
536			&& spi->chip_select == 0
537			&& (spi->mode & SPI_CS_HIGH)) {
538		dev_dbg(&spi->dev, "setup: can't be active-high\n");
539		return -EINVAL;
540	}
541
542	/* v1 chips start out at half the peripheral bus speed. */
543	bus_hz = clk_get_rate(as->clk);
544	if (!atmel_spi_is_v2())
545		bus_hz /= 2;
546
547	if (spi->max_speed_hz) {
548		/*
549		 * Calculate the lowest divider that satisfies the
550		 * constraint, assuming div32/fdiv/mbz == 0.
551		 */
552		scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
553
554		/*
555		 * If the resulting divider doesn't fit into the
556		 * register bitfield, we can't satisfy the constraint.
557		 */
558		if (scbr >= (1 << SPI_SCBR_SIZE)) {
559			dev_dbg(&spi->dev,
560				"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
561				spi->max_speed_hz, scbr, bus_hz/255);
562			return -EINVAL;
563		}
564	} else
565		/* speed zero means "as slow as possible" */
566		scbr = 0xff;
567
568	csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
569	if (spi->mode & SPI_CPOL)
570		csr |= SPI_BIT(CPOL);
571	if (!(spi->mode & SPI_CPHA))
572		csr |= SPI_BIT(NCPHA);
573
574	/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
575	 *
576	 * DLYBCT would add delays between words, slowing down transfers.
577	 * It could potentially be useful to cope with DMA bottlenecks, but
578	 * in those cases it's probably best to just use a lower bitrate.
579	 */
580	csr |= SPI_BF(DLYBS, 0);
581	csr |= SPI_BF(DLYBCT, 0);
582
583	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
584	npcs_pin = (unsigned int)spi->controller_data;
585	asd = spi->controller_state;
586	if (!asd) {
587		asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
588		if (!asd)
589			return -ENOMEM;
590
591		ret = gpio_request(npcs_pin, dev_name(&spi->dev));
592		if (ret) {
593			kfree(asd);
594			return ret;
595		}
596
597		asd->npcs_pin = npcs_pin;
598		spi->controller_state = asd;
599		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
600	} else {
601		unsigned long		flags;
602
603		spin_lock_irqsave(&as->lock, flags);
604		if (as->stay == spi)
605			as->stay = NULL;
606		cs_deactivate(as, spi);
607		spin_unlock_irqrestore(&as->lock, flags);
608	}
609
610	asd->csr = csr;
611
612	dev_dbg(&spi->dev,
613		"setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
614		bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
615
616	if (!atmel_spi_is_v2())
617		spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
618
619	return 0;
620}
621
/*
 * Queue @msg for transmission (spi_master->transfer hook).
 *
 * Validates every transfer, DMA-maps buffers up front (unless the
 * caller already mapped them), then appends the message to the driver
 * queue and kicks the engine if it is idle.  Completion is reported
 * asynchronously via msg->complete().
 */
static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct atmel_spi	*as;
	struct spi_transfer	*xfer;
	unsigned long		flags;
	struct device		*controller = spi->master->dev.parent;

	as = spi_master_get_devdata(spi->master);

	dev_dbg(controller, "new message %p submitted for %s\n",
			msg, dev_name(&spi->dev));

	if (unlikely(list_empty(&msg->transfers)))
		return -EINVAL;

	if (as->stopping)
		return -ESHUTDOWN;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			dev_dbg(&spi->dev, "missing rx or tx buf\n");
			return -EINVAL;
		}

		/* per-transfer speed/word-size overrides are unsupported */
		if (xfer->bits_per_word || xfer->speed_hz) {
			dev_dbg(&spi->dev, "no protocol options yet\n");
			return -ENOPROTOOPT;
		}

		/*
		 * DMA map early, for performance (empties dcache ASAP) and
		 * better fault reporting.  This is a DMA-only driver.
		 *
		 * NOTE that if dma_unmap_single() ever starts to do work on
		 * platforms supported by this driver, we would need to clean
		 * up mappings for previously-mapped transfers.
		 */
		if (!msg->is_dma_mapped) {
			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
				return -ENOMEM;
		}
	}

#ifdef VERBOSE
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(controller,
			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len,
			xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	}
#endif

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	spin_lock_irqsave(&as->lock, flags);
	list_add_tail(&msg->queue, &as->queue);
	/* idle engine: start this message immediately */
	if (!as->current_transfer)
		atmel_spi_next_message(spi->master);
	spin_unlock_irqrestore(&as->lock, flags);

	return 0;
}
686
/*
 * Release per-device state (spi_master->cleanup hook): deselect the
 * device if its chipselect was left asserted, then free the GPIO and
 * the controller_state allocated in atmel_spi_setup().
 */
static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi	*as = spi_master_get_devdata(spi->master);
	struct atmel_spi_device	*asd = spi->controller_state;
	unsigned		gpio = (unsigned) spi->controller_data;
	unsigned long		flags;

	/* nothing to do if setup() never ran for this device */
	if (!asd)
		return;

	spin_lock_irqsave(&as->lock, flags);
	if (as->stay == spi) {
		as->stay = NULL;
		cs_deactivate(as, spi);
	}
	spin_unlock_irqrestore(&as->lock, flags);

	spi->controller_state = NULL;
	gpio_free(gpio);
	kfree(asd);
}
708
709/*-------------------------------------------------------------------------*/
710
/*
 * Probe: map the controller, allocate the SPI master and driver state,
 * hook up the IRQ, reset and enable the hardware, and register with
 * the SPI core.  Resources are unwound in reverse order on failure.
 */
static int __init atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof *as);
	if (!master)
		goto out_free;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->setup = atmel_spi_setup;
	master->transfer = atmel_spi_transfer;
	master->cleanup = atmel_spi_cleanup;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);
	INIT_LIST_HEAD(&as->queue);
	as->pdev = pdev;
	as->regs = ioremap(regs->start, resource_size(regs));
	if (!as->regs)
		goto out_free_buffer;
	as->irq = irq;
	as->clk = clk;

	ret = request_irq(irq, atmel_spi_interrupt, 0,
			dev_name(&pdev->dev), master);
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	clk_enable(clk);
	/* NOTE(review): SWRST is deliberately written twice here and in
	 * the error/remove paths -- presumably a controller erratum
	 * workaround; confirm against the chip errata sheet */
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	ret = spi_register_master(master);
	if (ret)
		goto out_reset_hw;

	return 0;

out_reset_hw:
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST));
	clk_disable(clk);
	free_irq(irq, master);
out_unmap_regs:
	iounmap(as->regs);
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	clk_put(clk);
	spi_master_put(master);
	return ret;
}
806
/*
 * Remove: block further submissions, reset the controller, fail any
 * still-queued messages, then release all resources.
 */
static int __exit atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	as->stopping = 1;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	/* Terminate remaining queued transfers */
	/* NOTE(review): plain list_for_each_entry while invoking
	 * complete() -- if a callback frees or requeues its message this
	 * iteration is unsafe; consider list_for_each_entry_safe.
	 * TODO confirm what completion callbacks are allowed to do here. */
	list_for_each_entry(msg, &as->queue, queue) {
		/* REVISIT unmapping the dma is a NOP on ARM and AVR32
		 * but we shouldn't depend on that...
		 */
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable(as->clk);
	clk_put(as->clk);
	free_irq(as->irq, master);
	iounmap(as->regs);

	/* NOTE(review): unregistering after freeing irq/regs is unusual;
	 * most drivers unregister from the SPI core first -- verify no
	 * request can race in through the core before this point */
	spi_unregister_master(master);

	return 0;
}
842
843#ifdef	CONFIG_PM
844
/*
 * Suspend: just gate the peripheral clock.
 * NOTE(review): queued messages are not flushed first; assumes traffic
 * has been quiesced before suspend -- TODO confirm.
 */
static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	clk_disable(as->clk);
	return 0;
}
853
/* Resume: re-enable the peripheral clock gated in atmel_spi_suspend(). */
static int atmel_spi_resume(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	clk_enable(as->clk);
	return 0;
}
862
863#else
864#define	atmel_spi_suspend	NULL
865#define	atmel_spi_resume	NULL
866#endif
867
868
/*
 * No .probe here: the probe function is __init and is registered
 * separately through platform_driver_probe() in atmel_spi_init().
 */
static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.remove		= __exit_p(atmel_spi_remove),
};
878
/* Module entry: register the driver, binding the one-shot __init probe. */
static int __init atmel_spi_init(void)
{
	return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe);
}
module_init(atmel_spi_init);
884
/* Module exit: unregister the platform driver. */
static void __exit atmel_spi_exit(void)
{
	platform_driver_unregister(&atmel_spi_driver);
}
module_exit(atmel_spi_exit);
890
891MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
892MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
893MODULE_LICENSE("GPL");
894MODULE_ALIAS("platform:atmel_spi");
895