/* linux/drivers/spi/spi_s3c64xx.c
 *
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <mach/dma.h>
#include <plat/s3c64xx-spi.h>

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)

#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
					(c)->regs + S3C64XX_SPI_SLAVE_SEL)

#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK		(3<<0)

#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
					(((i)->fifo_lvl_mask + 1))) \
					? 1 : 0)

#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
					(((i)->fifo_lvl_mask + 1) << 1)) \
					? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

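/*
 * Convert a millisecond budget into an approximate busy-wait loop count,
 * derived from loops_per_jiffy; used below for polling the FIFO status
 * registers with a bounded timeout.
 */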
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

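/* Flags kept in sdd->state to track controller, queue and DMA activity */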
#define SUSPND    (1<<0)
#define SPIBUSY   (1<<1)
#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Pointer to the owning platform device.
 * @master: Pointer to the SPI Protocol master.
 * @workqueue: Work queue for the SPI xfer requests.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @work: Work item that processes the queued SPI messages.
 * @queue: To log SPI xfer requests.
 * @lock: Controller specific lock.
 * @state: Set of FLAGS to indicate status.
 * @rx_dmach: Controller's DMA channel for Rx.
 * @tx_dmach: Controller's DMA channel for Tx.
 * @sfr_start: BUS address of SPI controller regs.
 * @regs: Pointer to ioremap'ed controller registers.
 * @xfer_completion: To indicate completion of xfer task.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct workqueue_struct         *workqueue;
	struct s3c64xx_spi_info         *cntrlr_info;
	struct spi_device               *tgl_spi;
	struct work_struct              work;
	struct list_head                queue;
	spinlock_t                      lock;
	enum dma_ch                     rx_dmach;
	enum dma_ch                     tx_dmach;
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
};

static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
	.name = "samsung-spi-dma",
};

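/*
 * Drain both FIFOs: assert the channel software reset, wait (bounded) for
 * the Tx FIFO to empty, read out any residual Rx data, then release the
 * reset and switch off the DMA and Rx/Tx channel enables.
 */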
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sci) && loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sci))
			readl(regs + S3C64XX_SPI_RX_DATA);
		else
			break;
	} while (loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);
}

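/*
 * Program the controller for one transfer: select DMA or polling (PIO)
 * mode, set the packet count so exactly the right number of clocks are
 * generated, and kick off the Tx/Rx paths as dictated by the buffers
 * present in the spi_transfer.
 */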
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift data into the FIFO, even if the xfer is Tx
		 * only; this helps set the PCKT_CNT value to generate
		 * exactly as many clocks as needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			s3c2410_dma_config(sdd->tx_dmach, 1);
			s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
						xfer->tx_dma, xfer->len);
			s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
		} else {
			unsigned char *buf = (unsigned char *) xfer->tx_buf;
			int i = 0;
			while (i < xfer->len)
				writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		if (sci->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			s3c2410_dma_config(sdd->rx_dmach, 1);
			s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
						xfer->rx_dma, xfer->len);
			s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}

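/*
 * Assert chip-select for the target device, first deselecting any device
 * that was left selected by a previous message's cs_change hint.
 */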
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;

	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
			/* Deselect the last toggled device */
			cs = sdd->tgl_spi->controller_data;
			cs->set_level(cs->line,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
		}
		sdd->tgl_spi = NULL;
	}

	cs = spi->controller_data;
	cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
}

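/*
 * Wait for the current transfer to finish: in DMA mode block on the
 * completion signalled by the DMA callbacks, otherwise poll the FIFO
 * levels; in polling mode this also copies the received bytes out of
 * the Rx FIFO.
 */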
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer, int dma_mode)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned long val;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	if (dma_mode) {
		val = msecs_to_jiffies(ms) + 10;
		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
	} else {
		u32 status;
		val = msecs_to_loops(ms);
		do {
			status = readl(regs + S3C64XX_SPI_STATUS);
		} while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
	}

	if (!val)
		return -EIO;

	if (dma_mode) {
		u32 status;

		/*
		 * DmaTx returns after simply writing data into the FIFO,
		 * without waiting for the actual transmission on the bus to
		 * finish. DmaRx returns only after the DMA has read the data
		 * out of the FIFO, which requires the bus transmission to
		 * finish, so we need not worry if the xfer involved Rx
		 * (with or without Tx).
		 */
		if (xfer->rx_buf == NULL) {
			val = msecs_to_loops(10);
			status = readl(regs + S3C64XX_SPI_STATUS);
			while ((TX_FIFO_LVL(status, sci)
				|| !S3C64XX_SPI_ST_TX_DONE(status, sci))
					&& --val) {
				cpu_relax();
				status = readl(regs + S3C64XX_SPI_STATUS);
			}

			if (!val)
				return -EIO;
		}
	} else {
		unsigned char *buf;
		int i;

		/* If it was only Tx */
		if (xfer->rx_buf == NULL) {
			sdd->state &= ~TXBUSY;
			return 0;
		}

		i = 0;
		buf = xfer->rx_buf;
		while (i < xfer->len)
			buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);

		sdd->state &= ~RXBUSY;
	}

	return 0;
}

static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	if (sdd->tgl_spi == spi)
		sdd->tgl_spi = NULL;

	cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
}

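/*
 * Apply the cached configuration (mode, bits-per-word, speed) to the
 * hardware: clock gating, CPOL/CPHA, channel/bus transfer sizes and the
 * prescaler derived from the source clock rate.
 */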
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	val = readl(regs + S3C64XX_SPI_CLK_CFG);
	val &= ~S3C64XX_SPI_ENCLK_ENABLE;
	writel(val, regs + S3C64XX_SPI_CLK_CFG);

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		break;
	}
	val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	/* Configure Clock */
	val = readl(regs + S3C64XX_SPI_CLK_CFG);
	val &= ~S3C64XX_SPI_PSR_MASK;
	val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
			& S3C64XX_SPI_PSR_MASK);
	writel(val, regs + S3C64XX_SPI_CLK_CFG);

	/* Enable Clock */
	val = readl(regs + S3C64XX_SPI_CLK_CFG);
	val |= S3C64XX_SPI_ENCLK_ENABLE;
	writel(val, regs + S3C64XX_SPI_CLK_CFG);
}

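/*
 * DMA buffer-done callbacks: clear the direction's BUSY flag (or log an
 * aborted transfer) and complete xfer_completion once both directions
 * are idle.
 */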
static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
				 int size, enum s3c2410_dma_buffresult res)
{
	struct s3c64xx_spi_driver_data *sdd = buf_id;
	unsigned long flags;

	spin_lock_irqsave(&sdd->lock, flags);

	if (res == S3C2410_RES_OK)
		sdd->state &= ~RXBUSY;
	else
		dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);

	/* If the other channel is also done */
	if (!(sdd->state & TXBUSY))
		complete(&sdd->xfer_completion);

	spin_unlock_irqrestore(&sdd->lock, flags);
}

static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
				 int size, enum s3c2410_dma_buffresult res)
{
	struct s3c64xx_spi_driver_data *sdd = buf_id;
	unsigned long flags;

	spin_lock_irqsave(&sdd->lock, flags);

	if (res == S3C2410_RES_OK)
		sdd->state &= ~TXBUSY;
	else
		dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d\n", size);

	/* If the other channel is also done */
	if (!(sdd->state & RXBUSY))
		complete(&sdd->xfer_completion);

	spin_unlock_irqrestore(&sdd->lock, flags);
}

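/*
 * DMA (un)mapping helpers for messages that arrive without pre-mapped
 * buffers; a failed mapping is marked with XFER_DMAADDR_INVALID so that
 * cleanup only touches what was actually mapped.
 */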
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
						struct spi_message *msg)
{
	struct device *dev = &sdd->pdev->dev;
	struct spi_transfer *xfer;

	if (msg->is_dma_mapped)
		return 0;

	/* First mark all xfer unmapped */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		xfer->rx_dma = XFER_DMAADDR_INVALID;
		xfer->tx_dma = XFER_DMAADDR_INVALID;
	}

	/* Map until end or first fail */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->tx_buf != NULL) {
			xfer->tx_dma = dma_map_single(dev,
					(void *)xfer->tx_buf, xfer->len,
					DMA_TO_DEVICE);
			if (dma_mapping_error(dev, xfer->tx_dma)) {
				dev_err(dev, "dma_map_single Tx failed\n");
				xfer->tx_dma = XFER_DMAADDR_INVALID;
				return -ENOMEM;
			}
		}

		if (xfer->rx_buf != NULL) {
			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
						xfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, xfer->rx_dma)) {
				dev_err(dev, "dma_map_single Rx failed\n");
				dma_unmap_single(dev, xfer->tx_dma,
						xfer->len, DMA_TO_DEVICE);
				xfer->tx_dma = XFER_DMAADDR_INVALID;
				xfer->rx_dma = XFER_DMAADDR_INVALID;
				return -ENOMEM;
			}
		}
	}

	return 0;
}

static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
						struct spi_message *msg)
{
	struct device *dev = &sdd->pdev->dev;
	struct spi_transfer *xfer;

	if (msg->is_dma_mapped)
		return;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->rx_buf != NULL
				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
			dma_unmap_single(dev, xfer->rx_dma,
						xfer->len, DMA_FROM_DEVICE);

		if (xfer->tx_buf != NULL
				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
			dma_unmap_single(dev, xfer->tx_dma,
						xfer->len, DMA_TO_DEVICE);
	}
}

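/*
 * Process one queued spi_message: reconfigure the controller if the
 * device's settings differ, map the buffers for DMA, then run each
 * spi_transfer with chip-select handling, per-transfer speed/bpw
 * overrides and error cleanup.
 */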
static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
					struct spi_message *msg)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct spi_transfer *xfer;
	int status = 0, cs_toggle = 0;
	u32 speed;
	u8 bpw;

	/* If the Master's (controller) state differs from that needed by the Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Map all the transfers if needed */
	if (s3c64xx_spi_map_mssg(sdd, msg)) {
		dev_err(&spi->dev,
			"Xfer: Unable to map message buffers!\n");
		status = -ENOMEM;
		goto out;
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		unsigned long flags;
		int use_dma;

		INIT_COMPLETION(sdd->xfer_completion);

		/* Only BPW and Speed may change across transfers */
		bpw = xfer->bits_per_word ? : spi->bits_per_word;
		speed = xfer->speed_hz ? : spi->max_speed_hz;

		if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
			sdd->cur_bpw = bpw;
			sdd->cur_speed = speed;
			s3c64xx_spi_config(sdd);
		}

		/* Polling method for xfers not bigger than FIFO capacity */
		if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
			use_dma = 0;
		else
			use_dma = 1;

		spin_lock_irqsave(&sdd->lock, flags);

		/* Clear the busy flags; only the directions actually used
		 * will set them again */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		enable_datapath(sdd, spi, xfer, use_dma);

		/* Slave Select */
		enable_cs(sdd, spi);

		/* Start the signals */
		S3C64XX_SPI_ACT(sdd);

		spin_unlock_irqrestore(&sdd->lock, flags);

		status = wait_for_xfer(sdd, xfer, use_dma);

		/* Quiesce the signals */
		S3C64XX_SPI_DEACT(sdd);

		if (status) {
			dev_err(&spi->dev, "I/O Error: "
				"rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len);

			if (use_dma) {
				if (xfer->tx_buf != NULL
						&& (sdd->state & TXBUSY))
					s3c2410_dma_ctrl(sdd->tx_dmach,
							S3C2410_DMAOP_FLUSH);
				if (xfer->rx_buf != NULL
						&& (sdd->state & RXBUSY))
					s3c2410_dma_ctrl(sdd->rx_dmach,
							S3C2410_DMAOP_FLUSH);
			}

			goto out;
		}

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			/* Hint that the next message is going to be
			   for the same device */
			if (list_is_last(&xfer->transfer_list,
						&msg->transfers))
				cs_toggle = 1;
			else
				disable_cs(sdd, spi);
		}

		msg->actual_length += xfer->len;

		flush_fifo(sdd);
	}

out:
	if (!cs_toggle || status)
		disable_cs(sdd, spi);
	else
		sdd->tgl_spi = spi;

	s3c64xx_spi_unmap_mssg(sdd, msg);

	msg->status = status;

	if (msg->complete)
		msg->complete(msg->context);
}

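/*
 * Claim both DMA channels and point them at the controller's Rx/Tx data
 * registers; returns 1 on success, 0 if either channel is unavailable.
 */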
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
	if (s3c2410_dma_request(sdd->rx_dmach,
					&s3c64xx_spi_dma_client, NULL) < 0) {
		dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
		return 0;
	}
	s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
	s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
					sdd->sfr_start + S3C64XX_SPI_RX_DATA);

	if (s3c2410_dma_request(sdd->tx_dmach,
					&s3c64xx_spi_dma_client, NULL) < 0) {
		dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
		s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
		return 0;
	}
	s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
	s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
					sdd->sfr_start + S3C64XX_SPI_TX_DATA);

	return 1;
}

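/*
 * Workqueue handler: grab the DMA channels, then drain the message queue
 * (unless a suspend is pending), handing each message to handle_msg()
 * with the queue lock dropped around the actual transfer.
 */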
static void s3c64xx_spi_work(struct work_struct *work)
{
	struct s3c64xx_spi_driver_data *sdd = container_of(work,
					struct s3c64xx_spi_driver_data, work);
	unsigned long flags;

	/* Acquire DMA channels */
	while (!acquire_dma(sdd))
		msleep(10);

	spin_lock_irqsave(&sdd->lock, flags);

	while (!list_empty(&sdd->queue)
				&& !(sdd->state & SUSPND)) {

		struct spi_message *msg;

		msg = container_of(sdd->queue.next, struct spi_message, queue);

		list_del_init(&msg->queue);

		/* Set Xfer busy flag */
		sdd->state |= SPIBUSY;

		spin_unlock_irqrestore(&sdd->lock, flags);

		handle_msg(sdd, msg);

		spin_lock_irqsave(&sdd->lock, flags);

		sdd->state &= ~SPIBUSY;
	}

	spin_unlock_irqrestore(&sdd->lock, flags);

	/* Free DMA channels */
	s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
	s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
}

static int s3c64xx_spi_transfer(struct spi_device *spi,
						struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd;
	unsigned long flags;

	sdd = spi_master_get_devdata(spi->master);

	spin_lock_irqsave(&sdd->lock, flags);

	if (sdd->state & SUSPND) {
		spin_unlock_irqrestore(&sdd->lock, flags);
		return -ESHUTDOWN;
	}

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	list_add_tail(&msg->queue, &sdd->queue);

	queue_work(sdd->workqueue, &sdd->work);

	spin_unlock_irqrestore(&sdd->lock, flags);

	return 0;
}

/*
 * Here we only check the validity of the requested configuration and
 * save it in a local data structure. The controller is actually
 * configured only just before we get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	struct spi_message *msg;
	u32 psr, speed;
	unsigned long flags;
	int err = 0;

	if (cs == NULL || cs->set_level == NULL) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	sdd = spi_master_get_devdata(spi->master);
	sci = sdd->cntrlr_info;

	spin_lock_irqsave(&sdd->lock, flags);

	list_for_each_entry(msg, &sdd->queue, queue) {
		/* Is some message already queued for this device? */
		if (msg->spi == spi) {
			dev_err(&spi->dev,
				"setup: attempt while mssg in queue!\n");
			spin_unlock_irqrestore(&sdd->lock, flags);
			return -EBUSY;
		}
	}

	if (sdd->state & SUSPND) {
		spin_unlock_irqrestore(&sdd->lock, flags);
		dev_err(&spi->dev,
			"setup: SPI-%d not active!\n", spi->master->bus_num);
		return -ESHUTDOWN;
	}

	spin_unlock_irqrestore(&sdd->lock, flags);

	if (spi->bits_per_word != 8
			&& spi->bits_per_word != 16
			&& spi->bits_per_word != 32) {
		dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
							spi->bits_per_word);
		err = -EINVAL;
		goto setup_exit;
	}

	/* Check if we can provide the requested rate */
	speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */

	if (spi->max_speed_hz > speed)
		spi->max_speed_hz = speed;

	psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
	psr &= S3C64XX_SPI_PSR_MASK;
	if (psr == S3C64XX_SPI_PSR_MASK)
		psr--;

	speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
	if (spi->max_speed_hz < speed) {
		if (psr+1 < S3C64XX_SPI_PSR_MASK) {
			psr++;
		} else {
			err = -EINVAL;
			goto setup_exit;
		}
	}

	speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
	if (spi->max_speed_hz >= speed)
		spi->max_speed_hz = speed;
	else
		err = -EINVAL;

setup_exit:

	/* setup() returns with the device de-selected */
	disable_cs(sdd, spi);

	return err;
}

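/*
 * Reset the controller to a known idle state: chip-select deasserted,
 * interrupts off (the driver only polls or uses DMA), clock source
 * selected, pending IRQs cleared, trailing-byte count programmed and
 * both FIFOs flushed.
 */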
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	S3C64XX_SPI_DEACT(sdd);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits */
	writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
				regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

static int __init s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res, *dmatx_res, *dmarx_res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	struct spi_master *master;
	int ret;

	if (pdev->id < 0) {
		dev_err(&pdev->dev,
				"Invalid platform device id-%d\n", pdev->id);
		return -ENODEV;
	}

	if (pdev->dev.platform_data == NULL) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	sci = pdev->dev.platform_data;
	if (!sci->src_clk_name) {
		dev_err(&pdev->dev,
			"Board init must call s3c64xx_spi_set_info()\n");
		return -EINVAL;
	}

	/* Check for availability of necessary resources */

	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (dmatx_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
		return -ENXIO;
	}

	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (dmarx_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
		return -ENXIO;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	sdd->tx_dmach = dmatx_res->start;
	sdd->rx_dmach = dmarx_res->start;

	sdd->cur_bpw = 8;

	master->bus_num = pdev->id;
	master->setup = s3c64xx_spi_setup;
	master->transfer = s3c64xx_spi_transfer;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	if (request_mem_region(mem_res->start,
			resource_size(mem_res), pdev->name) == NULL) {
		dev_err(&pdev->dev, "Req mem region failed\n");
		ret = -ENXIO;
		goto err0;
	}

	sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
	if (sdd->regs == NULL) {
		dev_err(&pdev->dev, "Unable to remap IO\n");
		ret = -ENXIO;
		goto err1;
	}

	if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err2;
	}

	/* Setup clocks */
	sdd->clk = clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err3;
	}

	if (clk_enable(sdd->clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		ret = -EBUSY;
		goto err4;
	}

	sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", sci->src_clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err5;
	}

	if (clk_enable(sdd->src_clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
							sci->src_clk_name);
		ret = -EBUSY;
		goto err6;
	}

	sdd->workqueue = create_singlethread_workqueue(
						dev_name(master->dev.parent));
	if (sdd->workqueue == NULL) {
		dev_err(&pdev->dev, "Unable to create workqueue\n");
		ret = -ENOMEM;
		goto err7;
	}

	/* Setup default mode */
	s3c64xx_spi_hwinit(sdd, pdev->id);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);
	INIT_WORK(&sdd->work, s3c64xx_spi_work);
	INIT_LIST_HEAD(&sdd->queue);

	if (spi_register_master(master)) {
		dev_err(&pdev->dev, "cannot register SPI master\n");
		ret = -EBUSY;
		goto err8;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
					"with %d Slaves attached\n",
					pdev->id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
					mem_res->start, mem_res->end,
					sdd->rx_dmach, sdd->tx_dmach);

	return 0;

err8:
	destroy_workqueue(sdd->workqueue);
err7:
	clk_disable(sdd->src_clk);
err6:
	clk_put(sdd->src_clk);
err5:
	clk_disable(sdd->clk);
err4:
	clk_put(sdd->clk);
err3:
err2:
	iounmap((void *) sdd->regs);
err1:
	release_mem_region(mem_res->start, resource_size(mem_res));
err0:
	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);

	return ret;
}

static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct resource	*mem_res;
	unsigned long flags;

	spin_lock_irqsave(&sdd->lock, flags);
	sdd->state |= SUSPND;
	spin_unlock_irqrestore(&sdd->lock, flags);

	while (sdd->state & SPIBUSY)
		msleep(10);

	spi_unregister_master(master);

	destroy_workqueue(sdd->workqueue);

	clk_disable(sdd->src_clk);
	clk_put(sdd->src_clk);

	clk_disable(sdd->clk);
	clk_put(sdd->clk);

	iounmap((void *) sdd->regs);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res != NULL)
		release_mem_region(mem_res->start, resource_size(mem_res));

	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);

	return 0;
}

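/*
 * PM hooks: suspend marks the controller SUSPND, waits for any transfer
 * in flight and gates the clocks; resume reconfigures the GPIOs, ungates
 * the clocks and re-runs the hardware init before accepting new messages.
 */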
#ifdef CONFIG_PM
static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	unsigned long flags;

	spin_lock_irqsave(&sdd->lock, flags);
	sdd->state |= SUSPND;
	spin_unlock_irqrestore(&sdd->lock, flags);

	while (sdd->state & SPIBUSY)
		msleep(10);

	/* Disable the clock */
	clk_disable(sdd->src_clk);
	clk_disable(sdd->clk);

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

static int s3c64xx_spi_resume(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	unsigned long flags;

	sci->cfg_gpio(pdev);

	/* Enable the clock */
	clk_enable(sdd->src_clk);
	clk_enable(sdd->clk);

	s3c64xx_spi_hwinit(sdd, pdev->id);

	spin_lock_irqsave(&sdd->lock, flags);
	sdd->state &= ~SUSPND;
	spin_unlock_irqrestore(&sdd->lock, flags);

	return 0;
}
#else
#define s3c64xx_spi_suspend	NULL
#define s3c64xx_spi_resume	NULL
#endif /* CONFIG_PM */

static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.owner	= THIS_MODULE,
	},
	.remove = s3c64xx_spi_remove,
	.suspend = s3c64xx_spi_suspend,
	.resume = s3c64xx_spi_resume,
};
MODULE_ALIAS("platform:s3c64xx-spi");

static int __init s3c64xx_spi_init(void)
{
	return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
subsys_initcall(s3c64xx_spi_init);

static void __exit s3c64xx_spi_exit(void)
{
	platform_driver_unregister(&s3c64xx_spi_driver);
}
module_exit(s3c64xx_spi_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");