/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/spi/
/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (c) 2010 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>

#include <mach/ep93xx_spi.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @irq: IRQ number used by the driver
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, the driver accepts transfer requests from protocol
 * drivers. @current_msg holds a pointer to the message that is currently
 * being processed. If @current_msg is %NULL, no processing is going on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	int				irq;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)

static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * This function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and places them in @chip->div_cpsr and @chip->div_scr. If, for
 * some reason, the divisors cannot be calculated, nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate stays within the range supported
	 * by the controller. Note that the minimum value is already checked
	 * in ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number starting from 2, scr can be any number
	 * between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
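
/*
 * Worked example (added for illustration, not part of the original driver):
 * assuming a hypothetical SSP input clock of 10 MHz and a requested rate of
 * 1 MHz, the search above stops at cpsr = 2, scr = 4, because
 *
 *	10 MHz / (2 * (1 + 4)) = 1 MHz
 *
 * is the first combination (scanning cpsr = 2 with scr = 0, 1, 2, ...) whose
 * resulting rate does not exceed the requested one; scr = 0..3 would still
 * give 5, 2.5, ~1.67 and 1.25 MHz respectively.
 */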

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}
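
/*
 * Illustrative sketch (added for illustration, not part of the original
 * driver): a board file can supply the chip operations used above through
 * spi_board_info.controller_data so that an ordinary GPIO acts as the
 * chipselect. The callback shapes mirror how chip->ops is used in this file
 * (->setup, ->cleanup and ->cs_control); the GPIO number and all names below
 * are made up.
 *
 *	#define EXAMPLE_CS_GPIO		9	// hypothetical GPIO line
 *
 *	static int example_cs_setup(struct spi_device *spi)
 *	{
 *		int err = gpio_request(EXAMPLE_CS_GPIO, spi->modalias);
 *		if (err)
 *			return err;
 *		// start deasserted (active-low device)
 *		return gpio_direction_output(EXAMPLE_CS_GPIO, 1);
 *	}
 *
 *	static void example_cs_cleanup(struct spi_device *spi)
 *	{
 *		gpio_free(EXAMPLE_CS_GPIO);
 *	}
 *
 *	static void example_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(EXAMPLE_CS_GPIO, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops example_chip_ops = {
 *		.setup		= example_cs_setup,
 *		.cleanup	= example_cs_cleanup,
 *		.cs_control	= example_cs_control,
 *	};
 */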

/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	ep93xx_spi_cs_control(spi, false);
	return 0;
}
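
/*
 * Illustrative sketch (added for illustration, not part of the original
 * driver): the matching board support code would describe each slave with a
 * spi_board_info entry whose .controller_data points at chip operations such
 * as the example_chip_ops sketched earlier, and would pass a struct
 * ep93xx_spi_info (this driver only reads its .num_chipselect field) as the
 * platform data of the "ep93xx-spi" platform device. Device names and numbers
 * below are made up.
 *
 *	static struct ep93xx_spi_info example_spi_info = {
 *		.num_chipselect	= 1,
 *	};
 *
 *	static struct spi_board_info example_spi_devices[] __initdata = {
 *		{
 *			.modalias		= "spidev",
 *			.max_speed_hz		= 5 * 1000 * 1000,
 *			.bus_num		= 0,
 *			.chip_select		= 0,
 *			.mode			= SPI_MODE_0,
 *			.controller_data	= &example_chip_ops,
 *		},
 *	};
 *
 *	// registered from the board init code, e.g.:
 *	// spi_register_board_info(example_spi_devices,
 *	//			   ARRAY_SIZE(example_spi_devices));
 */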

/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and a negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->bits_per_word) {
			if (t->bits_per_word < 4 || t->bits_per_word > 16)
				return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * an error in the transfer and @msg->state to hold a pointer to the
	 * current transfer (or %NULL if there is no active transfer).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}
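
/*
 * Usage sketch (added for illustration, not part of the original driver):
 * protocol drivers never call ep93xx_spi_transfer() directly; they build a
 * message with the generic SPI API and the core hands it to this function
 * through master->transfer. Buffer names and the length below are made up.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx_data,
 *		.rx_buf	= rx_data,
 *		.len	= 16,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	err = spi_sync(spi, &msg);	// or spi_async() with a completion callback
 */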

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
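
/*
 * Worked example (added for illustration, not part of the original driver):
 * for a hypothetical device using SPI mode 3 (SPI_CPOL | SPI_CPHA = 0x3),
 * div_scr = 4 and 8 bits per word (dss = 7), the value composed above is
 *
 *	cr0 = (4 << 8) | (0x3 << 6) | 7 = 0x4c7
 */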

static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		/* @tx counts bytes, so convert it to a 16-bit frame index */
		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx / 2];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		/* @rx counts bytes, so convert it to a 16-bit frame index */
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx / 2] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers the next bytes (or half-words) to/from the RX/TX
 * FIFOs. If called repeatedly, the whole transfer is eventually completed.
 * Returns %-EINPROGRESS if the current transfer has not yet completed,
 * otherwise %0.
 *
 * When this function returns, the RX FIFO should be empty and the TX FIFO
 * should be full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len) {
		msg->actual_length += t->len;
		return 0;
	}

	return -EINPROGRESS;
}
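
/*
 * Illustration (added for this listing, not part of the original driver):
 * for a 12-byte transfer at 8 bits per word, the first call, made before
 * interrupts are enabled, finds the RX FIFO empty and loads 8 frames into
 * the TX FIFO, returning -EINPROGRESS with fifo_level == 8. Later calls from
 * the interrupt handler first drain whatever has arrived in the RX FIFO and
 * then top the TX FIFO up with the remaining 4 frames; once espi->rx reaches
 * t->len the function returns 0 and the interrupt handler disables the
 * interrupts and signals &espi->wait.
 */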

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. It waits until the
 * transfer is complete (it may sleep) and updates @msg->status based on
 * whether the transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * If the protocol driver asks us to drop the
			 * chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}

/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			/* don't leave the controller and its clock enabled */
			ep93xx_spi_disable(espi);
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to spi device and assert
	 * the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. The message is taken off the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After the message is transferred, the protocol driver is notified by
 * calling @msg->complete(). In case of error, @msg->status is set to a
 * negative error number, otherwise it contains zero (and @msg->actual_length
 * is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
		list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got an ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * The interrupt is either RX (RIS) or TX (TIS). In both cases
		 * we simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer. Let's wait for the
			 * next interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}

static int __init ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int error;

	info = pdev->dev.platform_data;

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->irq = platform_get_irq(pdev, 0);
	if (espi->irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "unable to request iomem resources\n");
		error = -EBUSY;
		goto fail_put_clock;
	}

	espi->regs_base = ioremap(res->start, resource_size(res));
	if (!espi->regs_base) {
		dev_err(&pdev->dev, "failed to map resources\n");
		error = -ENODEV;
		goto fail_free_mem;
	}

	error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
			    "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_unmap_regs;
	}

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_irq;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, espi->irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_irq:
	free_irq(espi->irq, espi);
fail_unmap_regs:
	iounmap(espi->regs_base);
fail_free_mem:
	release_mem_region(res->start, resource_size(res));
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);

	return error;
}

static int __exit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.remove		= __exit_p(ep93xx_spi_remove),
};

static int __init ep93xx_spi_init(void)
{
	return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
}
module_init(ep93xx_spi_init);

static void __exit ep93xx_spi_exit(void)
{
	platform_driver_unregister(&ep93xx_spi_driver);
}
module_exit(ep93xx_spi_exit);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");