// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     https://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <linux/platform_data/dma-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>

#define SSPCR0			0x0000
#define SSPCR0_SPO		BIT(6)
#define SSPCR0_SPH		BIT(7)
#define SSPCR0_SCR_SHIFT	8

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
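
/*
 * SSPIIR is read to identify pending interrupts; a write to the same offset
 * (aliased as SSPICR below) clears the interrupt, as done for receive
 * overruns in the interrupt handler.
 */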
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @clk: clock for the controller
 * @mmio: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	struct clk			*clk;
	void __iomem			*mmio;
	unsigned long			sspdr_phys;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
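/* e.g. 8 bits per word -> DSS = 7, 16 bits per word -> DSS = 15 */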

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @host: SPI host
 * @rate: desired SPI output clock rate
 * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
 * @div_scr: pointer to return the scr divider
 */
static int ep93xx_spi_calc_divisors(struct spi_controller *host,
				    u32 rate, u8 *div_cpsr, u8 *div_scr)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;
	/*
	 * Make sure that the requested rate is within the range supported by
	 * the controller.
	 */
	rate = clamp(rate, host->min_speed_hz, host->max_speed_hz);

	/*
	 * Calculate divisors so that we can get speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number starting from 2, scr can be any number
	 * between 0 and 255.
	 */
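	/*
	 * For example, with a hypothetical 7.3728 MHz SSP clock and a 1 MHz
	 * request, the search below picks cpsr = 2, scr = 3:
	 * 7.3728 MHz / (2 * (3 + 1)) = 921.6 kHz, the first rate at or below
	 * the requested one.
	 */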
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				*div_scr = (u8)scr;
				*div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}

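/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @xfer
 * @host: SPI host
 * @spi: SPI device for which the transfer is run
 * @xfer: transfer being programmed
 *
 * Programs the clock divisors (SSPCPSR and SSPCR0.SCR), the SPI mode bits and
 * the word size for the upcoming transfer.
 */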
static int ep93xx_spi_chip_setup(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
	u8 div_cpsr = 0;
	u8 div_scr = 0;
	u16 cr0;
	int err;

	err = ep93xx_spi_calc_divisors(host, xfer->speed_hz,
				       &div_cpsr, &div_scr);
	if (err)
		return err;

	cr0 = div_scr << SSPCR0_SCR_SHIFT;
	if (spi->mode & SPI_CPOL)
		cr0 |= SSPCR0_SPO;
	if (spi->mode & SPI_CPHA)
		cr0 |= SSPCR0_SPH;
	cr0 |= dss;

	dev_dbg(&host->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		spi->mode, div_cpsr, div_scr, dss);
	dev_dbg(&host->dev, "setup: cr0 %#x\n", cr0);

	writel(div_cpsr, espi->mmio + SSPCPSR);
	writel(cr0, espi->mmio + SSPCR0);

	return 0;
}

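/* Push the next frame of the current transfer into the TX FIFO. */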
static void ep93xx_do_write(struct spi_controller *host)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = host->cur_msg->state;
	u32 val = 0;

	if (xfer->bits_per_word > 8) {
		if (xfer->tx_buf)
			val = ((u16 *)xfer->tx_buf)[espi->tx];
		espi->tx += 2;
	} else {
		if (xfer->tx_buf)
			val = ((u8 *)xfer->tx_buf)[espi->tx];
		espi->tx += 1;
	}
	writel(val, espi->mmio + SSPDR);
}

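/* Pull one frame from the RX FIFO into the current transfer's RX buffer. */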
static void ep93xx_do_read(struct spi_controller *host)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = host->cur_msg->state;
	u32 val;

	val = readl(espi->mmio + SSPDR);
	if (xfer->bits_per_word > 8) {
		if (xfer->rx_buf)
			((u16 *)xfer->rx_buf)[espi->rx] = val;
		espi->rx += 2;
	} else {
		if (xfer->rx_buf)
			((u8 *)xfer->rx_buf)[espi->rx] = val;
		espi->rx += 1;
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @host: SPI host
 *
 * This function transfers the next bytes (or half-words) to/from the RX/TX
 * FIFOs. If called several times, the whole transfer will be completed.
 * Returns %-EINPROGRESS if the current transfer is not yet complete,
 * otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should
 * be full.
 */
static int ep93xx_spi_read_write(struct spi_controller *host)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = host->cur_msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(host);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
		ep93xx_do_write(host);
		espi->fifo_level++;
	}

	if (espi->rx == xfer->len)
		return 0;

	return -EINPROGRESS;
}

static enum dma_transfer_direction
ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		return DMA_MEM_TO_DEV;
	case DMA_FROM_DEVICE:
		return DMA_DEV_TO_MEM;
	default:
		return DMA_TRANS_NONE;
	}
}

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @host: SPI host
 * @dir: DMA transfer direction
 *
 * This function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_controller *host,
		       enum dma_data_direction dir)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = host->cur_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = xfer->len;
	int i, ret, nents;

	if (xfer->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = ep93xx_dma_data_to_trans_dir(dir);

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		buf = xfer->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = xfer->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&host->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

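	/*
	 * Use the nents value returned by dma_map_sg(): the mapping may have
	 * coalesced entries, so it can be smaller than sgt->nents.
	 */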
	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
				      DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @host: SPI host
 * @dir: DMA transfer direction
 *
 * This function finishes the DMA transfer. Once it returns, the DMA buffer
 * has been unmapped.
 */
static void ep93xx_spi_dma_finish(struct spi_controller *host,
				  enum dma_data_direction dir)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

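/*
 * Completion callback attached to the RX descriptor: unmap both directions
 * and tell the SPI core that the transfer is done.
 */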
static void ep93xx_spi_dma_callback(void *callback_param)
{
	struct spi_controller *host = callback_param;

	ep93xx_spi_dma_finish(host, DMA_TO_DEVICE);
	ep93xx_spi_dma_finish(host, DMA_FROM_DEVICE);

	spi_finalize_current_transfer(host);
}

static int ep93xx_spi_dma_transfer(struct spi_controller *host)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(host, DMA_FROM_DEVICE);
	if (IS_ERR(rxd)) {
		dev_err(&host->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		return PTR_ERR(rxd);
	}

	txd = ep93xx_spi_dma_prepare(host, DMA_TO_DEVICE);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(host, DMA_FROM_DEVICE);
		dev_err(&host->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		return PTR_ERR(txd);
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = host;

	/* Now submit both descriptors and start DMA */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	/* signal that we need to wait for completion */
	return 1;
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	u32 val;

	/*
	 * If we got an ROR (receive overrun) interrupt we know that something
	 * is wrong. Just abort the message.
	 */
	if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
		/* clear the overrun interrupt */
		writel(0, espi->mmio + SSPICR);
		dev_warn(&host->dev,
			 "receive overrun, aborting the message\n");
		host->cur_msg->status = -EIO;
	} else {
		/*
		 * The interrupt is either RX (RIS) or TX (TIS). In both cases
		 * we simply execute the next data transfer.
		 */
		if (ep93xx_spi_read_write(host)) {
			/*
			 * In the normal case there is still some processing
			 * left for the current transfer, so wait for the next
			 * interrupt.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * The current transfer is finished, either with error or with
	 * success. In either case we disable interrupts and notify the worker
	 * to handle any post-processing of the message.
	 */
	val = readl(espi->mmio + SSPCR1);
	val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	spi_finalize_current_transfer(host);

	return IRQ_HANDLED;
}

static int ep93xx_spi_transfer_one(struct spi_controller *host,
				   struct spi_device *spi,
				   struct spi_transfer *xfer)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	u32 val;
	int ret;

	ret = ep93xx_spi_chip_setup(host, spi, xfer);
	if (ret) {
		dev_err(&host->dev, "failed to setup chip for transfer\n");
		return ret;
	}

	host->cur_msg->state = xfer;
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which fit into
	 * the FIFO and can be transferred with a single interrupt. In these
	 * cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
		return ep93xx_spi_dma_transfer(host);

	/* Using PIO so prime the TX FIFO and enable interrupts */
	ep93xx_spi_read_write(host);

	val = readl(espi->mmio + SSPCR1);
	val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	writel(val, espi->mmio + SSPCR1);

	/* signal that we need to wait for completion */
	return 1;
}

static int ep93xx_spi_prepare_message(struct spi_controller *host,
				      struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	unsigned long timeout;

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&host->dev,
				 "timeout while flushing RX FIFO\n");
			return -ETIMEDOUT;
		}
		readl(espi->mmio + SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using the %SSPSR_TNF bit, which may cause RX FIFO
	 * overruns.
	 */
	espi->fifo_level = 0;

	return 0;
}

static int ep93xx_spi_prepare_hardware(struct spi_controller *host)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	u32 val;
	int ret;

	ret = clk_prepare_enable(espi->clk);
	if (ret)
		return ret;

	val = readl(espi->mmio + SSPCR1);
	val |= SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	return 0;
}

static int ep93xx_spi_unprepare_hardware(struct spi_controller *host)
{
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);
	u32 val;

	val = readl(espi->mmio + SSPCR1);
	val &= ~SSPCR1_SSE;
	writel(val, espi->mmio + SSPCR1);

	clk_disable_unprepare(espi->clk);

	return 0;
}

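/*
 * Only non-M2P (i.e. M2M) channels can service the SSP, so reject any M2P
 * channel offered by the dmaengine core.
 */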
static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	chan->private = filter_param;
	return true;
}

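/*
 * Allocate the zeropage and request the RX/TX DMA channels. If this fails,
 * the probe path warns and the driver falls back to PIO transfers.
 */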
static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
{
	dma_cap_mask_t mask;
	int ret;

	espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
	if (!espi->zeropage)
		return -ENOMEM;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	espi->dma_rx_data.port = EP93XX_DMA_SSP;
	espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
	espi->dma_rx_data.name = "ep93xx-spi-rx";

	espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_rx_data);
	if (!espi->dma_rx) {
		ret = -ENODEV;
		goto fail_free_page;
	}

	espi->dma_tx_data.port = EP93XX_DMA_SSP;
	espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
	espi->dma_tx_data.name = "ep93xx-spi-tx";

	espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
					   &espi->dma_tx_data);
	if (!espi->dma_tx) {
		ret = -ENODEV;
		goto fail_release_rx;
	}

	return 0;

fail_release_rx:
	dma_release_channel(espi->dma_rx);
	espi->dma_rx = NULL;
fail_free_page:
	free_page((unsigned long)espi->zeropage);

	return ret;
}

static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}

	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}

static int ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int irq;
	int error;

	info = dev_get_platdata(&pdev->dev);
	if (!info) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = spi_alloc_host(&pdev->dev, sizeof(*espi));
	if (!host)
		return -ENOMEM;

	host->use_gpio_descriptors = true;
	host->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
	host->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
	host->prepare_message = ep93xx_spi_prepare_message;
	host->transfer_one = ep93xx_spi_transfer_one;
	host->bus_num = pdev->id;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	/*
	 * The SPI core will count the number of GPIO descriptors to figure
	 * out the number of chip selects available on the platform.
	 */
	host->num_chipselect = 0;

	platform_set_drvdata(pdev, host);

	espi = spi_controller_get_devdata(host);

	espi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_host;
	}

	/*
	 * Calculate maximum and minimum supported clock rates
	 * for the controller.
	 */
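	/*
	 * With rate = spi_clk / (cpsr * (1 + scr)), the fastest setting is
	 * cpsr = 2, scr = 0 (clk / 2) and the slowest is cpsr = 254,
	 * scr = 255 (clk / (254 * 256)).
	 */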
	host->max_speed_hz = clk_get_rate(espi->clk) / 2;
	host->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);

	espi->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(espi->mmio)) {
		error = PTR_ERR(espi->mmio);
		goto fail_release_host;
	}
	espi->sspdr_phys = res->start + SSPDR;

	error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
				0, "ep93xx-spi", host);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_release_host;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	/* make sure that the hardware is disabled */
	writel(0, espi->mmio + SSPCR1);

	error = devm_spi_register_controller(&pdev->dev, host);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI host\n");
		goto fail_free_dma;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, irq);

	return 0;

fail_free_dma:
	ep93xx_spi_release_dma(espi);
fail_release_host:
	spi_controller_put(host);

	return error;
}

static void ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_controller_get_devdata(host);

	ep93xx_spi_release_dma(espi);
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
	},
	.probe		= ep93xx_spi_probe,
	.remove_new	= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");