// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		BIT(8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

#define SPI_MAX_XFER			(SZ_64K - 64)

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10

#define SPI_BUS_WIDTH			8

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	struct icc_path		*icc_path; /* interconnect to RAM */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	const u8		*tx_buf;
	u8			*rx_buf;
	int			qup_v1;

	int			mode;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;

	u32			bw_speed_hz;
};

static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);

static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
{
	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);

	return (opflag & flag) != 0;
}

static inline bool spi_qup_is_dma_xfer(int mode)
{
	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
		return true;

	return false;
}

/* gets the transaction length in bytes */
static inline unsigned int spi_qup_len(struct spi_qup *controller)
{
	return controller->n_words * controller->w_size;
}

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

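/*
 * Vote for interconnect bandwidth proportional to the current bus rate.
 * The last voted rate is cached in bw_speed_hz so back-to-back transfers
 * at the same speed skip the interconnect framework; the suspend paths
 * pass 0 to drop the vote.
 */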
static int spi_qup_vote_bw(struct spi_qup *controller, u32 speed_hz)
{
	u32 needed_peak_bw;
	int ret;

	if (controller->bw_speed_hz == speed_hz)
		return 0;

	needed_peak_bw = Bps_to_icc(speed_hz * SPI_BUS_WIDTH);
	ret = icc_set_bw(controller->icc_path, 0, needed_peak_bw);
	if (ret)
		return ret;

	controller->bw_speed_hz = speed_hz;
	return 0;
}

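/*
 * Move the QUP state machine to @state (RESET, RUN or PAUSE).  The
 * controller must report QUP_STATE_VALID both before the write and after
 * it takes effect; each wait polls up to SPI_DELAY_RETRY times with a
 * short sleep in between, and gives up with -EIO.
 */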
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld polls, requested state %d\n",
			loop, state);
	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
	u8 *rx_buf = controller->rx_buf;
	int i, shift, num_bytes;
	u32 word;

	for (; num_words; num_words--) {

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->rx_bytes,
				       controller->w_size);

		if (!rx_buf) {
			controller->rx_bytes += num_bytes;
			continue;
		}

		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 *  4 bytes: 0x12345678
			 *  2 bytes: 0x00001234
			 *  1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (controller->w_size - i - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

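/*
 * Drain the input FIFO on an IN_SERVICE interrupt.  In block mode a whole
 * block's worth of words is read per IN_BLOCK_READ_REQ; in FIFO mode words
 * are read one at a time while IN_FIFO_NOT_EMPTY is set.  The service flag
 * is acked first, before deciding whether any data remains.
 */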
static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			goto exit;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
	 * reads, it has to be cleared again at the very end.  However, be sure
	 * to refresh the opflags value, because MAX_INPUT_DONE_FLAG may now be
	 * present, and that is used to determine whether the transaction is
	 * complete.
	 */
exit:
	if (!remainder) {
		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
				       controller->base + QUP_OPERATIONAL);
	}
}

static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);
		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

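/*
 * Fill the output FIFO, mirroring spi_qup_read(): a block at a time per
 * OUT_BLOCK_WRITE_REQ in block mode, or single words until OUT_FIFO_FULL
 * in FIFO mode, acking OUT_SERVICE_FLAG before each round.
 */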
static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		/* make sure the interrupt is valid */
		if (!remainder)
			return;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}

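/*
 * Map one scatterlist onto a dmaengine descriptor for the given direction,
 * attach the (optional) completion callback and submit it.  The caller is
 * responsible for kicking the channel with dma_async_issue_pending().
 */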
static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dir == DMA_MEM_TO_DEV)
		chan = host->dma_tx;
	else
		chan = host->dma_rx;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_controller *host,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(host->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(host->dma_rx);
}

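/*
 * Count how many bytes (and scatterlist entries, via @nents) fit under
 * @max, stopping early on u32 overflow.  Used to carve SPI_MAX_XFER sized
 * slices out of a larger scatterlist.
 */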
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}

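/*
 * Run a transfer through the BAM DMA engine.  Transfers larger than
 * SPI_MAX_XFER are split into multiple rounds: each round reprograms the
 * word count, issues rx/tx descriptors for one slice of the scatterlists
 * and waits for the completion callback before advancing the lists.
 */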
static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	ret = spi_qup_vote_bw(qup, xfer->speed_hz);
	if (ret) {
		dev_err(qup->dev, "failed to vote for ICC bandwidth: %d\n", ret);
		return -EIO;
	}

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

	do {
		u32 rx_nents = 0, tx_nents = 0;

		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
					SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
					SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(host->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(host->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}

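/*
 * Run a transfer in programmed I/O (FIFO or block) mode, likewise chunked
 * into SPI_MAX_XFER sized pieces.  Each chunk goes through RUN -> PAUSE to
 * pre-fill the FIFO, then back to RUN, with the interrupt handler moving
 * the remaining data.
 */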
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	iterations = n_words / SPI_MAX_XFER; /* round down */
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

	do {
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;

		/*
		 * If the remaining chunk is small enough, fall back to
		 * FIFO mode for it.
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}

static bool spi_qup_data_pending(struct spi_qup *controller)
{
	unsigned int remainder_tx, remainder_rx;

	remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
				    controller->tx_bytes, controller->w_size);

	remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
				    controller->rx_bytes, controller->w_size);

	return remainder_tx || remainder_rx;
}

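/*
 * Single interrupt handler for both QUP and SPI mini-core events: latch
 * and clear the error flags, service the input/output FIFOs for PIO
 * transfers, and complete the transfer on error, on exhausted counts or
 * on MAX_INPUT_DONE.
 */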
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	spin_lock(&controller->lock);
	if (!controller->error)
		controller->error = error;
	spin_unlock(&controller->lock);

	if (spi_qup_is_dma_xfer(controller->mode)) {
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);

		if (!spi_qup_data_pending(controller))
			complete(&controller->done);
	}

	if (error)
		complete(&controller->done);

	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
		if (!spi_qup_is_dma_xfer(controller->mode)) {
			if (spi_qup_data_pending(controller))
				return IRQ_HANDLED;
		}
		complete(&controller->done);
	}

	return IRQ_HANDLED;
}

/* set the clock rate, compute the word geometry and pick the I/O mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	int ret;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "loopback transfer too large for FIFO: %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d\n",
			xfer->speed_hz);
		return -EIO;
	}
	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi->controller->can_dma &&
		 spi->controller->can_dma(spi->controller, spi, xfer) &&
		 spi->controller->cur_msg_mapped)
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}

/* prepare the QUP for the next SPI transaction of a given type */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * For DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one:
			 * a non-balanced transfer where there is only an
			 * rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
				controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high SPI clock rates, but
	 * is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */

		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

static int spi_qup_transfer_one(struct spi_controller *host,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(host);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

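	/*
	 * Budget roughly 100x the ideal wire time: xfer->len * 8 bits at
	 * speed_hz.  For example, a full SPI_MAX_XFER chunk at 1 MHz is
	 * about 65472 * 8 / 1000 = 524 ms on the wire, so a ~52 s timeout.
	 */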
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(host, xfer);

	return ret;
}

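/*
 * A transfer is eligible for DMA only if the buffers are cache aligned,
 * the channels were actually acquired, the length is a whole number of
 * blocks on QUP v1, and the data would not fit in the FIFO anyway.
 */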
static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_rx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}

static void spi_qup_release_dma(struct spi_controller *host)
{
	if (!IS_ERR_OR_NULL(host->dma_rx))
		dma_release_channel(host->dma_rx);
	if (!IS_ERR_OR_NULL(host->dma_tx))
		dma_release_channel(host->dma_tx);
}

static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
{
	struct spi_qup *spi = spi_controller_get_devdata(host);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(host->dma_rx))
		return PTR_ERR(host->dma_rx);

	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		ret = PTR_ERR(host->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(host->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(host->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(host->dma_tx);
err_tx:
	dma_release_channel(host->dma_rx);
	return ret;
}

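/*
 * Native chip select control via the FORCE_CS bit; only wired up for
 * QUP v2 (see probe), while v1 relies on GPIO chip selects.
 */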
static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
	struct spi_qup *controller;
	u32 spi_ioc;
	u32 spi_ioc_orig;

	controller = spi_controller_get_devdata(spi->controller);
	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (!val)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct icc_path *icc_path;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	icc_path = devm_of_icc_get(dev, NULL);
	if (IS_ERR(icc_path))
		return dev_err_probe(dev, PTR_ERR(icc_path),
				     "failed to get interconnect path\n");

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "invalid OPP table\n");

	host = spi_alloc_host(dev, sizeof(struct spi_qup));
	if (!host) {
		dev_err(dev, "cannot allocate host\n");
		return -ENOMEM;
	}

	/* use num-cs unless it is absent or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		host->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		host->num_chipselect = num_cs;

	host->use_gpio_descriptors = true;
	host->max_native_cs = SPI_NUM_CHIPSELECTS;
	host->bus_num = pdev->id;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	host->max_speed_hz = max_freq;
	host->transfer_one = spi_qup_transfer_one;
	host->dev.of_node = pdev->dev.of_node;
	host->auto_runtime_pm = true;
	host->dma_alignment = dma_get_cache_alignment();
	host->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, host);

	controller = spi_controller_get_devdata(host);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->icc_path = icc_path;
	controller->irq = irq;

	ret = spi_qup_init_dma(host, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		host->can_dma = spi_qup_can_dma;

	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		host->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		goto error_dma;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		goto error_dma;
	}

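	/*
	 * Decode the I/O geometry advertised by the hardware: a block size
	 * field of 0 means 4-byte blocks, otherwise size * 16 bytes, and
	 * each FIFO holds block size * 2^(fifo_size + 1) bytes.
	 */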
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_clk;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* on earlier (v1) versions of the QUP, enable error reporting, except for INPUT_OVER_RUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_clk;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_clk:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
error_dma:
	spi_qup_release_dma(host);
error:
	spi_controller_put(host);
	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	clk_disable_unprepare(controller->cclk);
	spi_qup_vote_bw(controller, 0);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	u32 config;
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	if (pm_runtime_suspended(device)) {
		ret = spi_qup_pm_resume_runtime(device);
		if (ret)
			return ret;
	}
	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	spi_qup_vote_bw(controller, 0);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		goto disable_clk;

	ret = spi_controller_resume(host);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static void spi_qup_remove(struct platform_device *pdev)
{
	struct spi_controller *host = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);

	if (ret >= 0) {
		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
		if (ret)
			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
				 ERR_PTR(ret));

		clk_disable_unprepare(controller->cclk);
		clk_disable_unprepare(controller->iclk);
	} else {
		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
			 ERR_PTR(ret));
	}

	spi_qup_release_dma(host);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

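/*
 * A minimal, hypothetical device tree node wired up the way this driver
 * expects.  The unit address, interrupt, clock and DMA specifiers below
 * are illustrative; only the property names and the "core"/"iface" and
 * "rx"/"tx" name pairs come from the code above:
 *
 *	spi@78b6000 {
 *		compatible = "qcom,spi-qup-v2.2.1";
 *		reg = <0x078b6000 0x600>;
 *		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&gcc 30>, <&gcc 20>;
 *		clock-names = "core", "iface";
 *		dmas = <&blsp_dma 13>, <&blsp_dma 12>;
 *		dma-names = "rx", "tx";
 *		spi-max-frequency = <50000000>;
 *		num-cs = <4>;
 *	};
 */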
static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove_new = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");
