// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

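/* usleep_range() bounds (us) used by the AMD NAVI GPU polled-transfer quirk below */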
#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
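	/*
	 * A Tx threshold of half the FIFO depth keeps TX_EMPTY interrupts
	 * reasonably coalesced, while an Rx threshold of zero signals
	 * RX_FULL as soon as a single byte has been received.
	 */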
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	unsigned int comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Default to the I2C spec maximum of 300 ns for the falling times */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. The
	 * only difference is the timing parameter values since the
	 * registers are the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available and
		 * calculate the SCL timing parameters for Fast Mode Plus if
		 * they are not set.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * also needed in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check if high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called during I2C initialization and in case of a timeout at
 * run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	unsigned int dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is a ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of the IC_TAR register.
		 * We always set it since I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from the registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	__i2c_dw_write_intr_mask(dev, 0);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}

static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
					1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction with a polling
 * based transfer routine, writing the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	unsigned int val;

	/*
	 * In order to enable the interrupt for UCSI, i.e. the AMD NAVI GPU
	 * card, it is mandatory to set the right value in a specific
	 * register (offset 0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the I2C read/write transaction of buffer length,
		 * and poll for the bus busy status. For the last message
		 * transfer, update the command with the stop bit set.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
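			/*
			 * In DW_IC_DATA_CMD, bit 8 issues a read command and
			 * bit 9 requests a STOP after this byte.
			 */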
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to a hardware bug, write the same command twice */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * We need to check the stop bit. However, it cannot be
					 * detected from the registers, so we always check it
					 * when reading/writing the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) a low level master read/write transaction.
 * This function is called from the interrupt handler or the polling
 * transfer path (via i2c_dw_process_transfer()) and pumps i2c_msg
 * messages into the Tx buffer. It also handles the case where the
 * i2c_msg data is larger than the Tx buffer.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * All messages within one transfer must target the same
		 * address. If the target address changes, abort the
		 * transfer with an error.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/*
			 * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually set the
			 * restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
					(dev->msg_write_idx > 0))
				need_restart = true;
		}

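		/*
		 * This round is limited by the free space in the Tx FIFO and
		 * by the room still available in the Rx FIFO.
		 */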
		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set, we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers, so we always set it
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

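			/* BIT(10) in DW_IC_DATA_CMD requests a RESTART first */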
			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
		 * transaction here. Also disable the TX_EMPTY IRQ
		 * while waiting for the data length byte to avoid a
		 * flood of bogus interrupts.
		 */
		if (flags & I2C_M_RECV_LEN) {
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
			break;
		} else if (buf_len > 0) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If the i2c_msg index search is completed, we don't need the
	 * TX_EMPTY interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	__i2c_dw_write_intr_mask(dev, intr_mask);
}

static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;
	unsigned int intr_mask;

	/*
	 * Adjust the buffer length and clear the I2C_M_RECV_LEN flag
	 * after receiving the first byte.
	 */
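	/*
	 * One extra byte is accounted for the length byte itself and one
	 * more for the PEC byte when used. Read commands for the remaining
	 * bytes still have to be queued, minus those already outstanding.
	 */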
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	/*
	 * The buffer length has been received, so re-enable the TX_EMPTY
	 * interrupt to resume the SMBus transaction.
	 */
	__i2c_dw_read_intr_mask(dev, &intr_mask);
	intr_mask |= DW_IC_INTR_TX_EMPTY;
	__i2c_dw_write_intr_mask(dev, intr_mask);

	return len;
}

static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		unsigned int tmp;
		u32 len;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			tmp &= DW_IC_DATA_CMD_DAT;
			/* Ensure the length byte is a valid value */
			if (flags & I2C_M_RECV_LEN) {
				/*
				 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set (which
				 * cannot be detected from the registers), the
				 * controller is disabled only once the STOP bit
				 * has been issued. In the I2C_FUNC_SMBUS_BLOCK_DATA
				 * case the STOP bit is set only after the block
				 * data response length has been received, so when
				 * that length is invalid we must read one more byte
				 * with the STOP bit set to complete the transaction.
				 */
				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
					tmp = 1;

				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	unsigned int stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	if (!(dev->flags & ACCESS_POLLING)) {
		regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
	} else {
		regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
		stat &= dev->sw_mask;
	}

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if ((stat & DW_IC_INTR_STOP_DET) &&
	    ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

static void i2c_dw_process_transfer(struct dw_i2c_dev *dev, unsigned int stat)
{
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status &= ~STATUS_MASK;
		dev->rx_outstanding = 0;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
	     (dev->rx_outstanding == 0))
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		__i2c_dw_read_intr_mask(dev, &stat);
		__i2c_dw_write_intr_mask(dev, 0);
		__i2c_dw_write_intr_mask(dev, stat);
	}
}

/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	unsigned int stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;
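	/* An all-ones status usually means the device is no longer accessible */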
	if (pm_runtime_suspended(dev->dev) || stat == GENMASK(31, 0))
		return IRQ_NONE;
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);

	stat = i2c_dw_read_clear_intrbits(dev);

	if (!(dev->status & STATUS_ACTIVE)) {
		/*
		 * Unexpected interrupt from the driver's point of view. The
		 * state variables are either unset or stale, so acknowledge
		 * and disable interrupts to suppress further ones in case the
		 * interrupt really came from this HW (e.g. firmware has left
		 * the HW active).
		 */
		__i2c_dw_write_intr_mask(dev, 0);
		return IRQ_HANDLED;
	}

	i2c_dw_process_transfer(dev, stat);

	return IRQ_HANDLED;
}

static int i2c_dw_wait_transfer(struct dw_i2c_dev *dev)
{
	unsigned long timeout = dev->adapter.timeout;
	unsigned int stat;
	int ret;

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = wait_for_completion_timeout(&dev->cmd_complete, timeout);
	} else {
		timeout += jiffies;
		do {
			ret = try_wait_for_completion(&dev->cmd_complete);
			if (ret)
				break;

			stat = i2c_dw_read_clear_intrbits(dev);
			if (stat)
				i2c_dw_process_transfer(dev, stat);
			else
				/* Try to save some power */
				usleep_range(3, 25);
		} while (time_before(jiffies, timeout));
	}

	return ret ? 0 : -ETIMEDOUT;
}

/*
 * Prepare the controller for a transaction and start the transfer;
 * i2c_dw_xfer_msg() is then driven from the interrupt or polling path.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	switch (dev->flags & MODEL_MASK) {
	case MODEL_AMD_NAVI_GPU:
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	default:
		break;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = 0;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	ret = i2c_dw_wait_transfer(dev);
	if (ret) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init_master() implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. It needs some more investigation whether
	 * the additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
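		/*
		 * Fast Mode and Fast Mode Plus share this speed setting;
		 * only the SCL timing parameters differ.
		 */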
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

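	/*
	 * The SCL GPIO is mandatory for GPIO recovery; silently skip the
	 * setup when it is absent.
	 */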
	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->pinctrl = devm_pinctrl_get(dev->dev);
	if (IS_ERR(rinfo->pinctrl)) {
		if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
			return PTR_ERR(rinfo->pinctrl);

		rinfo->pinctrl = NULL;
		dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
	} else if (!rinfo->pinctrl) {
		dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
	}

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	unsigned int ic_con;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	/* Lock the bus for accessing DW_IC_CON */
	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/*
	 * On AMD platforms the BIOS advertises the bus clear feature and
	 * enables SCL/SDA stuck-low detection, while the SMU FW performs the
	 * bus recovery process. The driver should not ignore this BIOS
	 * advertisement of the bus clear feature.
	 */
	ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
		dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
		irq_flags = IRQF_NO_SUSPEND;
	else
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

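	/* Keep all interrupts masked until a transfer actually starts */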
	__i2c_dw_write_intr_mask(dev, 0);
	i2c_dw_release_lock(dev);

	if (!(dev->flags & ACCESS_POLLING)) {
		ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
				       irq_flags, dev_name(dev->dev), dev);
		if (ret) {
			dev_err(dev->dev, "failure requesting irq %i: %d\n",
				dev->irq, ret);
			return ret;
		}
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment the PM usage count during adapter registration in order
	 * to avoid a possible spurious runtime suspend when the adapter
	 * device is registered to the device core, and an immediate resume
	 * in case the bus has registered I2C slaves that do I2C transfers in
	 * their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");