// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>

#include "icss_iep.h"

#define IEP_MAX_DEF_INC		0xf
#define IEP_MAX_COMPEN_INC		0xfff
#define IEP_MAX_COMPEN_COUNT	0xffffff

#define IEP_GLOBAL_CFG_CNT_ENABLE	BIT(0)
#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
#define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8

#define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)

#define IEP_CMP_CFG_SHADOW_EN		BIT(17)
#define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
#define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))

#define IEP_CMP_STATUS(cmp)		(1 << (cmp))

#define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
#define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))

#define IEP_MIN_CMP	0
#define IEP_MAX_CMP	15

#define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
#define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)

#define LATCH_INDEX(ts_index)			((ts_index) + 6)
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
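/* Latch inputs 0 and 1 are timestamped via IEP capture events 6 and 7
 * (see the CAP6/CAP7 RISE registers below), hence the +6 in LATCH_INDEX().
 */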

enum {
	ICSS_IEP_GLOBAL_CFG_REG,
	ICSS_IEP_GLOBAL_STATUS_REG,
	ICSS_IEP_COMPEN_REG,
	ICSS_IEP_SLOW_COMPEN_REG,
	ICSS_IEP_COUNT_REG0,
	ICSS_IEP_COUNT_REG1,
	ICSS_IEP_CAPTURE_CFG_REG,
	ICSS_IEP_CAPTURE_STAT_REG,

	ICSS_IEP_CAP6_RISE_REG0,
	ICSS_IEP_CAP6_RISE_REG1,

	ICSS_IEP_CAP7_RISE_REG0,
	ICSS_IEP_CAP7_RISE_REG1,

	ICSS_IEP_CMP_CFG_REG,
	ICSS_IEP_CMP_STAT_REG,
	ICSS_IEP_CMP0_REG0,
	ICSS_IEP_CMP0_REG1,
	ICSS_IEP_CMP1_REG0,
	ICSS_IEP_CMP1_REG1,

	ICSS_IEP_CMP8_REG0,
	ICSS_IEP_CMP8_REG1,
	ICSS_IEP_SYNC_CTRL_REG,
	ICSS_IEP_SYNC0_STAT_REG,
	ICSS_IEP_SYNC1_STAT_REG,
	ICSS_IEP_SYNC_PWIDTH_REG,
	ICSS_IEP_SYNC0_PERIOD_REG,
	ICSS_IEP_SYNC1_DELAY_REG,
	ICSS_IEP_SYNC_START_REG,
	ICSS_IEP_MAX_REGS,
};
/**
 * struct icss_iep_plat_data - Platform data to handle SoC variants
 * @config: Regmap configuration data
 * @reg_offs: Register offsets to capture offset differences across SoCs
 * @flags: Flags to represent IEP properties
 */
struct icss_iep_plat_data {
	struct regmap_config *config;
	u32 reg_offs[ICSS_IEP_MAX_REGS];
	u32 flags;
};

struct icss_iep {
	struct device *dev;
	void __iomem *base;
	const struct icss_iep_plat_data *plat_data;
	struct regmap *map;
	struct device_node *client_np;
	unsigned long refclk_freq;
	int clk_tick_time;	/* one refclk tick time in ns */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct mutex ptp_clk_mutex;	/* PHC access serializer */
	spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
	u32 def_inc;
	s16 slow_cmp_inc;
	u32 slow_cmp_count;
	const struct icss_iep_clockops *ops;
	void *clockops_data;
	u32 cycle_time_ns;
	u32 perout_enabled;
	bool pps_enabled;
	int cap_cmp_irq;
	u64 period;
	u32 latch_enable;
};

/**
 * icss_iep_get_count_hi() - Get the upper 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: upper 32 bits of the IEP counter
 */
int icss_iep_get_count_hi(struct icss_iep *iep)
{
	u32 val = 0;

	if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);

/**
 * icss_iep_get_count_low() - Get the lower 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: lower 32 bits of the IEP counter
 */
int icss_iep_get_count_low(struct icss_iep *iep)
{
	u32 val = 0;

	if (iep)
		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);

	return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_low);

/**
 * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
 * @iep: Pointer to structure representing IEP.
 *
 * Return: PTP clock index, -1 if not registered
 */
int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
{
	if (!iep || !iep->ptp_clock)
		return -1;
	return ptp_clock_index(iep->ptp_clock);
}
EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);

static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base +
		       iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
	writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);

/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
	unsigned long flags;

	if (iep->ops && iep->ops->settime) {
		iep->ops->settime(iep->clockops_data, ns);
		return;
	}

	spin_lock_irqsave(&iep->irq_lock, flags);
	if (iep->pps_enabled || iep->perout_enabled)
		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

	icss_iep_set_counter(iep, ns);

	if (iep->pps_enabled || iep->perout_enabled) {
		icss_iep_update_to_next_boundary(iep, ns);
		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
	}
	spin_unlock_irqrestore(&iep->irq_lock, flags);
}

/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time sensitive.
 *
 * Return: The current timestamp of the PTP clock using IEP driver
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
			    struct ptp_system_timestamp *sts)
{
	u32 ts_hi = 0, ts_lo;
	unsigned long flags;

	if (iep->ops && iep->ops->gettime)
		return iep->ops->gettime(iep->clockops_data, sts);

	/* use local_irq_x() to make it work for both RT/non-RT */
	local_irq_save(flags);

	/* no need to play with hi-lo, hi is latched when lo is read */
	ptp_read_system_prets(sts);
	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
	ptp_read_system_postts(sts);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	local_irq_restore(flags);

	return (u64)ts_lo | (u64)ts_hi << 32;
}

static void icss_iep_enable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   IEP_GLOBAL_CFG_CNT_ENABLE);
}

static void icss_iep_disable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   0);
}

static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}

static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
			   def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
}

static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_inc > IEP_MAX_COMPEN_INC) {
		dev_err(dev, "%s: too high compensation inc %d\n",
			__func__, compen_inc);
		compen_inc = IEP_MAX_COMPEN_INC;
	}

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK,
			   compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}

static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_count > IEP_MAX_COMPEN_COUNT) {
		dev_err(dev, "%s: too high compensation count %u\n",
			__func__, compen_count);
		compen_count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}

static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}

/* PTP PHC operations */
static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u32 cyc_count;
	u16 cmp_inc;

	mutex_lock(&iep->ptp_clk_mutex);

	/* ppb is the amount of frequency adjustment requested in parts per
	 * billion; e.g. 100 ppb means we need to speed up the clock by 100 Hz,
	 * i.e. at the end of 1 second (1 billion ns) of clock time we should
	 * have counted 100 more ns.
	 * We use IEP slow compensation to achieve continuous freq. adjustment.
	 * There are 2 parts: cycle time and adjustment per cycle.
	 * The simplest case would be a 1 sec cycle time; the adjustment
	 * per cycle would then be (def_inc + ppb).
	 * The cycle time has to be chosen based on how large the ppb is,
	 * e.g. the smaller the ppb, the larger the cycle time.
	 * The minimum adjustment we can do is +-1 ns per cycle, so let's
	 * reduce the cycle time to get a 1 ns per cycle adjustment.
	 *	1ppb = 1sec cycle time & 1ns adjust
	 *	1000ppb = 1/1000 cycle time & 1ns adjust per cycle
	 */
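	/* Worked example (illustrative only, assuming def_inc = 4 ns, i.e. a
	 * 250 MHz IEP clock, and cycle_time_ns == 0): for ppb = 100,
	 * slow_cmp_inc = 1 and cyc_count = 1e9 / 100 = 10,000,000 ns, which
	 * is 2,500,000 IEP cycles after dividing by clk_tick_time. The
	 * counter then ticks by 5 ns instead of 4 ns once every 2,500,000
	 * cycles (every 10 ms), i.e. +100 ns per second = 100 ppb faster.
	 */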

	if (iep->cycle_time_ns)
		iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
	else
		iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */

	if (ppb < 0) {
		iep->slow_cmp_inc = -iep->slow_cmp_inc;
		ppb = -ppb;
	}

	cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
	cyc_count /= ppb;		/* cycle time per ppb */

	/* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
	if (!iep->cycle_time_ns)
		cyc_count /= iep->clk_tick_time;
	iep->slow_cmp_count = cyc_count;

	/* iep->clk_tick_time is def_inc */
	cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
	icss_iep_set_compensation_inc(iep, cmp_inc);
	icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);

	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	s64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime) {
		iep->ops->adjtime(iep->clockops_data, delta);
	} else {
		ns = icss_iep_gettime(iep, NULL);
		ns += delta;
		icss_iep_settime(iep, ns);
	}
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	ns = icss_iep_gettime(iep, sts);
	*ts = ns_to_timespec64(ns);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	ns = timespec64_to_ns(ts);
	icss_iep_settime(iep, ns);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to next period boundary */
	start_ns += p_ns - 1;
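	/* do_div() divides in place: start_ns becomes the quotient and the
	 * remainder is returned, so multiplying back by p_ns yields the
	 * period-aligned boundary.
	 */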
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}

static int icss_iep_perout_enable_hw(struct icss_iep *iep,
				     struct ptp_perout_request *req, int on)
{
	int ret;
	u64 cmp;

	if (iep->ops && iep->ops->perout_enable) {
		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
		if (ret)
			return ret;

		if (on) {
			/* Configure CMP */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
			/* Configure SYNC, 1ms pulse width */
			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
			regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
		}
	} else {
		if (on) {
			u64 start_ns;

			iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
				      req->period.nsec;
			start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
				   + req->period.nsec;
			icss_iep_update_to_next_boundary(iep, start_ns);

			/* Enable sync in single shot mode */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
				     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear CMP regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

			/* Disable sync */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
		}
	}

	return 0;
}

static int icss_iep_perout_enable(struct icss_iep *iep,
				  struct ptp_perout_request *req, int on)
{
	unsigned long flags;
	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->pps_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->perout_enabled == !!on)
		goto exit;

	spin_lock_irqsave(&iep->irq_lock, flags);
	ret = icss_iep_perout_enable_hw(iep, req, on);
	if (!ret)
		iep->perout_enabled = !!on;
	spin_unlock_irqrestore(&iep->irq_lock, flags);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}

static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
	struct ptp_clock_request rq;
	struct timespec64 ts;
	unsigned long flags;
	int ret = 0;
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->perout_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->pps_enabled == !!on)
		goto exit;

	spin_lock_irqsave(&iep->irq_lock, flags);

	rq.perout.index = 0;
	if (on) {
		ns = icss_iep_gettime(iep, NULL);
		ts = ns_to_timespec64(ns);
		rq.perout.period.sec = 1;
		rq.perout.period.nsec = 0;
		rq.perout.start.sec = ts.tv_sec + 2;
		rq.perout.start.nsec = 0;
	}
	ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);

	if (!ret)
		iep->pps_enabled = !!on;

	spin_unlock_irqrestore(&iep->irq_lock, flags);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 val, cap;
	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
	cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		val |= cap;
		iep->latch_enable |= BIT(index);
	} else {
		val &= ~cap;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}

static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
			       struct ptp_clock_request *rq, int on)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		return icss_iep_perout_enable(iep, &rq->perout, on);
	case PTP_CLK_REQ_PPS:
		return icss_iep_pps_enable(iep, on);
	case PTP_CLK_REQ_EXTTS:
		return icss_iep_extts_enable(iep, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static struct ptp_clock_info icss_iep_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ICSS IEP timer",
	.max_adj	= 10000000,
	.adjfine	= icss_iep_ptp_adjfine,
	.adjtime	= icss_iep_ptp_adjtime,
	.gettimex64	= icss_iep_ptp_gettimeex,
	.settime64	= icss_iep_ptp_settime,
	.enable		= icss_iep_ptp_enable,
};

struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
{
	struct platform_device *pdev;
	struct device_node *iep_np;
	struct icss_iep *iep;

	iep_np = of_parse_phandle(np, "ti,iep", idx);
	if (!iep_np)
		return ERR_PTR(-ENODEV);

	if (!of_device_is_available(iep_np)) {
		of_node_put(iep_np);
		return ERR_PTR(-ENODEV);
	}

	pdev = of_find_device_by_node(iep_np);
	of_node_put(iep_np);

	if (!pdev)
		/* probably IEP not yet probed */
		return ERR_PTR(-EPROBE_DEFER);

	iep = platform_get_drvdata(pdev);
	if (!iep)
		return ERR_PTR(-EPROBE_DEFER);

	device_lock(iep->dev);
	if (iep->client_np) {
		device_unlock(iep->dev);
		dev_err(iep->dev, "IEP is already acquired by %s\n",
			iep->client_np->name);
		return ERR_PTR(-EBUSY);
	}
	iep->client_np = np;
	device_unlock(iep->dev);
	get_device(iep->dev);

	return iep;
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);

struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);
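
/* Typical client usage, as a rough sketch (the "prueth" names and the error
 * handling below are hypothetical, not part of this driver):
 *
 *	iep = icss_iep_get_idx(prueth->np, 0);	(resolved via "ti,iep" phandle)
 *	if (IS_ERR(iep))
 *		return PTR_ERR(iep);
 *	...
 *	icss_iep_put(iep);			(release when done)
 *
 * Only one client may hold an IEP instance at a time; a second
 * icss_iep_get_idx() call returns -EBUSY until icss_iep_put() is called.
 */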

void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);

void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);

void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);

int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register PTP clock: %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);
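
/* A client with a firmware-managed counter passes clockops so that
 * settime/gettime/adjtime and friends are delegated; a bare counter client
 * passes NULL. An illustrative sketch (the clockops instance and private
 * data names here are hypothetical):
 *
 *	ret = icss_iep_init(iep, &prueth_iep_clockops, prueth, cycle_time);
 *
 * Pass cycle_time_ns = 0 for free-running 64-bit mode; a non-zero value
 * selects CMP0 shadow mode, with the counter reset every cycle.
 * Tear down with icss_iep_exit() before icss_iep_put().
 */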

int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);

static int icss_iep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct icss_iep *iep;
	struct clk *iep_clk;

	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
	if (!iep)
		return -ENOMEM;

	iep->dev = dev;
	iep->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iep->base))
		return PTR_ERR(iep->base);

	iep_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(iep_clk))
		return PTR_ERR(iep_clk);

	iep->refclk_freq = clk_get_rate(iep_clk);

	iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
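	/* e.g. a 200 MHz refclk gives def_inc = 5 ns, 250 MHz gives 4 ns */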
	if (iep->def_inc > IEP_MAX_DEF_INC) {
		dev_err(dev, "def_inc %d is too large, IEP clock is too slow to be supported\n",
			iep->def_inc);
		return -EINVAL;
	}

	iep->plat_data = device_get_match_data(dev);
	if (!iep->plat_data)
		return -EINVAL;

	iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
	if (IS_ERR(iep->map)) {
		dev_err(dev, "Failed to create regmap for IEP %ld\n",
			PTR_ERR(iep->map));
		return PTR_ERR(iep->map);
	}

	iep->ptp_info = icss_iep_ptp_info;
	mutex_init(&iep->ptp_clk_mutex);
	spin_lock_init(&iep->irq_lock);
	dev_set_drvdata(dev, iep);
	icss_iep_disable(iep);

	return 0;
}

static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
		return true;
	default:
		return false;
	}
}

static int icss_iep_regmap_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct icss_iep *iep = context;

	writel(val, iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}

static int icss_iep_regmap_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct icss_iep *iep = context;

	*val = readl(iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}

static struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = 1,
};

static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};

static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);

static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI ICSS IEP driver");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");