1// SPDX-License-Identifier: GPL-2.0
2/* Microchip KSZ PTP Implementation
3 *
4 * Copyright (C) 2020 ARRI Lighting
5 * Copyright (C) 2022 Microchip Technology Inc.
6 */
7
8#include <linux/dsa/ksz_common.h>
9#include <linux/irq.h>
10#include <linux/irqdomain.h>
11#include <linux/kernel.h>
12#include <linux/ptp_classify.h>
13#include <linux/ptp_clock_kernel.h>
14
15#include "ksz_common.h"
16#include "ksz_ptp.h"
17#include "ksz_ptp_reg.h"
18
/* container_of() helpers to navigate from the embedded PTP capability
 * struct / work item back to the driver-private containers.
 */
#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
#define work_to_xmit_work(w) \
		container_of((w), struct ksz_deferred_xmit_work, work)

/* Maximum frequency adjustment accepted by adjfine:
 * Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns
 * = (2^30-1) * (2 ^ 32) / 40 ns * 1 ns = 6249999
 */
#define KSZ_MAX_DRIFT_CORR 6249999
/* Longest pulse (125 ms) the trigger output unit can generate */
#define KSZ_MAX_PULSE_WIDTH 125000000LL

#define KSZ_PTP_INC_NS 40ULL  /* HW clock is incremented every 40 ns (by 40) */
#define KSZ_PTP_SUBNS_BITS 32

/* Bit offset of the first PTP interrupt in the port interrupt registers */
#define KSZ_PTP_INT_START 13
34
/* Route the trigger output unit signals to the device pins.
 * Only LAN937x needs this; on other chips the function is a no-op.
 */
static int ksz_ptp_tou_gpio(struct ksz_device *dev)
{
	int ret;

	if (!is_lan937x(dev))
		return 0;

	/* Drive the selected GPIO as an output */
	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
			GPIO_OUT);
	if (ret)
		return ret;

	/* Override the normal LED function on both LED pins ... */
	ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
			LED_OVR_1 | LED_OVR_2);
	if (ret)
		return ret;

	/* ... and select the PTP GPIO signals as their source instead */
	return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
}
56
57static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
58{
59	u32 data;
60	int ret;
61
62	/* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
63	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
64
65	data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
66	ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
67	if (ret)
68		return ret;
69
70	data = FIELD_PREP(TRIG_INT_M, BIT(unit));
71	ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
72	if (ret)
73		return ret;
74
75	/* Clear reset and set GPIO direction */
76	return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
77			 0);
78}
79
80static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
81{
82	u32 data;
83
84	if (pulse_ns & 0x3)
85		return -EINVAL;
86
87	data = (pulse_ns / 8);
88	if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
89		return -ERANGE;
90
91	return 0;
92}
93
94static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
95				       struct timespec64 const *ts)
96{
97	int ret;
98
99	/* Hardware has only 32 bit */
100	if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
101		return -EINVAL;
102
103	ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
104	if (ret)
105		return ret;
106
107	ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
108	if (ret)
109		return ret;
110
111	return 0;
112}
113
/* Arm the currently selected trigger output unit and verify that the
 * hardware did not report an error for it.
 */
static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
{
	u32 data;
	int ret;

	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
	if (ret)
		return ret;

	/* Check error flag:
	 * - the ACTIVE flag is NOT cleared on an error!
	 */
	ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
	if (ret)
		return ret;

	if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
		dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
			unit);
		ret = -EIO;
		/* Unit will be reset on next access */
		return ret;
	}

	return 0;
}
140
/* Program the selected trigger output unit for an endlessly repeating
 * positive pulse train: pattern, cycle width, pulse width and the first
 * target time.
 */
static int ksz_ptp_configure_perout(struct ksz_device *dev,
				    u32 cycle_width_ns, u32 pulse_width_ns,
				    struct timespec64 const *target_time,
				    u8 index)
{
	u32 data;
	int ret;

	data = FIELD_PREP(TRIG_NOTIFY, 1) |
		FIELD_PREP(TRIG_GPO_M, index) |
		FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
	ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
	if (ret)
		return ret;

	ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
	if (ret)
		return ret;

	/* Set cycle count 0 - Infinite */
	ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
	if (ret)
		return ret;

	/* Pulse width is programmed in units of 8 ns */
	data = (pulse_width_ns / 8);
	ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_target_time_set(dev, target_time);
	if (ret)
		return ret;

	return 0;
}
176
/* Enable or disable a periodic output signal on one trigger output unit.
 * Presumably called with ptp_data->lock held (see ksz_ptp_enable()) —
 * TODO confirm for all callers.
 */
static int ksz_ptp_enable_perout(struct ksz_device *dev,
				 struct ptp_perout_request const *request,
				 int on)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	u64 req_pulse_width_ns;
	u64 cycle_width_ns;
	u64 pulse_width_ns;
	int pin = 0;
	u32 data32;
	int ret;

	/* Only the duty-cycle flag is supported */
	if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
		return -EOPNOTSUPP;

	/* Refuse if the trigger output unit is busy with another mode */
	if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
	    ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
		return -EBUSY;

	pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
	if (pin < 0)
		return -EINVAL;

	/* Select which GPIO and which trigger output unit the subsequent
	 * TOU register accesses refer to.
	 */
	data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
		 FIELD_PREP(PTP_TOU_INDEX, request->index);
	ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
			PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_reset(dev, request->index);
	if (ret)
		return ret;

	if (!on) {
		ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
		return 0;
	}

	/* Remember start time and period so the signal can be restarted
	 * after settime/adjtime (see ksz_ptp_restart_perout()).
	 */
	ptp_data->perout_target_time_first.tv_sec  = request->start.sec;
	ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;

	ptp_data->perout_period.tv_sec = request->period.sec;
	ptp_data->perout_period.tv_nsec = request->period.nsec;

	cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
	if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
		return -EINVAL;

	if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
		pulse_width_ns = request->on.sec * NSEC_PER_SEC +
			request->on.nsec;
	} else {
		/* Use a duty cycle of 50%. Maximum pulse width supported by the
		 * hardware is a little bit more than 125 ms.
		 */
		req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
				      request->period.nsec) / 2;
		pulse_width_ns = min_t(u64, req_pulse_width_ns,
				       KSZ_MAX_PULSE_WIDTH);
	}

	ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
	if (ret)
		return ret;

	ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
				       &ptp_data->perout_target_time_first,
				       pin);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_gpio(dev);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_start(dev, request->index);
	if (ret)
		return ret;

	ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;

	return 0;
}
261
/* Enable or disable PTP message handling globally, depending on whether
 * any user port currently has TX or RX hardware timestamping enabled.
 */
static int ksz_ptp_enable_mode(struct ksz_device *dev)
{
	struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	struct ksz_port *prt;
	struct dsa_port *dp;
	bool tag_en = false;
	int ret;

	/* A single enabled port is enough to keep PTP processing on */
	dsa_switch_for_each_user_port(dp, dev->ds) {
		prt = &dev->ports[dp->index];
		if (prt->hwts_tx_en || prt->hwts_rx_en) {
			tag_en = true;
			break;
		}
	}

	if (tag_en) {
		/* Start the aux worker that keeps the cached clock fresh */
		ret = ptp_schedule_worker(ptp_data->clock, 0);
		if (ret)
			return ret;
	} else {
		ptp_cancel_worker_sync(ptp_data->clock);
	}

	/* Inform the tagger about the new timestamping state */
	tagger_data->hwtstamp_set_state(dev->ds, tag_en);

	return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
			 tag_en ? PTP_ENABLE : 0);
}
292
/* Report the port's hardware timestamping capabilities, as queried via
 * the "ethtool -T <interface>" utility.
 */
296int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
297{
298	struct ksz_device *dev = ds->priv;
299	struct ksz_ptp_data *ptp_data;
300
301	ptp_data = &dev->ptp_data;
302
303	if (!ptp_data->clock)
304		return -ENODEV;
305
306	ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
307			      SOF_TIMESTAMPING_RX_HARDWARE |
308			      SOF_TIMESTAMPING_RAW_HARDWARE;
309
310	ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
311
312	if (is_lan937x(dev))
313		ts->tx_types |= BIT(HWTSTAMP_TX_ON);
314
315	ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
316			 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
317			 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
318			 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
319
320	ts->phc_index = ptp_clock_index(ptp_data->clock);
321
322	return 0;
323}
324
325int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
326{
327	struct ksz_device *dev = ds->priv;
328	struct hwtstamp_config *config;
329	struct ksz_port *prt;
330
331	prt = &dev->ports[port];
332	config = &prt->tstamp_config;
333
334	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
335		-EFAULT : 0;
336}
337
/* Apply a hwtstamp configuration to a port: select which PTP message
 * types get egress timestamps, program one-step vs. two-step operation,
 * normalize the RX filter and finally switch PTP mode on or off globally.
 * May adjust config->rx_filter to the closest supported filter.
 */
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
				   struct ksz_port *prt,
				   struct hwtstamp_config *config)
{
	int ret;

	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		/* No egress timestamps at all */
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = false;
		break;
	case HWTSTAMP_TX_ONESTEP_P2P:
		/* Only Pdelay_Req needs a timestamp; Pdelay_Resp is
		 * corrected in hardware (one-step)
		 */
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
		if (ret)
			return ret;

		break;
	case HWTSTAMP_TX_ON:
		/* Two-step timestamping of all event messages; only
		 * supported on LAN937x
		 */
		if (!is_lan937x(dev))
			return -ERANGE;

		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = true;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
		if (ret)
			return ret;

		break;
	default:
		return -ERANGE;
	}

	/* Map the requested RX filter to the closest supported one and
	 * report the actually used filter back to the caller.
	 */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		prt->hwts_rx_en = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		prt->hwts_rx_en = true;
		break;
	default:
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	return ksz_ptp_enable_mode(dev);
}
409
410int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
411{
412	struct ksz_device *dev = ds->priv;
413	struct hwtstamp_config config;
414	struct ksz_port *prt;
415	int ret;
416
417	prt = &dev->ports[port];
418
419	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
420		return -EFAULT;
421
422	ret = ksz_set_hwtstamp_config(dev, prt, &config);
423	if (ret)
424		return ret;
425
426	memcpy(&prt->tstamp_config, &config, sizeof(config));
427
428	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
429		return -EFAULT;
430
431	return 0;
432}
433
/* Reconstruct a full time stamp from a partial hardware time stamp that
 * carries only the two low bits of the seconds, using the cached copy of
 * the PTP clock as a reference.
 */
static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
{
	struct timespec64 ptp_clock_time;
	struct ksz_ptp_data *ptp_data;
	struct timespec64 diff;
	struct timespec64 ts;

	ptp_data = &dev->ptp_data;
	ts = ktime_to_timespec64(tstamp);

	/* Take a consistent snapshot of the cached clock time */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_clock_time = ptp_data->clock_time;
	spin_unlock_bh(&ptp_data->clock_lock);

	/* calculate full time from partial time stamp */
	ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;

	/* find nearest possible point in time: the 2-bit seconds wrap
	 * every 4 s, so shift by one wrap period if we are off by more
	 * than 2 s in either direction
	 */
	diff = timespec64_sub(ts, ptp_clock_time);
	if (diff.tv_sec > 2)
		ts.tv_sec -= 4;
	else if (diff.tv_sec < -2)
		ts.tv_sec += 4;

	return timespec64_to_ktime(ts);
}
460
/* Attach the reconstructed RX hardware time stamp to the skb and, in
 * one-step P2P mode, pre-subtract the partial ingress time stamp from
 * the correction field of Pdelay_Req messages.
 * Always returns false: the skb is never held back for deferred
 * timestamp delivery.
 */
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
		       unsigned int type)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	struct ksz_device *dev = ds->priv;
	struct ptp_header *ptp_hdr;
	struct ksz_port *prt;
	u8 ptp_msg_type;
	ktime_t tstamp;
	s64 correction;

	prt = &dev->ports[port];

	/* Partial time stamp extracted by the tagger (see KSZ_SKB_CB) */
	tstamp = KSZ_SKB_CB(skb)->tstamp;
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);

	if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
		goto out;

	ptp_hdr = ptp_parse_header(skb, type);
	if (!ptp_hdr)
		goto out;

	ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
	if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
		goto out;

	/* Only subtract the partial time stamp from the correction field.  When
	 * the hardware adds the egress time stamp to the correction field of
	 * the PDelay_Resp message on tx, also only the partial time stamp will
	 * be added.
	 */
	correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
	correction -= ktime_to_ns(tstamp) << 16;

	ptp_header_update_correction(skb, type, ptp_hdr, correction);

out:
	return false;
}
502
/* Decide whether an outgoing PTP message needs TX timestamp handling.
 * For messages that do, clone the skb so the egress time stamp can be
 * delivered later; for one-step Pdelay_Resp, only mark the skb for a
 * correction-field update by the tagger instead.
 */
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
	struct ksz_device *dev = ds->priv;
	struct ptp_header *hdr;
	struct sk_buff *clone;
	struct ksz_port *prt;
	unsigned int type;
	u8 ptp_msg_type;

	prt = &dev->ports[port];

	if (!prt->hwts_tx_en)
		return;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	ptp_msg_type = ptp_get_msgtype(hdr, type);

	switch (ptp_msg_type) {
	case PTP_MSGTYPE_SYNC:
		/* In one-step P2P mode Sync needs no TX time stamp */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
			return;
		break;
	case PTP_MSGTYPE_PDELAY_REQ:
		break;
	case PTP_MSGTYPE_PDELAY_RESP:
		/* One-step: hardware inserts the time into the correction
		 * field, no clone required
		 */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
			KSZ_SKB_CB(skb)->ptp_type = type;
			KSZ_SKB_CB(skb)->update_correction = true;
			return;
		}
		break;

	default:
		return;
	}

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	/* caching the value to be used in tag_ksz.c */
	KSZ_SKB_CB(skb)->clone = clone;
}
553
/* Wait for the egress time stamp captured by the interrupt handler
 * (see ksz_ptp_msg_thread_fn()) and deliver it on the cloned skb.
 * Gives up silently after 100 ms.
 */
static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
				 struct ksz_port *prt, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps hwtstamps = {};
	int ret;

	/* timeout must include DSA conduit to transmit data, tstamp latency,
	 * IRQ latency and time for reading the time stamp.
	 */
	ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
					  msecs_to_jiffies(100));
	if (!ret)
		return;

	hwtstamps.hwtstamp = prt->tstamp_msg;
	skb_complete_tx_timestamp(skb, &hwtstamps);
}
571
/* Deferred transmit worker: send the skb that needs a TX time stamp and
 * then wait for the time stamp to complete the socket clone.
 */
void ksz_port_deferred_xmit(struct kthread_work *work)
{
	struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct sk_buff *clone, *skb = xmit_work->skb;
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct ksz_device *dev = ds->priv;
	struct ksz_port *prt;

	prt = &dev->ports[xmit_work->dp->index];

	/* Clone created earlier in ksz_port_txtstamp() */
	clone = KSZ_SKB_CB(skb)->clone;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Re-arm the completion before the frame can possibly egress */
	reinit_completion(&prt->tstamp_msg_comp);

	dsa_enqueue_skb(skb, skb->dev);

	ksz_ptp_txtstamp_skb(dev, prt, clone);

	kfree(xmit_work);
}
594
/* Read the current PTP hardware clock into @ts.
 * Presumably requires ptp_data->lock held by the caller (all callers in
 * this file take it) — TODO confirm.
 */
static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
	u32 nanoseconds;
	u32 seconds;
	u8 phase;
	int ret;

	/* Copy current PTP clock into shadow registers and read */
	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
	if (ret)
		return ret;

	ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
	if (ret)
		return ret;

	ts->tv_sec = seconds;
	/* The phase register counts in 8 ns steps */
	ts->tv_nsec = nanoseconds + phase * 8;

	return 0;
}
624
625static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
626{
627	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
628	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
629	int ret;
630
631	mutex_lock(&ptp_data->lock);
632	ret = _ksz_ptp_gettime(dev, ts);
633	mutex_unlock(&ptp_data->lock);
634
635	return ret;
636}
637
/* After the clock was stepped (settime/adjtime), restart the periodic
 * output so its edges stay aligned to the originally requested start
 * time and period.
 */
static int ksz_ptp_restart_perout(struct ksz_device *dev)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	s64 now_ns, first_ns, period_ns, next_ns;
	struct ptp_perout_request request;
	struct timespec64 next;
	struct timespec64 now;
	unsigned int count;
	int ret;

	dev_info(dev->dev, "Restarting periodic output signal\n");

	ret = _ksz_ptp_gettime(dev, &now);
	if (ret)
		return ret;

	now_ns = timespec64_to_ns(&now);
	first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);

	/* Calculate next perout event based on start time and period */
	period_ns = timespec64_to_ns(&ptp_data->perout_period);

	if (first_ns < now_ns) {
		/* Skip the whole periods that already elapsed */
		count = div_u64(now_ns - first_ns, period_ns);
		next_ns = first_ns + count * period_ns;
	} else {
		next_ns = first_ns;
	}

	/* Ensure 100 ms guard time prior next event */
	while (next_ns < now_ns + 100000000)
		next_ns += period_ns;

	/* Restart periodic output signal */
	next = ns_to_timespec64(next_ns);
	request.start.sec  = next.tv_sec;
	request.start.nsec = next.tv_nsec;
	request.period.sec  = ptp_data->perout_period.tv_sec;
	request.period.nsec = ptp_data->perout_period.tv_nsec;
	request.index = 0;
	request.flags = 0;

	return ksz_ptp_enable_perout(dev, &request, 1);
}
682
/* ptp_clock_info::settime64 callback: load the hardware clock via the
 * shadow registers, restart any active periodic output and refresh the
 * cached software copy of the clock.
 */
static int ksz_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Write to shadow registers and Load PTP clock */
	ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
	if (ret)
		goto unlock;

	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
	if (ret)
		goto unlock;

	/* A periodic output, if running, must be re-aligned to the new time */
	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = *ts;
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);

	return ret;
}
730
/* ptp_clock_info::adjfine callback: convert the scaled-ppm frequency
 * offset into the hardware's sub-nanosecond rate adjustment.
 */
static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	u64 base, adj;
	bool negative;
	u32 data32;
	int ret;

	mutex_lock(&ptp_data->lock);

	if (scaled_ppm) {
		/* Nominal increment (40 ns) in sub-nanosecond fixed point */
		base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
		negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);

		data32 = (u32)adj;
		data32 &= PTP_SUBNANOSEC_M;
		/* Direction bit is set for a positive rate adjustment */
		if (!negative)
			data32 |= PTP_RATE_DIR;

		ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
		if (ret)
			goto unlock;

		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
				PTP_CLK_ADJ_ENABLE);
		if (ret)
			goto unlock;
	} else {
		/* Zero offset: simply disable the rate adjustment */
		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
769
/* ptp_clock_info::adjtime callback: step the hardware clock by @delta,
 * restart any active periodic output and update the cached clock copy.
 */
static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 delta64 = ns_to_timespec64(delta);
	s32 sec, nsec;
	u16 data16;
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Split the delta so that both parts carry the same sign; the
	 * hardware applies one common direction (PTP_STEP_DIR below) to
	 * the unsigned magnitudes written into the registers.
	 */
	sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
	if (ret)
		goto unlock;

	ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
	if (ret)
		goto unlock;

	data16 |= PTP_STEP_ADJ;

	/* PTP_STEP_DIR -- 0: subtract, 1: add */
	if (delta < 0)
		data16 &= ~PTP_STEP_DIR;
	else
		data16 |= PTP_STEP_DIR;

	ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
	if (ret)
		goto unlock;

	/* A periodic output, if running, must be re-aligned to the new time */
	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
830
831static int ksz_ptp_enable(struct ptp_clock_info *ptp,
832			  struct ptp_clock_request *req, int on)
833{
834	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
835	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
836	int ret;
837
838	switch (req->type) {
839	case PTP_CLK_REQ_PEROUT:
840		mutex_lock(&ptp_data->lock);
841		ret = ksz_ptp_enable_perout(dev, &req->perout, on);
842		mutex_unlock(&ptp_data->lock);
843		break;
844	default:
845		return -EOPNOTSUPP;
846	}
847
848	return ret;
849}
850
851static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
852			      enum ptp_pin_function func, unsigned int chan)
853{
854	int ret = 0;
855
856	switch (func) {
857	case PTP_PF_NONE:
858	case PTP_PF_PEROUT:
859		break;
860	default:
861		ret = -1;
862		break;
863	}
864
865	return ret;
866}
867
868/*  Function is pointer to the do_aux_work in the ptp_clock capability */
869static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
870{
871	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
872	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
873	struct timespec64 ts;
874	int ret;
875
876	mutex_lock(&ptp_data->lock);
877	ret = _ksz_ptp_gettime(dev, &ts);
878	if (ret)
879		goto out;
880
881	spin_lock_bh(&ptp_data->clock_lock);
882	ptp_data->clock_time = ts;
883	spin_unlock_bh(&ptp_data->clock_lock);
884
885out:
886	mutex_unlock(&ptp_data->lock);
887
888	return HZ;  /* reschedule in 1 second */
889}
890
891static int ksz_ptp_start_clock(struct ksz_device *dev)
892{
893	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
894	int ret;
895
896	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
897	if (ret)
898		return ret;
899
900	ptp_data->clock_time.tv_sec = 0;
901	ptp_data->clock_time.tv_nsec = 0;
902
903	return 0;
904}
905
/* Initialize the PTP subsystem of the switch: locks, clock capabilities,
 * hardware clock, pin table, transparent-clock mode, and finally the
 * PHC registration with the PTP core.
 */
int ksz_ptp_clock_register(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_ptp_data *ptp_data;
	int ret;
	u8 i;

	ptp_data = &dev->ptp_data;
	mutex_init(&ptp_data->lock);
	spin_lock_init(&ptp_data->clock_lock);

	ptp_data->caps.owner		= THIS_MODULE;
	snprintf(ptp_data->caps.name, 16, "Microchip Clock");
	ptp_data->caps.max_adj		= KSZ_MAX_DRIFT_CORR;
	ptp_data->caps.gettime64	= ksz_ptp_gettime;
	ptp_data->caps.settime64	= ksz_ptp_settime;
	ptp_data->caps.adjfine		= ksz_ptp_adjfine;
	ptp_data->caps.adjtime		= ksz_ptp_adjtime;
	ptp_data->caps.do_aux_work	= ksz_ptp_do_aux_work;
	ptp_data->caps.enable		= ksz_ptp_enable;
	ptp_data->caps.verify		= ksz_ptp_verify_pin;
	ptp_data->caps.n_pins		= KSZ_PTP_N_GPIO;
	ptp_data->caps.n_per_out	= 3;

	ret = ksz_ptp_start_clock(dev);
	if (ret)
		return ret;

	/* All pins start out unassigned (PTP_PF_NONE) */
	for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
		struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];

		snprintf(ptp_pin->name,
			 sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
		ptp_pin->index = i;
		ptp_pin->func = PTP_PF_NONE;
	}

	ptp_data->caps.pin_config = ptp_data->pin_config;

	/* Currently only P2P mode is supported. When 802_1AS bit is set, it
	 * forwards all PTP packets to host port and none to other ports.
	 */
	ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
			PTP_TC_P2P | PTP_802_1AS);
	if (ret)
		return ret;

	ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
	if (IS_ERR_OR_NULL(ptp_data->clock))
		return PTR_ERR(ptp_data->clock);

	return 0;
}
959
960void ksz_ptp_clock_unregister(struct dsa_switch *ds)
961{
962	struct ksz_device *dev = ds->priv;
963	struct ksz_ptp_data *ptp_data;
964
965	ptp_data = &dev->ptp_data;
966
967	if (ptp_data->clock)
968		ptp_clock_unregister(ptp_data->clock);
969}
970
/* Threaded handler for one per-message egress interrupt: read the
 * captured time stamp (if enabled for this message type), reconstruct
 * the full time and wake the waiter in ksz_ptp_txtstamp_skb().
 */
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
	struct ksz_ptp_irq *ptpmsg_irq = dev_id;
	struct ksz_device *dev;
	struct ksz_port *port;
	u32 tstamp_raw;
	ktime_t tstamp;
	int ret;

	port = ptpmsg_irq->port;
	dev = port->ksz_dev;

	if (ptpmsg_irq->ts_en) {
		ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
		if (ret)
			return IRQ_NONE;

		tstamp = ksz_decode_tstamp(tstamp_raw);

		port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);

		complete(&port->tstamp_msg_comp);
	}

	return IRQ_HANDLED;
}
997
/* Threaded demultiplexer for the per-port PTP interrupt: read the status
 * register, acknowledge it, and dispatch each pending source as a nested
 * interrupt of the PTP IRQ domain.
 */
static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
{
	struct ksz_irq *ptpirq = dev_id;
	unsigned int nhandled = 0;
	struct ksz_device *dev;
	unsigned int sub_irq;
	u16 data;
	int ret;
	u8 n;

	dev = ptpirq->dev;

	ret = ksz_read16(dev, ptpirq->reg_status, &data);
	if (ret)
		goto out;

	/* Clear the interrupts W1C */
	ret = ksz_write16(dev, ptpirq->reg_status, data);
	if (ret)
		return IRQ_NONE;

	/* The PTP sources start at bit KSZ_PTP_INT_START in the register */
	for (n = 0; n < ptpirq->nirqs; ++n) {
		if (data & BIT(n + KSZ_PTP_INT_START)) {
			sub_irq = irq_find_mapping(ptpirq->domain, n);
			handle_nested_irq(sub_irq);
			++nhandled;
		}
	}

out:
	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
1030
/* Mask a PTP sub-interrupt. Only the shadow mask is updated here; the
 * hardware register is written in ksz_ptp_irq_bus_sync_unlock().
 */
static void ksz_ptp_irq_mask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
}
1037
/* Unmask a PTP sub-interrupt in the shadow mask; written back to the
 * hardware in ksz_ptp_irq_bus_sync_unlock().
 */
static void ksz_ptp_irq_unmask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
}
1044
/* Serialize the (slow-bus) register update done in the matching
 * bus_sync_unlock callback.
 */
static void ksz_ptp_irq_bus_lock(struct irq_data *d)
{
	struct ksz_irq *kirq  = irq_data_get_irq_chip_data(d);

	mutex_lock(&kirq->dev->lock_irq);
}
1051
/* Flush the shadow interrupt mask to the hardware and release the bus
 * lock taken in ksz_ptp_irq_bus_lock().
 */
static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
{
	struct ksz_irq *kirq  = irq_data_get_irq_chip_data(d);
	struct ksz_device *dev = kirq->dev;
	int ret;

	ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
	if (ret)
		dev_err(dev->dev, "failed to change IRQ mask\n");

	mutex_unlock(&dev->lock_irq);
}
1064
/* irq_chip for the per-port PTP interrupt domain; mask changes are
 * cached and written to hardware in the bus_sync_unlock callback.
 */
static const struct irq_chip ksz_ptp_irq_chip = {
	.name			= "ksz-irq",
	.irq_mask		= ksz_ptp_irq_mask,
	.irq_unmask		= ksz_ptp_irq_unmask,
	.irq_bus_lock		= ksz_ptp_irq_bus_lock,
	.irq_bus_sync_unlock	= ksz_ptp_irq_bus_sync_unlock,
};
1072
/* Initialize a freshly created mapping in the PTP IRQ domain: attach the
 * chip data, install the handler and exclude the IRQ from autoprobing.
 */
static int ksz_ptp_irq_domain_map(struct irq_domain *d,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, d->host_data);
	irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
	irq_set_noprobe(irq);

	return 0;
}
1082
/* Domain ops for the linear PTP sub-interrupt domain */
static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
	.map	= ksz_ptp_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};
1087
1088static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
1089{
1090	struct ksz_ptp_irq *ptpmsg_irq;
1091
1092	ptpmsg_irq = &port->ptpmsg_irq[n];
1093
1094	free_irq(ptpmsg_irq->num, ptpmsg_irq);
1095	irq_dispose_mapping(ptpmsg_irq->num);
1096}
1097
1098static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
1099{
1100	u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
1101			REG_PTP_PORT_SYNC_TS};
1102	static const char * const name[] = {"pdresp-msg", "xdreq-msg",
1103					    "sync-msg"};
1104	const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
1105	struct ksz_ptp_irq *ptpmsg_irq;
1106
1107	ptpmsg_irq = &port->ptpmsg_irq[n];
1108
1109	ptpmsg_irq->port = port;
1110	ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
1111
1112	snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
1113
1114	ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
1115	if (ptpmsg_irq->num < 0)
1116		return ptpmsg_irq->num;
1117
1118	return request_threaded_irq(ptpmsg_irq->num, NULL,
1119				    ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
1120				    ptpmsg_irq->name, ptpmsg_irq);
1121}
1122
/* Create the per-port PTP interrupt infrastructure: a linear IRQ domain
 * with one sub-interrupt per timestamped message type, the demultiplexing
 * parent handler, and the per-message handlers.
 */
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	const struct ksz_dev_ops *ops = dev->dev_ops;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	int irq;
	int ret;

	ptpirq->dev = dev;
	ptpirq->masked = 0;
	ptpirq->nirqs = 3;
	ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
	ptpirq->reg_status = ops->get_port_addr(p,
						REG_PTP_PORT_TX_INT_STATUS__2);
	snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);

	init_completion(&port->tstamp_msg_comp);

	ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
					       &ksz_ptp_irq_domain_ops, ptpirq);
	if (!ptpirq->domain)
		return -ENOMEM;

	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_create_mapping(ptpirq->domain, irq);

	/* The parent is the PTP source in the port interrupt controller */
	ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
	if (ptpirq->irq_num < 0) {
		ret = ptpirq->irq_num;
		goto out;
	}

	ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
				   IRQF_ONESHOT, ptpirq->name, ptpirq);
	if (ret)
		goto out;

	for (irq = 0; irq < ptpirq->nirqs; irq++) {
		ret = ksz_ptp_msg_irq_setup(port, irq);
		if (ret)
			goto out_ptp_msg;
	}

	return 0;

out_ptp_msg:
	/* Unwind: parent handler, then the message handlers set up so far */
	free_irq(ptpirq->irq_num, ptpirq);
	while (irq--)
		free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_dispose_mapping(port->ptpmsg_irq[irq].num);

	irq_domain_remove(ptpirq->domain);

	return ret;
}
1181
/* Tear down the per-port PTP interrupt infrastructure in reverse order
 * of ksz_ptp_irq_setup(): message handlers, parent handler, domain.
 */
void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	u8 n;

	for (n = 0; n < ptpirq->nirqs; n++)
		ksz_ptp_msg_irq_free(port, n);

	free_irq(ptpirq->irq_num, ptpirq);
	irq_dispose_mapping(ptpirq->irq_num);

	irq_domain_remove(ptpirq->domain);
}
1197
1198MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
1199MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
1200MODULE_DESCRIPTION("PTP support for KSZ switch");
1201MODULE_LICENSE("GPL");
1202