1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/clocksource.h>
34#include <linux/highmem.h>
35#include <linux/log2.h>
36#include <linux/ptp_clock_kernel.h>
37#include <rdma/mlx5-abi.h>
38#include "lib/eq.h"
39#include "en.h"
40#include "clock.h"
41
/* MTPPS pin direction (pin_mode register field). */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
46
/* MTPPS output waveform (pattern register field). */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
51
/* PPS event generation mode, passed to mlx5_set_mtppse(). */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,	/* sic: "repetitive"; name kept for existing users */
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
57
/* MTPPS field_select bits: which fields of the register write are
 * applied by firmware; unselected fields keep their current value.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD               = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS     = BIT(0xa),
};
68
/* Bounds (in nanoseconds) for a single MTUTC ADJUST_TIME operation;
 * the extended range applies when the mtutc_time_adjustment_extended_range
 * capability is set.  See mlx5_ptp_getmaxphase().
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN          = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX          = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
75
76static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
77{
78	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
79}
80
81static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
82{
83	return (mlx5_real_time_mode(mdev) &&
84		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
85		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
86}
87
/* True when firmware allows modifying the PTP-cycles-to-real-time mapping
 * via the MTUTC register.
 */
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
92
/* Pick the cyclecounter shift for a device running at @dev_freq_khz. */
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
	/* Optimal shift constant leads to corrections above just 1 scaled ppm.
	 *
	 * Two sets of equations are needed to derive the optimal shift
	 * constant for the cyclecounter.
	 *
	 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
	 *    ppb = scaled_ppm * 1000 / 2^16
	 *
	 * Using the two equations together
	 *
	 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
	 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
	 *    dev_freq_khz = 2^(shift_constant - 16)
	 *
	 * then yields
	 *
	 *    shift_constant = ilog2(dev_freq_khz) + 16
	 */

	/* The second term caps the shift so that the 32-bit multiplier
	 * computed by clocksource_khz2mult(dev_freq_khz, shift) in
	 * mlx5_timecounter_init() cannot overflow.
	 */
	return min(ilog2(dev_freq_khz) + 16,
		   ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
117
118static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
119{
120	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
121	struct mlx5_core_dev *mdev;
122
123	mdev = container_of(clock, struct mlx5_core_dev, clock);
124
125	return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
126		       MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
127			     MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
128}
129
130static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
131{
132	s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
133
134	if (delta < -max || delta > max)
135		return false;
136
137	return true;
138}
139
/* Write @mtutc (an mtutc_reg layout of @size bytes) to the MTUTC register.
 * Returns 0 on success, -EOPNOTSUPP when the register is not exposed,
 * or the access_reg error.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
150
/* Read the 64-bit device clock from the init segment, either the free
 * running internal timer or the real-time (UTC) clock.
 *
 * The two 32-bit halves cannot be read atomically, so the high word is
 * sampled before and after the low word; if it changed, the low word
 * wrapped in between and is re-read.  The final value always pairs
 * timer_h1 with the matching low word.  @sts, when non-NULL, brackets
 * the low-word MMIO read for PTP_SYS_OFFSET_EXTENDED.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	/* Real-time uses the device sec/nsec packing; the internal timer
	 * is a plain 64-bit cycle counter.
	 */
	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
176
177static u64 read_internal_timer(const struct cyclecounter *cc)
178{
179	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
180	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
181	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
182						  clock);
183
184	return mlx5_read_time(mdev, NULL, false) & cc->mask;
185}
186
/* Mirror the current timecounter state into the page shared with
 * user space (mlx5_ib clock info) so readers can convert raw cycles
 * to nanoseconds themselves.
 *
 * All call sites hold clock->lock (write_seqlock).  The sign field acts
 * like a seqcount: it is made odd (KERNEL_UPDATING bit) before touching
 * the payload and advanced to the next even value afterwards, letting
 * lock-free readers detect and retry a torn read.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* Page allocation may have failed in mlx5_init_clock_info(). */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
210
/* Work item scheduled by mlx5_pps_event(): for every pin with a pending
 * start time, program the next one-shot output timestamp into MTPPS.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Consume the pending start time atomically w.r.t. the
		 * event handler that arms it.
		 */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
239
/* Periodic work that reads the timecounter often enough to keep the
 * cycle-to-ns conversion from overflowing (see mlx5_init_overflow_period()
 * for the period derivation).  Always re-arms itself, even in internal
 * error state, so accounting resumes once the device recovers.
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	/* Skip the MMIO read while the device is in internal error. */
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}
263
264static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
265				      const struct timespec64 *ts)
266{
267	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
268
269	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
270	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
271		return -EINVAL;
272
273	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
274	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
275	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
276
277	return mlx5_set_mtutc(mdev, in, sizeof(in));
278}
279
/* ptp .settime64 handler.  When firmware allows it, the real-time clock
 * is set first; only on success is the software timecounter re-based so
 * the two views stay consistent.
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_settime_real_time(mdev, ts);

		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
303
304static
305struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
306					      struct ptp_system_timestamp *sts)
307{
308	struct timespec64 ts;
309	u64 time;
310
311	time = mlx5_read_time(mdev, sts, true);
312	ts = ns_to_timespec64(time);
313	return ts;
314}
315
316static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
317			     struct ptp_system_timestamp *sts)
318{
319	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
320	struct mlx5_timer *timer = &clock->timer;
321	struct mlx5_core_dev *mdev;
322	unsigned long flags;
323	u64 cycles, ns;
324
325	mdev = container_of(clock, struct mlx5_core_dev, clock);
326	if (mlx5_real_time_mode(mdev)) {
327		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
328		goto out;
329	}
330
331	write_seqlock_irqsave(&clock->lock, flags);
332	cycles = mlx5_read_time(mdev, sts, false);
333	ns = timecounter_cyc2time(&timer->tc, cycles);
334	write_sequnlock_irqrestore(&clock->lock, flags);
335	*ts = ns_to_timespec64(ns);
336out:
337	return 0;
338}
339
/* Apply a time offset of @delta ns to the device real-time clock.
 * Small deltas use the MTUTC ADJUST_TIME operation; deltas outside the
 * supported range fall back to read-modify-settime.
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		/* Non-atomic fallback: time advances between the read and
		 * the set, so a small error is introduced here.
		 */
		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
360
/* ptp .adjtime handler.  The device real-time clock is adjusted first
 * (when firmware allows it); the software timecounter is then shifted by
 * the same delta to keep both views aligned.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_adjtime_real_time(mdev, delta);

		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
384
385static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
386{
387	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
388	struct mlx5_core_dev *mdev;
389
390	mdev = container_of(clock, struct mlx5_core_dev, clock);
391
392	return mlx5_ptp_adjtime_real_time(mdev, delta);
393}
394
/* Apply a frequency adjustment to the device real-time clock via MTUTC.
 * Prefers the native scaled-ppm unit when the device supports it and the
 * value fits in 32 bits; otherwise converts to ppb.
 */
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
	    scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
		/* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
	} else {
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
414
/* ptp .adjfine handler.  The device real-time clock is adjusted first
 * (when firmware allows it), then the software cyclecounter multiplier
 * is rescaled from the nominal value by the same scaled-ppm factor.
 */
static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u32 mult;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);

		if (err)
			return err;
	}

	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	/* Flush accumulated time at the old rate before switching mult. */
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
442
/* ptp .enable handler for PTP_CLK_REQ_EXTTS: configure a PPS pin as an
 * external-timestamp input and arm repetitive event reporting.
 *
 * Returns 0 on success, -EOPNOTSUPP when PPS or a requested flag is not
 * supported, -EINVAL for a bad pin index, -EBUSY when the pin is not
 * assigned to the EXTTS function.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		/* pattern 1 selects the falling edge, 0 the rising edge. */
		pin_mode = MLX5_PIN_MODE_IN;
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm (or disarm, when !on) event generation for this pin. */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
504
/* Translate an absolute free-running-clock time @target_ns into the raw
 * cycle count at which it will occur, for programming HW event timestamps.
 *
 * NOTE(review): nsec_delta is u64, so this assumes @target_ns is not in
 * the past relative to the current counter reading — both callers compute
 * a strictly future time; confirm if new callers are added.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* Invert the cyclecounter conversion: cycles = ns * 2^shift / mult. */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
525
526static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
527{
528	struct timespec64 ts = {};
529	s64 target_ns;
530
531	ts.tv_sec = sec;
532	target_ns = timespec64_to_ns(&ts);
533
534	return find_target_cycles(mdev, target_ns);
535}
536
537static u64 perout_conf_real_time(s64 sec, u32 nsec)
538{
539	return (u64)nsec | (u64)sec << 32;
540}
541
/* Validate and translate a 1PPS periodic-output request.  Without NPPS
 * support only a 1-second period is programmable; the resulting start
 * timestamp format depends on @real_time.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	/* Require a period of one second; the shift discards the low bit. */
	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
560
/* Widest pulse (ns) the out_pulse_duration_ns register field can encode. */
#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)

/* Derive the output pulse width (ns) for an NPPS request: the requested
 * on-time when PTP_PEROUT_DUTY_CYCLE is set, otherwise half the period.
 * Returns -EINVAL (with a log message) when outside the device limits.
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
594
/* Validate and translate an NPPS (arbitrary-period) real-time output
 * request: fills the period, start timestamp and pulse duration, and adds
 * the NPPS field_select bits.  Returns -EINVAL on out-of-range values.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
621
622static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
623{
624	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
625		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
626}
627
628static int mlx5_perout_configure(struct ptp_clock_info *ptp,
629				 struct ptp_clock_request *rq,
630				 int on)
631{
632	struct mlx5_clock *clock =
633			container_of(ptp, struct mlx5_clock, ptp_info);
634	struct mlx5_core_dev *mdev =
635			container_of(clock, struct mlx5_core_dev, clock);
636	bool rt_mode = mlx5_real_time_mode(mdev);
637	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
638	u32 out_pulse_duration_ns = 0;
639	u32 field_select = 0;
640	u64 npps_period = 0;
641	u64 time_stamp = 0;
642	u8 pin_mode = 0;
643	u8 pattern = 0;
644	int pin = -1;
645	int err = 0;
646
647	if (!MLX5_PPS_CAP(mdev))
648		return -EOPNOTSUPP;
649
650	/* Reject requests with unsupported flags */
651	if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
652		return -EOPNOTSUPP;
653
654	if (rq->perout.index >= clock->ptp_info.n_pins)
655		return -EINVAL;
656
657	field_select = MLX5_MTPPS_FS_ENABLE;
658	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
659	if (pin < 0)
660		return -EBUSY;
661
662	if (on) {
663		bool rt_mode = mlx5_real_time_mode(mdev);
664
665		pin_mode = MLX5_PIN_MODE_OUT;
666		pattern = MLX5_OUT_PATTERN_PERIODIC;
667
668		if (rt_mode &&  rq->perout.start.sec > U32_MAX)
669			return -EINVAL;
670
671		field_select |= MLX5_MTPPS_FS_PIN_MODE |
672				MLX5_MTPPS_FS_PATTERN |
673				MLX5_MTPPS_FS_TIME_STAMP;
674
675		if (mlx5_npps_real_time_supported(mdev))
676			err = perout_conf_npps_real_time(mdev, rq, &field_select,
677							 &out_pulse_duration_ns, &npps_period,
678							 &time_stamp);
679		else
680			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
681		if (err)
682			return err;
683	}
684
685	MLX5_SET(mtpps_reg, in, pin, pin);
686	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
687	MLX5_SET(mtpps_reg, in, pattern, pattern);
688	MLX5_SET(mtpps_reg, in, enable, on);
689	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
690	MLX5_SET(mtpps_reg, in, field_select, field_select);
691	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
692	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
693	err = mlx5_set_mtpps(mdev, in, sizeof(in));
694	if (err)
695		return err;
696
697	if (rt_mode)
698		return 0;
699
700	return mlx5_set_mtppse(mdev, pin, 0,
701			       MLX5_EVENT_MODE_REPETETIVE & on);
702}
703
704static int mlx5_pps_configure(struct ptp_clock_info *ptp,
705			      struct ptp_clock_request *rq,
706			      int on)
707{
708	struct mlx5_clock *clock =
709			container_of(ptp, struct mlx5_clock, ptp_info);
710
711	clock->pps_info.enabled = !!on;
712	return 0;
713}
714
715static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
716			   struct ptp_clock_request *rq,
717			   int on)
718{
719	switch (rq->type) {
720	case PTP_CLK_REQ_EXTTS:
721		return mlx5_extts_configure(ptp, rq, on);
722	case PTP_CLK_REQ_PEROUT:
723		return mlx5_perout_configure(ptp, rq, on);
724	case PTP_CLK_REQ_PPS:
725		return mlx5_pps_configure(ptp, rq, on);
726	default:
727		return -EOPNOTSUPP;
728	}
729	return 0;
730}
731
/* Per-pin capability bits reported in the MTPPS cap_pin_X_mode fields. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
736
/* ptp .verify handler: 0 when @pin supports @func, non-zero otherwise
 * (the ptp core treats any non-zero result as a rejection).
 */
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}
756
/* Template ptp_clock_info, copied into clock->ptp_info at init time.
 * n_pins/pps stay 0 and enable/verify stay NULL here; they are filled in
 * by mlx5_init_pin_config() when the device exposes PPS pins.  max_adj
 * may be raised by mlx5_init_timer_max_freq_adjustment().
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 50000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx5_ptp_adjfine,
	.adjphase	= mlx5_ptp_adjphase,
	.getmaxphase    = mlx5_ptp_getmaxphase,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
775
/* Query the MTPPS register for a single @pin; the reply (mtpps_reg
 * layout) is written into @mtpps.
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
786
787static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
788{
789	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
790
791	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
792	u8 mode;
793	int err;
794
795	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
796	if (err || !MLX5_GET(mtpps_reg, out, enable))
797		return PTP_PF_NONE;
798
799	mode = MLX5_GET(mtpps_reg, out, pin_mode);
800
801	if (mode == MLX5_PIN_MODE_IN)
802		return PTP_PF_EXTTS;
803	else if (mode == MLX5_PIN_MODE_OUT)
804		return PTP_PF_PEROUT;
805
806	return PTP_PF_NONE;
807}
808
/* Allocate and populate the ptp pin table and wire up the pin-related
 * callbacks.  On allocation failure the function silently returns,
 * leaving pin_config NULL and enable/verify/pps unset (pin features
 * then stay unavailable).
 */
static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		/* Seed each pin's function from its current HW state. */
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}
835
/* Read the device PPS capabilities from MTPPS: pin counts, minimum NPPS
 * period / pulse duration (when supported), and per-pin mode caps.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* The minima are reported as log2 values. */
	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
866
867static void ts_next_sec(struct timespec64 *ts)
868{
869	ts->tv_sec += 1;
870	ts->tv_nsec = 0;
871}
872
/* Compute the cycle count of the next whole-second boundary, used to
 * re-arm the 1PPS output after each event.
 */
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
					struct mlx5_clock *clock)
{
	struct timespec64 ts;
	s64 target_ns;

	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
	ts_next_sec(&ts);
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}
885
/* EQ notifier for device PPS events.  For an input (EXTTS) pin, convert
 * the HW timestamp and forward it to the ptp core — as a PPSUSR event
 * when software PPS is enabled, else as EXTTS.  For an output (PEROUT)
 * pin, compute the next second boundary and defer the MTPPS re-arm to
 * mlx5_pps_out() (register access is not safe from this context).
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
931
/* Set up the cyclecounter/timecounter over the device's free-running
 * internal timer and base it on current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* Remember the unadjusted mult; adjfine rescales from this base. */
	timer->nominal_c_mult = timer->cycles.mult;
	/* 41-bit mask — presumably the internal timer width; confirm
	 * against the device spec if changing.
	 */
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
949
/* Derive the overflow-check period (in jiffies) and kick off the
 * periodic overflow work.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert ns to jiffies for schedule_delayed_work(). */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
984
/* Allocate the page shared with mlx5_ib user space and seed it with the
 * current timecounter state.  Allocation failure is non-fatal: the page
 * simply stays unavailable (mlx5_update_clock_info_page() checks for NULL).
 */
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}
1007
1008static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
1009{
1010	struct mlx5_clock *clock = &mdev->clock;
1011	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1012	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1013	u8 log_max_freq_adjustment = 0;
1014	int err;
1015
1016	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1017				   MLX5_REG_MTUTC, 0, 0);
1018	if (!err)
1019		log_max_freq_adjustment =
1020			MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
1021
1022	if (log_max_freq_adjustment)
1023		clock->ptp_info.max_adj =
1024			min(S32_MAX, 1 << log_max_freq_adjustment);
1025}
1026
/* Initialize the PHC: copy the ptp_clock_info template, apply the
 * device frequency-adjustment limit, set up the timecounter, the shared
 * clock-info page and the overflow watchdog, and in real-time mode seed
 * the device clock from system time.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	if (MLX5_CAP_MCAM_REG(mdev, mtutc))
		mlx5_init_timer_max_freq_adjustment(mdev);

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}
1048
1049static void mlx5_init_pps(struct mlx5_core_dev *mdev)
1050{
1051	struct mlx5_clock *clock = &mdev->clock;
1052
1053	if (!MLX5_PPS_CAP(mdev))
1054		return;
1055
1056	mlx5_get_pps_caps(mdev);
1057	mlx5_init_pin_config(clock);
1058}
1059
/* Top-level HW clock init: timer, PPS pins, ptp clock registration and
 * the PPS event notifier.  ptp_clock_register() failure is tolerated —
 * clock->ptp is set to NULL and the driver continues without a PHC.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Initialize the device clock */
	mlx5_init_timer_clock(mdev);

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
1089
/* Tear down the HW clock in reverse order of mlx5_init_clock():
 * unregister the notifier and ptp clock first so no new events or work
 * arrive, then cancel outstanding work and free the shared page and pin
 * table.  Mirrors the early-out condition of mlx5_init_clock().
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}
1113