1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4#include <linux/delay.h>
5#include "ice_common.h"
6#include "ice_ptp_hw.h"
7#include "ice_ptp_consts.h"
8#include "ice_cgu_regs.h"
9
10static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
11	DPLL_PIN_FREQUENCY_1PPS,
12	DPLL_PIN_FREQUENCY_10MHZ,
13};
14
15static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
16	DPLL_PIN_FREQUENCY_1PPS,
17};
18
19static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
20	DPLL_PIN_FREQUENCY_10MHZ,
21};
22
23static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
24	{ "CVL-SDP22",	  ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
25		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
26	{ "CVL-SDP20",	  ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
27		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
28	{ "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
29	{ "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
30	{ "SMA1",	  ZL_REF3P, DPLL_PIN_TYPE_EXT,
31		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
32	{ "SMA2/U.FL2",	  ZL_REF3N, DPLL_PIN_TYPE_EXT,
33		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
34	{ "GNSS-1PPS",	  ZL_REF4P, DPLL_PIN_TYPE_GNSS,
35		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
36	{ "OCXO",	  ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
37};
38
39static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
40	{ "CVL-SDP22",	  ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
41		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
42	{ "CVL-SDP20",	  ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
43		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
44	{ "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
45	{ "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
46	{ "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
47	{ "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
48	{ "SMA1",	  ZL_REF3P, DPLL_PIN_TYPE_EXT,
49		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
50	{ "SMA2/U.FL2",	  ZL_REF3N, DPLL_PIN_TYPE_EXT,
51		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
52	{ "GNSS-1PPS",	  ZL_REF4P, DPLL_PIN_TYPE_GNSS,
53		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
54	{ "OCXO",	  ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
55};
56
57static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
58	{ "REF-SMA1",	    ZL_OUT0, DPLL_PIN_TYPE_EXT,
59		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
60	{ "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
61		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
62	{ "PHY-CLK",	    ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
63	{ "MAC-CLK",	    ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
64	{ "CVL-SDP21",	    ZL_OUT4, DPLL_PIN_TYPE_EXT,
65		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
66	{ "CVL-SDP23",	    ZL_OUT5, DPLL_PIN_TYPE_EXT,
67		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
68};
69
70static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
71	{ "REF-SMA1",	    ZL_OUT0, DPLL_PIN_TYPE_EXT,
72		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
73	{ "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
74		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
75	{ "PHY-CLK",	    ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
76	{ "PHY2-CLK",	    ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
77	{ "MAC-CLK",	    ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
78	{ "CVL-SDP21",	    ZL_OUT5, DPLL_PIN_TYPE_EXT,
79		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
80	{ "CVL-SDP23",	    ZL_OUT6, DPLL_PIN_TYPE_EXT,
81		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
82};
83
84static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
85	{ "NONE",	  SI_REF0P, 0, 0 },
86	{ "NONE",	  SI_REF0N, 0, 0 },
87	{ "SYNCE0_DP",	  SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
88	{ "SYNCE0_DN",	  SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
89	{ "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
90		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
91	{ "NONE",	  SI_REF2N, 0, 0 },
92	{ "EXT_PPS_OUT",  SI_REF3,  DPLL_PIN_TYPE_EXT,
93		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
94	{ "INT_PPS_OUT",  SI_REF4,  DPLL_PIN_TYPE_EXT,
95		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
96};
97
98static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
99	{ "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
100		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
101	{ "PHY-CLK",	    SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
102	{ "10MHZ-SMA2",	    SI_OUT2, DPLL_PIN_TYPE_EXT,
103		ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
104	{ "PPS-SMA1",	    SI_OUT3, DPLL_PIN_TYPE_EXT,
105		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
106};
107
108static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
109	{ "NONE",	  ZL_REF0P, 0, 0 },
110	{ "INT_PPS_OUT",  ZL_REF0N, DPLL_PIN_TYPE_EXT,
111		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
112	{ "SYNCE0_DP",	  ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
113	{ "SYNCE0_DN",	  ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
114	{ "NONE",	  ZL_REF2P, 0, 0 },
115	{ "NONE",	  ZL_REF2N, 0, 0 },
116	{ "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
117		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
118	{ "NONE",	  ZL_REF3N, 0, 0 },
119	{ "EXT_PPS_OUT",  ZL_REF4P, DPLL_PIN_TYPE_EXT,
120		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
121	{ "OCXO",	  ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
122};
123
124static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
125	{ "PPS-SMA1",	   ZL_OUT0, DPLL_PIN_TYPE_EXT,
126		ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
127	{ "10MHZ-SMA2",	   ZL_OUT1, DPLL_PIN_TYPE_EXT,
128		ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
129	{ "PHY-CLK",	   ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
130	{ "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
131	{ "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
132		ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
133	{ "NONE",	   ZL_OUT5, 0, 0 },
134};
135
136/* Low level functions for interacting with and managing the device clock used
137 * for the Precision Time Protocol.
138 *
139 * The ice hardware represents the current time using three registers:
140 *
141 *    GLTSYN_TIME_H     GLTSYN_TIME_L     GLTSYN_TIME_R
142 *  +---------------+ +---------------+ +---------------+
143 *  |    32 bits    | |    32 bits    | |    32 bits    |
144 *  +---------------+ +---------------+ +---------------+
145 *
146 * The registers are incremented every clock tick using a 40bit increment
147 * value defined over two registers:
148 *
149 *                     GLTSYN_INCVAL_H   GLTSYN_INCVAL_L
150 *                    +---------------+ +---------------+
151 *                    |    8 bit s    | |    32 bits    |
152 *                    +---------------+ +---------------+
153 *
154 * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L
155 * registers every clock source tick. Depending on the specific device
156 * configuration, the clock source frequency could be one of a number of
157 * values.
158 *
159 * For E810 devices, the increment frequency is 812.5 MHz
160 *
161 * For E822 devices the clock can be derived from different sources, and the
162 * increment has an effective frequency of one of the following:
163 * - 823.4375 MHz
164 * - 783.36 MHz
165 * - 796.875 MHz
166 * - 816 MHz
167 * - 830.078125 MHz
168 * - 783.36 MHz
169 *
170 * The hardware captures timestamps in the PHY for incoming packets, and for
171 * outgoing packets on request. To support this, the PHY maintains a timer
172 * that matches the lower 64 bits of the global source timer.
173 *
174 * In order to ensure that the PHY timers and the source timer are equivalent,
175 * shadow registers are used to prepare the desired initial values. A special
176 * sync command is issued to trigger copying from the shadow registers into
177 * the appropriate source and PHY registers simultaneously.
178 *
179 * The driver supports devices which have different PHYs with subtly different
180 * mechanisms to program and control the timers. We divide the devices into
181 * families named after the first major device, E810 and similar devices, and
182 * E822 and similar devices.
183 *
184 * - E822 based devices have additional support for fine grained Vernier
185 *   calibration which requires significant setup
186 * - The layout of timestamp data in the PHY register blocks is different
187 * - The way timer synchronization commands are issued is different.
188 *
189 * To support this, very low level functions have an e810 or e822 suffix
190 * indicating what type of device they work on. Higher level abstractions for
191 * tasks that can be done on both devices do not have the suffix and will
192 * correctly look up the appropriate low level function when running.
193 *
194 * Functions which only make sense on a single device family may not have
195 * a suitable generic implementation
196 */
197
198/**
199 * ice_get_ptp_src_clock_index - determine source clock index
200 * @hw: pointer to HW struct
201 *
202 * Determine the source clock index currently in use, based on device
203 * capabilities reported during initialization.
204 */
205u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
206{
207	return hw->func_caps.ts_func_info.tmr_index_assoc;
208}
209
210/**
211 * ice_ptp_read_src_incval - Read source timer increment value
212 * @hw: pointer to HW struct
213 *
214 * Read the increment value of the source timer and return it.
215 */
216static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
217{
218	u32 lo, hi;
219	u8 tmr_idx;
220
221	tmr_idx = ice_get_ptp_src_clock_index(hw);
222
223	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
224	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
225
226	return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
227}
228
229/**
230 * ice_ptp_src_cmd - Prepare source timer for a timer command
231 * @hw: pointer to HW structure
232 * @cmd: Timer command
233 *
234 * Prepare the source timer for an upcoming timer sync command.
235 */
236void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
237{
238	u32 cmd_val;
239	u8 tmr_idx;
240
241	tmr_idx = ice_get_ptp_src_clock_index(hw);
242	cmd_val = tmr_idx << SEL_CPK_SRC;
243
244	switch (cmd) {
245	case ICE_PTP_INIT_TIME:
246		cmd_val |= GLTSYN_CMD_INIT_TIME;
247		break;
248	case ICE_PTP_INIT_INCVAL:
249		cmd_val |= GLTSYN_CMD_INIT_INCVAL;
250		break;
251	case ICE_PTP_ADJ_TIME:
252		cmd_val |= GLTSYN_CMD_ADJ_TIME;
253		break;
254	case ICE_PTP_ADJ_TIME_AT_TIME:
255		cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
256		break;
257	case ICE_PTP_READ_TIME:
258		cmd_val |= GLTSYN_CMD_READ_TIME;
259		break;
260	case ICE_PTP_NOP:
261		break;
262	}
263
264	wr32(hw, GLTSYN_CMD, cmd_val);
265}
266
267/**
268 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
269 * @hw: pointer to HW struct
270 *
271 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
272 * write immediately. This triggers the hardware to begin executing all of the
273 * source and PHY timer commands synchronously.
274 */
275static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
276{
277	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
278	ice_flush(hw);
279}
280
281/* E822 family functions
282 *
283 * The following functions operate on the E822 family of devices.
284 */
285
286/**
287 * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
288 * @msg: the PHY message buffer to fill in
289 * @port: the port to access
290 * @offset: the register offset
291 */
292static void
293ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
294{
295	int phy_port, phy, quadtype;
296
297	phy_port = port % ICE_PORTS_PER_PHY_E82X;
298	phy = port / ICE_PORTS_PER_PHY_E82X;
299	quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
300
301	if (quadtype == 0) {
302		msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
303		msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
304	} else {
305		msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
306		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
307	}
308
309	if (phy == 0)
310		msg->dest_dev = rmn_0;
311	else if (phy == 1)
312		msg->dest_dev = rmn_1;
313	else
314		msg->dest_dev = rmn_2;
315}
316
317/**
318 * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
319 * @low_addr: the low address to check
320 * @high_addr: on return, contains the high address of the 64bit register
321 *
322 * Checks if the provided low address is one of the known 64bit PHY values
323 * represented as two 32bit registers. If it is, return the appropriate high
324 * register offset to use.
325 */
326static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
327{
328	switch (low_addr) {
329	case P_REG_PAR_PCS_TX_OFFSET_L:
330		*high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
331		return true;
332	case P_REG_PAR_PCS_RX_OFFSET_L:
333		*high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
334		return true;
335	case P_REG_PAR_TX_TIME_L:
336		*high_addr = P_REG_PAR_TX_TIME_U;
337		return true;
338	case P_REG_PAR_RX_TIME_L:
339		*high_addr = P_REG_PAR_RX_TIME_U;
340		return true;
341	case P_REG_TOTAL_TX_OFFSET_L:
342		*high_addr = P_REG_TOTAL_TX_OFFSET_U;
343		return true;
344	case P_REG_TOTAL_RX_OFFSET_L:
345		*high_addr = P_REG_TOTAL_RX_OFFSET_U;
346		return true;
347	case P_REG_UIX66_10G_40G_L:
348		*high_addr = P_REG_UIX66_10G_40G_U;
349		return true;
350	case P_REG_UIX66_25G_100G_L:
351		*high_addr = P_REG_UIX66_25G_100G_U;
352		return true;
353	case P_REG_TX_CAPTURE_L:
354		*high_addr = P_REG_TX_CAPTURE_U;
355		return true;
356	case P_REG_RX_CAPTURE_L:
357		*high_addr = P_REG_RX_CAPTURE_U;
358		return true;
359	case P_REG_TX_TIMER_INC_PRE_L:
360		*high_addr = P_REG_TX_TIMER_INC_PRE_U;
361		return true;
362	case P_REG_RX_TIMER_INC_PRE_L:
363		*high_addr = P_REG_RX_TIMER_INC_PRE_U;
364		return true;
365	default:
366		return false;
367	}
368}
369
370/**
371 * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
372 * @low_addr: the low address to check
373 * @high_addr: on return, contains the high address of the 40bit value
374 *
375 * Checks if the provided low address is one of the known 40bit PHY values
376 * split into two registers with the lower 8 bits in the low register and the
377 * upper 32 bits in the high register. If it is, return the appropriate high
378 * register offset to use.
379 */
380static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
381{
382	switch (low_addr) {
383	case P_REG_TIMETUS_L:
384		*high_addr = P_REG_TIMETUS_U;
385		return true;
386	case P_REG_PAR_RX_TUS_L:
387		*high_addr = P_REG_PAR_RX_TUS_U;
388		return true;
389	case P_REG_PAR_TX_TUS_L:
390		*high_addr = P_REG_PAR_TX_TUS_U;
391		return true;
392	case P_REG_PCS_RX_TUS_L:
393		*high_addr = P_REG_PCS_RX_TUS_U;
394		return true;
395	case P_REG_PCS_TX_TUS_L:
396		*high_addr = P_REG_PCS_TX_TUS_U;
397		return true;
398	case P_REG_DESK_PAR_RX_TUS_L:
399		*high_addr = P_REG_DESK_PAR_RX_TUS_U;
400		return true;
401	case P_REG_DESK_PAR_TX_TUS_L:
402		*high_addr = P_REG_DESK_PAR_TX_TUS_U;
403		return true;
404	case P_REG_DESK_PCS_RX_TUS_L:
405		*high_addr = P_REG_DESK_PCS_RX_TUS_U;
406		return true;
407	case P_REG_DESK_PCS_TX_TUS_L:
408		*high_addr = P_REG_DESK_PCS_TX_TUS_U;
409		return true;
410	default:
411		return false;
412	}
413}
414
415/**
416 * ice_read_phy_reg_e82x - Read a PHY register
417 * @hw: pointer to the HW struct
418 * @port: PHY port to read from
419 * @offset: PHY register offset to read
420 * @val: on return, the contents read from the PHY
421 *
422 * Read a PHY register for the given port over the device sideband queue.
423 */
424static int
425ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
426{
427	struct ice_sbq_msg_input msg = {0};
428	int err;
429
430	ice_fill_phy_msg_e82x(&msg, port, offset);
431	msg.opcode = ice_sbq_msg_rd;
432
433	err = ice_sbq_rw_reg(hw, &msg);
434	if (err) {
435		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
436			  err);
437		return err;
438	}
439
440	*val = msg.data;
441
442	return 0;
443}
444
445/**
446 * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
447 * @hw: pointer to the HW struct
448 * @port: PHY port to read from
449 * @low_addr: offset of the lower register to read from
450 * @val: on return, the contents of the 64bit value from the PHY registers
451 *
452 * Reads the two registers associated with a 64bit value and returns it in the
453 * val pointer. The offset always specifies the lower register offset to use.
454 * The high offset is looked up. This function only operates on registers
455 * known to be two parts of a 64bit value.
456 */
457static int
458ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
459{
460	u32 low, high;
461	u16 high_addr;
462	int err;
463
464	/* Only operate on registers known to be split into two 32bit
465	 * registers.
466	 */
467	if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
468		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
469			  low_addr);
470		return -EINVAL;
471	}
472
473	err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
474	if (err) {
475		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
476			  low_addr, err);
477		return err;
478	}
479
480	err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
481	if (err) {
482		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
483			  high_addr, err);
484		return err;
485	}
486
487	*val = (u64)high << 32 | low;
488
489	return 0;
490}
491
492/**
493 * ice_write_phy_reg_e82x - Write a PHY register
494 * @hw: pointer to the HW struct
495 * @port: PHY port to write to
496 * @offset: PHY register offset to write
497 * @val: The value to write to the register
498 *
499 * Write a PHY register for the given port over the device sideband queue.
500 */
501static int
502ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
503{
504	struct ice_sbq_msg_input msg = {0};
505	int err;
506
507	ice_fill_phy_msg_e82x(&msg, port, offset);
508	msg.opcode = ice_sbq_msg_wr;
509	msg.data = val;
510
511	err = ice_sbq_rw_reg(hw, &msg);
512	if (err) {
513		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
514			  err);
515		return err;
516	}
517
518	return 0;
519}
520
521/**
522 * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
523 * @hw: pointer to the HW struct
524 * @port: port to write to
525 * @low_addr: offset of the low register
526 * @val: 40b value to write
527 *
528 * Write the provided 40b value to the two associated registers by splitting
529 * it up into two chunks, the lower 8 bits and the upper 32 bits.
530 */
531static int
532ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
533{
534	u32 low, high;
535	u16 high_addr;
536	int err;
537
538	/* Only operate on registers known to be split into a lower 8 bit
539	 * register and an upper 32 bit register.
540	 */
541	if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
542		ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
543			  low_addr);
544		return -EINVAL;
545	}
546
547	low = (u32)(val & P_REG_40B_LOW_M);
548	high = (u32)(val >> P_REG_40B_HIGH_S);
549
550	err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
551	if (err) {
552		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
553			  low_addr, err);
554		return err;
555	}
556
557	err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
558	if (err) {
559		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
560			  high_addr, err);
561		return err;
562	}
563
564	return 0;
565}
566
567/**
568 * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
569 * @hw: pointer to the HW struct
570 * @port: PHY port to read from
571 * @low_addr: offset of the lower register to read from
572 * @val: the contents of the 64bit value to write to PHY
573 *
574 * Write the 64bit value to the two associated 32bit PHY registers. The offset
575 * is always specified as the lower register, and the high address is looked
576 * up. This function only operates on registers known to be two parts of
577 * a 64bit value.
578 */
579static int
580ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
581{
582	u32 low, high;
583	u16 high_addr;
584	int err;
585
586	/* Only operate on registers known to be split into two 32bit
587	 * registers.
588	 */
589	if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
590		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
591			  low_addr);
592		return -EINVAL;
593	}
594
595	low = lower_32_bits(val);
596	high = upper_32_bits(val);
597
598	err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
599	if (err) {
600		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
601			  low_addr, err);
602		return err;
603	}
604
605	err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
606	if (err) {
607		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
608			  high_addr, err);
609		return err;
610	}
611
612	return 0;
613}
614
615/**
616 * ice_fill_quad_msg_e82x - Fill message data for quad register access
617 * @msg: the PHY message buffer to fill in
618 * @quad: the quad to access
619 * @offset: the register offset
620 *
621 * Fill a message buffer for accessing a register in a quad shared between
622 * multiple PHYs.
623 */
624static int
625ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
626{
627	u32 addr;
628
629	if (quad >= ICE_MAX_QUAD)
630		return -EINVAL;
631
632	msg->dest_dev = rmn_0;
633
634	if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
635		addr = Q_0_BASE + offset;
636	else
637		addr = Q_1_BASE + offset;
638
639	msg->msg_addr_low = lower_16_bits(addr);
640	msg->msg_addr_high = upper_16_bits(addr);
641
642	return 0;
643}
644
645/**
646 * ice_read_quad_reg_e82x - Read a PHY quad register
647 * @hw: pointer to the HW struct
648 * @quad: quad to read from
649 * @offset: quad register offset to read
650 * @val: on return, the contents read from the quad
651 *
652 * Read a quad register over the device sideband queue. Quad registers are
653 * shared between multiple PHYs.
654 */
655int
656ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
657{
658	struct ice_sbq_msg_input msg = {0};
659	int err;
660
661	err = ice_fill_quad_msg_e82x(&msg, quad, offset);
662	if (err)
663		return err;
664
665	msg.opcode = ice_sbq_msg_rd;
666
667	err = ice_sbq_rw_reg(hw, &msg);
668	if (err) {
669		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
670			  err);
671		return err;
672	}
673
674	*val = msg.data;
675
676	return 0;
677}
678
679/**
680 * ice_write_quad_reg_e82x - Write a PHY quad register
681 * @hw: pointer to the HW struct
682 * @quad: quad to write to
683 * @offset: quad register offset to write
684 * @val: The value to write to the register
685 *
686 * Write a quad register over the device sideband queue. Quad registers are
687 * shared between multiple PHYs.
688 */
689int
690ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
691{
692	struct ice_sbq_msg_input msg = {0};
693	int err;
694
695	err = ice_fill_quad_msg_e82x(&msg, quad, offset);
696	if (err)
697		return err;
698
699	msg.opcode = ice_sbq_msg_wr;
700	msg.data = val;
701
702	err = ice_sbq_rw_reg(hw, &msg);
703	if (err) {
704		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
705			  err);
706		return err;
707	}
708
709	return 0;
710}
711
712/**
713 * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
714 * @hw: pointer to the HW struct
715 * @quad: the quad to read from
716 * @idx: the timestamp index to read
717 * @tstamp: on return, the 40bit timestamp value
718 *
719 * Read a 40bit timestamp value out of the two associated registers in the
720 * quad memory block that is shared between the internal PHYs of the E822
721 * family of devices.
722 */
723static int
724ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
725{
726	u16 lo_addr, hi_addr;
727	u32 lo, hi;
728	int err;
729
730	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
731	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
732
733	err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
734	if (err) {
735		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
736			  err);
737		return err;
738	}
739
740	err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
741	if (err) {
742		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
743			  err);
744		return err;
745	}
746
747	/* For E822 based internal PHYs, the timestamp is reported with the
748	 * lower 8 bits in the low register, and the upper 32 bits in the high
749	 * register.
750	 */
751	*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
752
753	return 0;
754}
755
756/**
757 * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
758 * @hw: pointer to the HW struct
759 * @quad: the quad to read from
760 * @idx: the timestamp index to reset
761 *
762 * Read the timestamp out of the quad to clear its timestamp status bit from
763 * the PHY quad block that is shared between the internal PHYs of the E822
764 * devices.
765 *
766 * Note that unlike E810, software cannot directly write to the quad memory
767 * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
768 * to determine which timestamps are valid. Reading a timestamp auto-clears
769 * the valid bit.
770 *
771 * To directly clear the contents of the timestamp block entirely, discarding
772 * all timestamp data at once, software should instead use
773 * ice_ptp_reset_ts_memory_quad_e82x().
774 *
775 * This function should only be called on an idx whose bit is set according to
776 * ice_get_phy_tx_tstamp_ready().
777 */
778static int
779ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
780{
781	u64 unused_tstamp;
782	int err;
783
784	err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
785	if (err) {
786		ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
787			  quad, idx, err);
788		return err;
789	}
790
791	return 0;
792}
793
794/**
795 * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
796 * @hw: pointer to the HW struct
797 * @quad: the quad to read from
798 *
799 * Clear all timestamps from the PHY quad block that is shared between the
800 * internal PHYs on the E822 devices.
801 */
802void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
803{
804	ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
805	ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
806}
807
808/**
809 * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
810 * @hw: pointer to the HW struct
811 */
812static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
813{
814	unsigned int quad;
815
816	for (quad = 0; quad < ICE_MAX_QUAD; quad++)
817		ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
818}
819
820/**
821 * ice_read_cgu_reg_e82x - Read a CGU register
822 * @hw: pointer to the HW struct
823 * @addr: Register address to read
824 * @val: storage for register value read
825 *
826 * Read the contents of a register of the Clock Generation Unit. Only
827 * applicable to E822 devices.
828 */
829static int
830ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
831{
832	struct ice_sbq_msg_input cgu_msg;
833	int err;
834
835	cgu_msg.opcode = ice_sbq_msg_rd;
836	cgu_msg.dest_dev = cgu;
837	cgu_msg.msg_addr_low = addr;
838	cgu_msg.msg_addr_high = 0x0;
839
840	err = ice_sbq_rw_reg(hw, &cgu_msg);
841	if (err) {
842		ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
843			  addr, err);
844		return err;
845	}
846
847	*val = cgu_msg.data;
848
849	return err;
850}
851
852/**
853 * ice_write_cgu_reg_e82x - Write a CGU register
854 * @hw: pointer to the HW struct
855 * @addr: Register address to write
856 * @val: value to write into the register
857 *
858 * Write the specified value to a register of the Clock Generation Unit. Only
859 * applicable to E822 devices.
860 */
861static int
862ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
863{
864	struct ice_sbq_msg_input cgu_msg;
865	int err;
866
867	cgu_msg.opcode = ice_sbq_msg_wr;
868	cgu_msg.dest_dev = cgu;
869	cgu_msg.msg_addr_low = addr;
870	cgu_msg.msg_addr_high = 0x0;
871	cgu_msg.data = val;
872
873	err = ice_sbq_rw_reg(hw, &cgu_msg);
874	if (err) {
875		ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
876			  addr, err);
877		return err;
878	}
879
880	return err;
881}
882
883/**
884 * ice_clk_freq_str - Convert time_ref_freq to string
885 * @clk_freq: Clock frequency
886 *
887 * Convert the specified TIME_REF clock frequency to a string.
888 */
889static const char *ice_clk_freq_str(u8 clk_freq)
890{
891	switch ((enum ice_time_ref_freq)clk_freq) {
892	case ICE_TIME_REF_FREQ_25_000:
893		return "25 MHz";
894	case ICE_TIME_REF_FREQ_122_880:
895		return "122.88 MHz";
896	case ICE_TIME_REF_FREQ_125_000:
897		return "125 MHz";
898	case ICE_TIME_REF_FREQ_153_600:
899		return "153.6 MHz";
900	case ICE_TIME_REF_FREQ_156_250:
901		return "156.25 MHz";
902	case ICE_TIME_REF_FREQ_245_760:
903		return "245.76 MHz";
904	default:
905		return "Unknown";
906	}
907}
908
909/**
910 * ice_clk_src_str - Convert time_ref_src to string
911 * @clk_src: Clock source
912 *
913 * Convert the specified clock source to its string name.
914 */
915static const char *ice_clk_src_str(u8 clk_src)
916{
917	switch ((enum ice_clk_src)clk_src) {
918	case ICE_CLK_SRC_TCX0:
919		return "TCX0";
920	case ICE_CLK_SRC_TIME_REF:
921		return "TIME_REF";
922	default:
923		return "Unknown";
924	}
925}
926
927/**
928 * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
929 * @hw: pointer to the HW struct
930 * @clk_freq: Clock frequency to program
931 * @clk_src: Clock source to select (TIME_REF, or TCX0)
932 *
933 * Configure the Clock Generation Unit with the desired clock frequency and
934 * time reference, enabling the PLL which drives the PTP hardware clock.
935 */
936static int
937ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
938		     enum ice_clk_src clk_src)
939{
940	union tspll_ro_bwm_lf bwm_lf;
941	union nac_cgu_dword19 dw19;
942	union nac_cgu_dword22 dw22;
943	union nac_cgu_dword24 dw24;
944	union nac_cgu_dword9 dw9;
945	int err;
946
947	if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
948		dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
949			 clk_freq);
950		return -EINVAL;
951	}
952
953	if (clk_src >= NUM_ICE_CLK_SRC) {
954		dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
955			 clk_src);
956		return -EINVAL;
957	}
958
959	if (clk_src == ICE_CLK_SRC_TCX0 &&
960	    clk_freq != ICE_TIME_REF_FREQ_25_000) {
961		dev_warn(ice_hw_to_dev(hw),
962			 "TCX0 only supports 25 MHz frequency\n");
963		return -EINVAL;
964	}
965
966	err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
967	if (err)
968		return err;
969
970	err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
971	if (err)
972		return err;
973
974	err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
975	if (err)
976		return err;
977
978	/* Log the current clock configuration */
979	ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
980		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
981		  ice_clk_src_str(dw24.field.time_ref_sel),
982		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
983		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
984
985	/* Disable the PLL before changing the clock source or frequency */
986	if (dw24.field.ts_pll_enable) {
987		dw24.field.ts_pll_enable = 0;
988
989		err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
990		if (err)
991			return err;
992	}
993
994	/* Set the frequency */
995	dw9.field.time_ref_freq_sel = clk_freq;
996	err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
997	if (err)
998		return err;
999
1000	/* Configure the TS PLL feedback divisor */
1001	err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
1002	if (err)
1003		return err;
1004
1005	dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
1006	dw19.field.tspll_ndivratio = 1;
1007
1008	err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
1009	if (err)
1010		return err;
1011
1012	/* Configure the TS PLL post divisor */
1013	err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
1014	if (err)
1015		return err;
1016
1017	dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
1018	dw22.field.time1588clk_sel_div2 = 0;
1019
1020	err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
1021	if (err)
1022		return err;
1023
1024	/* Configure the TS PLL pre divisor and clock source */
1025	err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
1026	if (err)
1027		return err;
1028
1029	dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
1030	dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
1031	dw24.field.time_ref_sel = clk_src;
1032
1033	err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
1034	if (err)
1035		return err;
1036
1037	/* Finally, enable the PLL */
1038	dw24.field.ts_pll_enable = 1;
1039
1040	err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
1041	if (err)
1042		return err;
1043
1044	/* Wait to verify if the PLL locks */
1045	usleep_range(1000, 5000);
1046
1047	err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
1048	if (err)
1049		return err;
1050
1051	if (!bwm_lf.field.plllock_true_lock_cri) {
1052		dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
1053		return -EBUSY;
1054	}
1055
1056	/* Log the current clock configuration */
1057	ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
1058		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
1059		  ice_clk_src_str(dw24.field.time_ref_sel),
1060		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
1061		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
1062
1063	return 0;
1064}
1065
1066/**
1067 * ice_init_cgu_e82x - Initialize CGU with settings from firmware
1068 * @hw: pointer to the HW structure
1069 *
1070 * Initialize the Clock Generation Unit of the E822 device.
1071 */
1072static int ice_init_cgu_e82x(struct ice_hw *hw)
1073{
1074	struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
1075	union tspll_cntr_bist_settings cntr_bist;
1076	int err;
1077
1078	err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
1079				    &cntr_bist.val);
1080	if (err)
1081		return err;
1082
1083	/* Disable sticky lock detection so lock err reported is accurate */
1084	cntr_bist.field.i_plllock_sel_0 = 0;
1085	cntr_bist.field.i_plllock_sel_1 = 0;
1086
1087	err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
1088				     cntr_bist.val);
1089	if (err)
1090		return err;
1091
1092	/* Configure the CGU PLL using the parameters from the function
1093	 * capabilities.
1094	 */
1095	err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
1096				   (enum ice_clk_src)ts_info->clk_src);
1097	if (err)
1098		return err;
1099
1100	return 0;
1101}
1102
1103/**
1104 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
1105 * @hw: pointer to the HW struct
1106 *
1107 * Set the window length used for the vernier port calibration process.
1108 */
1109static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
1110{
1111	u8 port;
1112
1113	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1114		int err;
1115
1116		err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
1117					     PTP_VERNIER_WL);
1118		if (err) {
1119			ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
1120				  port, err);
1121			return err;
1122		}
1123	}
1124
1125	return 0;
1126}
1127
1128/**
1129 * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
1130 * @hw: pointer to HW struct
1131 *
1132 * Perform PHC initialization steps specific to E822 devices.
1133 */
1134static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
1135{
1136	int err;
1137	u32 regval;
1138
1139	/* Enable reading switch and PHY registers over the sideband queue */
1140#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
1141#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
1142	regval = rd32(hw, PF_SB_REM_DEV_CTL);
1143	regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
1144		   PF_SB_REM_DEV_CTL_PHY0);
1145	wr32(hw, PF_SB_REM_DEV_CTL, regval);
1146
1147	/* Initialize the Clock Generation Unit */
1148	err = ice_init_cgu_e82x(hw);
1149	if (err)
1150		return err;
1151
1152	/* Set window length for all the ports */
1153	return ice_ptp_set_vernier_wl(hw);
1154}
1155
1156/**
1157 * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
1158 * @hw: pointer to the HW struct
1159 * @time: Time to initialize the PHY port clocks to
1160 *
1161 * Program the PHY port registers with a new initial time value. The port
1162 * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
1163 * command. The time value is the upper 32 bits of the PHY timer, usually in
1164 * units of nominal nanoseconds.
1165 */
1166static int
1167ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
1168{
1169	u64 phy_time;
1170	u8 port;
1171	int err;
1172
1173	/* The time represents the upper 32 bits of the PHY timer, so we need
1174	 * to shift to account for this when programming.
1175	 */
1176	phy_time = (u64)time << 32;
1177
1178	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1179		/* Tx case */
1180		err = ice_write_64b_phy_reg_e82x(hw, port,
1181						 P_REG_TX_TIMER_INC_PRE_L,
1182						 phy_time);
1183		if (err)
1184			goto exit_err;
1185
1186		/* Rx case */
1187		err = ice_write_64b_phy_reg_e82x(hw, port,
1188						 P_REG_RX_TIMER_INC_PRE_L,
1189						 phy_time);
1190		if (err)
1191			goto exit_err;
1192	}
1193
1194	return 0;
1195
1196exit_err:
1197	ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1198		  port, err);
1199
1200	return err;
1201}
1202
1203/**
1204 * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
1205 * @hw: pointer to HW struct
1206 * @port: Port number to be programmed
1207 * @time: time in cycles to adjust the port Tx and Rx clocks
1208 *
1209 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1210 * registers. The atomic adjustment won't be completed until the driver issues
1211 * an ICE_PTP_ADJ_TIME command.
1212 *
1213 * Note that time is not in units of nanoseconds. It is in clock time
1214 * including the lower sub-nanosecond portion of the port timer.
1215 *
1216 * Negative adjustments are supported using 2s complement arithmetic.
1217 */
1218static int
1219ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
1220{
1221	u32 l_time, u_time;
1222	int err;
1223
1224	l_time = lower_32_bits(time);
1225	u_time = upper_32_bits(time);
1226
1227	/* Tx case */
1228	err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1229				     l_time);
1230	if (err)
1231		goto exit_err;
1232
1233	err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1234				     u_time);
1235	if (err)
1236		goto exit_err;
1237
1238	/* Rx case */
1239	err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1240				     l_time);
1241	if (err)
1242		goto exit_err;
1243
1244	err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1245				     u_time);
1246	if (err)
1247		goto exit_err;
1248
1249	return 0;
1250
1251exit_err:
1252	ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1253		  port, err);
1254	return err;
1255}
1256
1257/**
1258 * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
1259 * @hw: pointer to HW struct
1260 * @adj: adjustment in nanoseconds
1261 *
1262 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1263 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1264 * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
1265 */
1266static int
1267ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
1268{
1269	s64 cycles;
1270	u8 port;
1271
1272	/* The port clock supports adjustment of the sub-nanosecond portion of
1273	 * the clock. We shift the provided adjustment in nanoseconds to
1274	 * calculate the appropriate adjustment to program into the PHY ports.
1275	 */
1276	if (adj > 0)
1277		cycles = (s64)adj << 32;
1278	else
1279		cycles = -(((s64)-adj) << 32);
1280
1281	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1282		int err;
1283
1284		err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
1285		if (err)
1286			return err;
1287	}
1288
1289	return 0;
1290}
1291
1292/**
1293 * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment
1294 * @hw: pointer to HW struct
1295 * @incval: new increment value to prepare
1296 *
1297 * Prepare each of the PHY ports for a new increment value by programming the
1298 * port's TIMETUS registers. The new increment value will be updated after
1299 * issuing an ICE_PTP_INIT_INCVAL command.
1300 */
1301static int
1302ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
1303{
1304	int err;
1305	u8 port;
1306
1307	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1308		err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
1309						 incval);
1310		if (err)
1311			goto exit_err;
1312	}
1313
1314	return 0;
1315
1316exit_err:
1317	ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1318		  port, err);
1319
1320	return err;
1321}
1322
1323/**
1324 * ice_ptp_read_port_capture - Read a port's local time capture
1325 * @hw: pointer to HW struct
1326 * @port: Port number to read
1327 * @tx_ts: on return, the Tx port time capture
1328 * @rx_ts: on return, the Rx port time capture
1329 *
1330 * Read the port's Tx and Rx local time capture values.
1331 *
1332 * Note this has no equivalent for the E810 devices.
1333 */
1334static int
1335ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1336{
1337	int err;
1338
1339	/* Tx case */
1340	err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1341	if (err) {
1342		ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1343			  err);
1344		return err;
1345	}
1346
1347	ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1348		  (unsigned long long)*tx_ts);
1349
1350	/* Rx case */
1351	err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1352	if (err) {
1353		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1354			  err);
1355		return err;
1356	}
1357
1358	ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1359		  (unsigned long long)*rx_ts);
1360
1361	return 0;
1362}
1363
1364/**
1365 * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
1366 * @hw: pointer to HW struct
1367 * @port: Port to which cmd has to be sent
1368 * @cmd: Command to be sent to the port
1369 *
1370 * Prepare the requested port for an upcoming timer sync command.
1371 *
1372 * Do not use this function directly. If you want to configure exactly one
1373 * port, use ice_ptp_one_port_cmd() instead.
1374 */
1375static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
1376				       enum ice_ptp_tmr_cmd cmd)
1377{
1378	u32 cmd_val, val;
1379	u8 tmr_idx;
1380	int err;
1381
1382	tmr_idx = ice_get_ptp_src_clock_index(hw);
1383	cmd_val = tmr_idx << SEL_PHY_SRC;
1384	switch (cmd) {
1385	case ICE_PTP_INIT_TIME:
1386		cmd_val |= PHY_CMD_INIT_TIME;
1387		break;
1388	case ICE_PTP_INIT_INCVAL:
1389		cmd_val |= PHY_CMD_INIT_INCVAL;
1390		break;
1391	case ICE_PTP_ADJ_TIME:
1392		cmd_val |= PHY_CMD_ADJ_TIME;
1393		break;
1394	case ICE_PTP_READ_TIME:
1395		cmd_val |= PHY_CMD_READ_TIME;
1396		break;
1397	case ICE_PTP_ADJ_TIME_AT_TIME:
1398		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1399		break;
1400	case ICE_PTP_NOP:
1401		break;
1402	}
1403
1404	/* Tx case */
1405	/* Read, modify, write */
1406	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
1407	if (err) {
1408		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1409			  err);
1410		return err;
1411	}
1412
1413	/* Modify necessary bits only and perform write */
1414	val &= ~TS_CMD_MASK;
1415	val |= cmd_val;
1416
1417	err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
1418	if (err) {
1419		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1420			  err);
1421		return err;
1422	}
1423
1424	/* Rx case */
1425	/* Read, modify, write */
1426	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
1427	if (err) {
1428		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1429			  err);
1430		return err;
1431	}
1432
1433	/* Modify necessary bits only and perform write */
1434	val &= ~TS_CMD_MASK;
1435	val |= cmd_val;
1436
1437	err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
1438	if (err) {
1439		ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1440			  err);
1441		return err;
1442	}
1443
1444	return 0;
1445}
1446
1447/**
1448 * ice_ptp_one_port_cmd - Prepare one port for a timer command
1449 * @hw: pointer to the HW struct
1450 * @configured_port: the port to configure with configured_cmd
1451 * @configured_cmd: timer command to prepare on the configured_port
1452 *
1453 * Prepare the configured_port for the configured_cmd, and prepare all other
1454 * ports for ICE_PTP_NOP. This causes the configured_port to execute the
1455 * desired command while all other ports perform no operation.
1456 */
1457static int
1458ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
1459		     enum ice_ptp_tmr_cmd configured_cmd)
1460{
1461	u8 port;
1462
1463	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1464		enum ice_ptp_tmr_cmd cmd;
1465		int err;
1466
1467		if (port == configured_port)
1468			cmd = configured_cmd;
1469		else
1470			cmd = ICE_PTP_NOP;
1471
1472		err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
1473		if (err)
1474			return err;
1475	}
1476
1477	return 0;
1478}
1479
1480/**
1481 * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
1482 * @hw: pointer to the HW struct
1483 * @cmd: timer command to prepare
1484 *
1485 * Prepare all ports connected to this device for an upcoming timer sync
1486 * command.
1487 */
1488static int
1489ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1490{
1491	u8 port;
1492
1493	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1494		int err;
1495
1496		err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
1497		if (err)
1498			return err;
1499	}
1500
1501	return 0;
1502}
1503
1504/* E822 Vernier calibration functions
1505 *
1506 * The following functions are used as part of the vernier calibration of
1507 * a port. This calibration increases the precision of the timestamps on the
1508 * port.
1509 */
1510
1511/**
1512 * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
1513 * @hw: pointer to HW struct
1514 * @port: the port to read from
1515 * @link_out: if non-NULL, holds link speed on success
1516 * @fec_out: if non-NULL, holds FEC algorithm on success
1517 *
1518 * Read the serdes data for the PHY port and extract the link speed and FEC
1519 * algorithm.
1520 */
1521static int
1522ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
1523			       enum ice_ptp_link_spd *link_out,
1524			       enum ice_ptp_fec_mode *fec_out)
1525{
1526	enum ice_ptp_link_spd link;
1527	enum ice_ptp_fec_mode fec;
1528	u32 serdes;
1529	int err;
1530
1531	err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
1532	if (err) {
1533		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1534		return err;
1535	}
1536
1537	/* Determine the FEC algorithm */
1538	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1539
1540	serdes &= P_REG_LINK_SPEED_SERDES_M;
1541
1542	/* Determine the link speed */
1543	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1544		switch (serdes) {
1545		case ICE_PTP_SERDES_25G:
1546			link = ICE_PTP_LNK_SPD_25G_RS;
1547			break;
1548		case ICE_PTP_SERDES_50G:
1549			link = ICE_PTP_LNK_SPD_50G_RS;
1550			break;
1551		case ICE_PTP_SERDES_100G:
1552			link = ICE_PTP_LNK_SPD_100G_RS;
1553			break;
1554		default:
1555			return -EIO;
1556		}
1557	} else {
1558		switch (serdes) {
1559		case ICE_PTP_SERDES_1G:
1560			link = ICE_PTP_LNK_SPD_1G;
1561			break;
1562		case ICE_PTP_SERDES_10G:
1563			link = ICE_PTP_LNK_SPD_10G;
1564			break;
1565		case ICE_PTP_SERDES_25G:
1566			link = ICE_PTP_LNK_SPD_25G;
1567			break;
1568		case ICE_PTP_SERDES_40G:
1569			link = ICE_PTP_LNK_SPD_40G;
1570			break;
1571		case ICE_PTP_SERDES_50G:
1572			link = ICE_PTP_LNK_SPD_50G;
1573			break;
1574		default:
1575			return -EIO;
1576		}
1577	}
1578
1579	if (link_out)
1580		*link_out = link;
1581	if (fec_out)
1582		*fec_out = fec;
1583
1584	return 0;
1585}
1586
1587/**
1588 * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
1589 * @hw: pointer to HW struct
1590 * @port: to configure the quad for
1591 */
1592static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
1593{
1594	enum ice_ptp_link_spd link_spd;
1595	int err;
1596	u32 val;
1597	u8 quad;
1598
1599	err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
1600	if (err) {
1601		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1602			  err);
1603		return;
1604	}
1605
1606	quad = port / ICE_PORTS_PER_QUAD;
1607
1608	err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1609	if (err) {
1610		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
1611			  err);
1612		return;
1613	}
1614
1615	if (link_spd >= ICE_PTP_LNK_SPD_40G)
1616		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1617	else
1618		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1619
1620	err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1621	if (err) {
1622		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1623			  err);
1624		return;
1625	}
1626}
1627
1628/**
1629 * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
1630 * @hw: pointer to the HW structure
1631 * @port: the port to configure
1632 *
1633 * Program the conversion ration of Serdes clock "unit intervals" (UIs) to PHC
1634 * hardware clock time units (TUs). That is, determine the number of TUs per
1635 * serdes unit interval, and program the UIX registers with this conversion.
1636 *
1637 * This conversion is used as part of the calibration process when determining
1638 * the additional error of a timestamp vs the real time of transmission or
1639 * receipt of the packet.
1640 *
1641 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1642 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1643 *
1644 * To calculate the conversion ratio, we use the following facts:
1645 *
1646 * a) the clock frequency in Hz (cycles per second)
1647 * b) the number of TUs per cycle (the increment value of the clock)
1648 * c) 1 second per 1 billion nanoseconds
1649 * d) the duration of 66 UIs in nanoseconds
1650 *
1651 * Given these facts, we can use the following table to work out what ratios
1652 * to multiply in order to get the number of TUs per 66 UIs:
1653 *
1654 * cycles |   1 second   | incval (TUs) | nanoseconds
1655 * -------+--------------+--------------+-------------
1656 * second | 1 billion ns |    cycle     |   66 UIs
1657 *
1658 * To perform the multiplication using integers without too much loss of
1659 * precision, we can take use the following equation:
1660 *
1661 * (freq * incval * 6600 LINE_UI ) / ( 100 * 1 billion)
1662 *
1663 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1664 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1665 *
1666 * The increment value has a maximum expected range of about 34 bits, while
1667 * the frequency value is about 29 bits. Multiplying these values shouldn't
1668 * overflow the 64 bits. However, we must then further multiply them again by
1669 * the Serdes unit interval duration. To avoid overflow here, we split the
1670 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1671 * a divide by 390,625,000. This does lose some precision, but avoids
1672 * miscalculation due to arithmetic overflow.
1673 */
1674static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
1675{
1676	u64 cur_freq, clk_incval, tu_per_sec, uix;
1677	int err;
1678
1679	cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1680	clk_incval = ice_ptp_read_src_incval(hw);
1681
1682	/* Calculate TUs per second divided by 256 */
1683	tu_per_sec = (cur_freq * clk_incval) >> 8;
1684
1685#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1686#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1687
1688	/* Program the 10Gb/40Gb conversion ratio */
1689	uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1690
1691	err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
1692					 uix);
1693	if (err) {
1694		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1695			  err);
1696		return err;
1697	}
1698
1699	/* Program the 25Gb/100Gb conversion ratio */
1700	uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1701
1702	err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
1703					 uix);
1704	if (err) {
1705		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1706			  err);
1707		return err;
1708	}
1709
1710	return 0;
1711}
1712
1713/**
1714 * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
1715 * @hw: pointer to the HW struct
1716 * @port: port to configure
1717 *
1718 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1719 * timestamp calibration process. This depends on the link speed, as the PHY
1720 * uses different markers depending on the speed.
1721 *
1722 * 1Gb/10Gb/25Gb:
1723 * - Tx/Rx PAR/PCS markers
1724 *
1725 * 25Gb RS:
1726 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1727 *
1728 * 40Gb/50Gb:
1729 * - Tx/Rx PAR/PCS markers
1730 * - Rx Deskew PAR/PCS markers
1731 *
1732 * 50G RS and 100GB RS:
1733 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1734 * - Rx Deskew PAR/PCS markers
1735 * - Tx PAR/PCS markers
1736 *
1737 * To calculate the conversion, we use the PHC clock frequency (cycles per
1738 * second), the increment value (TUs per cycle), and the related PHY clock
1739 * frequency to calculate the TUs per unit of the PHY link clock. The
1740 * following table shows how the units convert:
1741 *
1742 * cycles |  TUs  | second
1743 * -------+-------+--------
1744 * second | cycle | cycles
1745 *
1746 * For each conversion register, look up the appropriate frequency from the
1747 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1748 * this to the appropriate register, preparing hardware to perform timestamp
1749 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1750 * in order to calibrate for the internal PHY delays.
1751 *
1752 * Note that the increment value ranges up to ~34 bits, and the clock
1753 * frequency is ~29 bits, so multiplying them together should fit within the
1754 * 64 bit arithmetic.
1755 */
1756static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
1757{
1758	u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1759	enum ice_ptp_link_spd link_spd;
1760	enum ice_ptp_fec_mode fec_mode;
1761	int err;
1762
1763	err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1764	if (err)
1765		return err;
1766
1767	cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1768	clk_incval = ice_ptp_read_src_incval(hw);
1769
1770	/* Calculate TUs per cycle of the PHC clock */
1771	tu_per_sec = cur_freq * clk_incval;
1772
1773	/* For each PHY conversion register, look up the appropriate link
1774	 * speed frequency and determine the TUs per that clock's cycle time.
1775	 * Split this into a high and low value and then program the
1776	 * appropriate register. If that link speed does not use the
1777	 * associated register, write zeros to clear it instead.
1778	 */
1779
1780	/* P_REG_PAR_TX_TUS */
1781	if (e822_vernier[link_spd].tx_par_clk)
1782		phy_tus = div_u64(tu_per_sec,
1783				  e822_vernier[link_spd].tx_par_clk);
1784	else
1785		phy_tus = 0;
1786
1787	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
1788					 phy_tus);
1789	if (err)
1790		return err;
1791
1792	/* P_REG_PAR_RX_TUS */
1793	if (e822_vernier[link_spd].rx_par_clk)
1794		phy_tus = div_u64(tu_per_sec,
1795				  e822_vernier[link_spd].rx_par_clk);
1796	else
1797		phy_tus = 0;
1798
1799	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
1800					 phy_tus);
1801	if (err)
1802		return err;
1803
1804	/* P_REG_PCS_TX_TUS */
1805	if (e822_vernier[link_spd].tx_pcs_clk)
1806		phy_tus = div_u64(tu_per_sec,
1807				  e822_vernier[link_spd].tx_pcs_clk);
1808	else
1809		phy_tus = 0;
1810
1811	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
1812					 phy_tus);
1813	if (err)
1814		return err;
1815
1816	/* P_REG_PCS_RX_TUS */
1817	if (e822_vernier[link_spd].rx_pcs_clk)
1818		phy_tus = div_u64(tu_per_sec,
1819				  e822_vernier[link_spd].rx_pcs_clk);
1820	else
1821		phy_tus = 0;
1822
1823	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
1824					 phy_tus);
1825	if (err)
1826		return err;
1827
1828	/* P_REG_DESK_PAR_TX_TUS */
1829	if (e822_vernier[link_spd].tx_desk_rsgb_par)
1830		phy_tus = div_u64(tu_per_sec,
1831				  e822_vernier[link_spd].tx_desk_rsgb_par);
1832	else
1833		phy_tus = 0;
1834
1835	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1836					 phy_tus);
1837	if (err)
1838		return err;
1839
1840	/* P_REG_DESK_PAR_RX_TUS */
1841	if (e822_vernier[link_spd].rx_desk_rsgb_par)
1842		phy_tus = div_u64(tu_per_sec,
1843				  e822_vernier[link_spd].rx_desk_rsgb_par);
1844	else
1845		phy_tus = 0;
1846
1847	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1848					 phy_tus);
1849	if (err)
1850		return err;
1851
1852	/* P_REG_DESK_PCS_TX_TUS */
1853	if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1854		phy_tus = div_u64(tu_per_sec,
1855				  e822_vernier[link_spd].tx_desk_rsgb_pcs);
1856	else
1857		phy_tus = 0;
1858
1859	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1860					 phy_tus);
1861	if (err)
1862		return err;
1863
1864	/* P_REG_DESK_PCS_RX_TUS */
1865	if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1866		phy_tus = div_u64(tu_per_sec,
1867				  e822_vernier[link_spd].rx_desk_rsgb_pcs);
1868	else
1869		phy_tus = 0;
1870
1871	return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1872					  phy_tus);
1873}
1874
1875/**
1876 * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port
1877 * @hw: pointer to the HW struct
1878 * @link_spd: the Link speed to calculate for
1879 *
1880 * Calculate the fixed offset due to known static latency data.
1881 */
1882static u64
1883ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1884{
1885	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1886
1887	cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
1888	clk_incval = ice_ptp_read_src_incval(hw);
1889
1890	/* Calculate TUs per second */
1891	tu_per_sec = cur_freq * clk_incval;
1892
1893	/* Calculate number of TUs to add for the fixed Tx latency. Since the
1894	 * latency measurement is in 1/100th of a nanosecond, we need to
1895	 * multiply by tu_per_sec and then divide by 1e11. This calculation
1896	 * overflows 64 bit integer arithmetic, so break it up into two
1897	 * divisions by 1e4 first then by 1e7.
1898	 */
1899	fixed_offset = div_u64(tu_per_sec, 10000);
1900	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1901	fixed_offset = div_u64(fixed_offset, 10000000);
1902
1903	return fixed_offset;
1904}
1905
1906/**
1907 * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
1908 * @hw: pointer to the HW struct
1909 * @port: the PHY port to configure
1910 *
1911 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1912 * adjust Tx timestamps by. This is calculated by combining some known static
1913 * latency along with the Vernier offset computations done by hardware.
1914 *
1915 * This function will not return successfully until the Tx offset calculations
1916 * have been completed, which requires waiting until at least one packet has
1917 * been transmitted by the device. It is safe to call this function
1918 * periodically until calibration succeeds, as it will only program the offset
1919 * once.
1920 *
1921 * To avoid overflow, when calculating the offset based on the known static
1922 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1923 * the TUs per second up front. This avoids overflow while allowing
1924 * calculation of the adjustment using integer arithmetic.
1925 *
1926 * Returns zero on success, -EBUSY if the hardware vernier offset
1927 * calibration has not completed, or another error code on failure.
1928 */
1929int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
1930{
1931	enum ice_ptp_link_spd link_spd;
1932	enum ice_ptp_fec_mode fec_mode;
1933	u64 total_offset, val;
1934	int err;
1935	u32 reg;
1936
1937	/* Nothing to do if we've already programmed the offset */
1938	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
1939	if (err) {
1940		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
1941			  port, err);
1942		return err;
1943	}
1944
1945	if (reg)
1946		return 0;
1947
1948	err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
1949	if (err) {
1950		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
1951			  port, err);
1952		return err;
1953	}
1954
1955	if (!(reg & P_REG_TX_OV_STATUS_OV_M))
1956		return -EBUSY;
1957
1958	err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
1959	if (err)
1960		return err;
1961
1962	total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
1963
1964	/* Read the first Vernier offset from the PHY register and add it to
1965	 * the total offset.
1966	 */
1967	if (link_spd == ICE_PTP_LNK_SPD_1G ||
1968	    link_spd == ICE_PTP_LNK_SPD_10G ||
1969	    link_spd == ICE_PTP_LNK_SPD_25G ||
1970	    link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1971	    link_spd == ICE_PTP_LNK_SPD_40G ||
1972	    link_spd == ICE_PTP_LNK_SPD_50G) {
1973		err = ice_read_64b_phy_reg_e82x(hw, port,
1974						P_REG_PAR_PCS_TX_OFFSET_L,
1975						&val);
1976		if (err)
1977			return err;
1978
1979		total_offset += val;
1980	}
1981
1982	/* For Tx, we only need to use the second Vernier offset for
1983	 * multi-lane link speeds with RS-FEC. The lanes will always be
1984	 * aligned.
1985	 */
1986	if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1987	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1988		err = ice_read_64b_phy_reg_e82x(hw, port,
1989						P_REG_PAR_TX_TIME_L,
1990						&val);
1991		if (err)
1992			return err;
1993
1994		total_offset += val;
1995	}
1996
1997	/* Now that the total offset has been calculated, program it to the
1998	 * PHY and indicate that the Tx offset is ready. After this,
1999	 * timestamps will be enabled.
2000	 */
2001	err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
2002					 total_offset);
2003	if (err)
2004		return err;
2005
2006	err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
2007	if (err)
2008		return err;
2009
2010	dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
2011		 port);
2012
2013	return 0;
2014}
2015
2016/**
2017 * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
2018 * @hw: pointer to the HW struct
2019 * @port: the PHY port to adjust for
2020 * @link_spd: the current link speed of the PHY
2021 * @fec_mode: the current FEC mode of the PHY
2022 * @pmd_adj: on return, the amount to adjust the Rx total offset by
2023 *
2024 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
2025 * This varies by link speed and FEC mode. The value calculated accounts for
2026 * various delays caused when receiving a packet.
2027 */
2028static int
2029ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
2030			  enum ice_ptp_link_spd link_spd,
2031			  enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
2032{
2033	u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
2034	u8 pmd_align;
2035	u32 val;
2036	int err;
2037
2038	err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
2039	if (err) {
2040		ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
2041			  err);
2042		return err;
2043	}
2044
2045	pmd_align = (u8)val;
2046
2047	cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
2048	clk_incval = ice_ptp_read_src_incval(hw);
2049
2050	/* Calculate TUs per second */
2051	tu_per_sec = cur_freq * clk_incval;
2052
2053	/* The PMD alignment adjustment measurement depends on the link speed,
2054	 * and whether FEC is enabled. For each link speed, the alignment
2055	 * adjustment is calculated by dividing a value by the length of
2056	 * a Time Unit in nanoseconds.
2057	 *
2058	 * 1G: align == 4 ? 10 * 0.8 : (align + 6 % 10) * 0.8
2059	 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
2060	 * 10G w/FEC: align * 0.1 * 32/33
2061	 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
2062	 * 25G w/FEC: align * 0.4 * 32/33
2063	 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
2064	 * 40G w/FEC: align * 0.1 * 32/33
2065	 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
2066	 * 50G w/FEC: align * 0.8 * 32/33
2067	 *
2068	 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
2069	 *
2070	 * To allow for calculating this value using integer arithmetic, we
2071	 * instead start with the number of TUs per second, (inverse of the
2072	 * length of a Time Unit in nanoseconds), multiply by a value based
2073	 * on the PMD alignment register, and then divide by the right value
2074	 * calculated based on the table above. To avoid integer overflow this
2075	 * division is broken up into a step of dividing by 125 first.
2076	 */
2077	if (link_spd == ICE_PTP_LNK_SPD_1G) {
2078		if (pmd_align == 4)
2079			mult = 10;
2080		else
2081			mult = (pmd_align + 6) % 10;
2082	} else if (link_spd == ICE_PTP_LNK_SPD_10G ||
2083		   link_spd == ICE_PTP_LNK_SPD_25G ||
2084		   link_spd == ICE_PTP_LNK_SPD_40G ||
2085		   link_spd == ICE_PTP_LNK_SPD_50G) {
2086		/* If Clause 74 FEC, always calculate PMD adjust */
2087		if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
2088			mult = pmd_align;
2089		else
2090			mult = 0;
2091	} else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
2092		   link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2093		   link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2094		if (pmd_align < 17)
2095			mult = pmd_align + 40;
2096		else
2097			mult = pmd_align;
2098	} else {
2099		ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
2100			  link_spd);
2101		mult = 0;
2102	}
2103
2104	/* In some cases, there's no need to adjust for the PMD alignment */
2105	if (!mult) {
2106		*pmd_adj = 0;
2107		return 0;
2108	}
2109
2110	/* Calculate the adjustment by multiplying TUs per second by the
2111	 * appropriate multiplier and divisor. To avoid overflow, we first
2112	 * divide by 125, and then handle remaining divisor based on the link
2113	 * speed pmd_adj_divisor value.
2114	 */
2115	adj = div_u64(tu_per_sec, 125);
2116	adj *= mult;
2117	adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
2118
2119	/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
2120	 * cycle count is necessary.
2121	 */
2122	if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
2123		u64 cycle_adj;
2124		u8 rx_cycle;
2125
2126		err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
2127					    &val);
2128		if (err) {
2129			ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
2130				  err);
2131			return err;
2132		}
2133
2134		rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
2135		if (rx_cycle) {
2136			mult = (4 - rx_cycle) * 40;
2137
2138			cycle_adj = div_u64(tu_per_sec, 125);
2139			cycle_adj *= mult;
2140			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2141
2142			adj += cycle_adj;
2143		}
2144	} else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
2145		u64 cycle_adj;
2146		u8 rx_cycle;
2147
2148		err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
2149					    &val);
2150		if (err) {
2151			ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
2152				  err);
2153			return err;
2154		}
2155
2156		rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
2157		if (rx_cycle) {
2158			mult = rx_cycle * 40;
2159
2160			cycle_adj = div_u64(tu_per_sec, 125);
2161			cycle_adj *= mult;
2162			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2163
2164			adj += cycle_adj;
2165		}
2166	}
2167
2168	/* Return the calculated adjustment */
2169	*pmd_adj = adj;
2170
2171	return 0;
2172}
2173
2174/**
2175 * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port
2176 * @hw: pointer to HW struct
2177 * @link_spd: The Link speed to calculate for
2178 *
2179 * Determine the fixed Rx latency for a given link speed.
2180 */
2181static u64
2182ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
2183{
2184	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
2185
2186	cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
2187	clk_incval = ice_ptp_read_src_incval(hw);
2188
2189	/* Calculate TUs per second */
2190	tu_per_sec = cur_freq * clk_incval;
2191
2192	/* Calculate number of TUs to add for the fixed Rx latency. Since the
2193	 * latency measurement is in 1/100th of a nanosecond, we need to
2194	 * multiply by tu_per_sec and then divide by 1e11. This calculation
2195	 * overflows 64 bit integer arithmetic, so break it up into two
2196	 * divisions by 1e4 first then by 1e7.
2197	 */
2198	fixed_offset = div_u64(tu_per_sec, 10000);
2199	fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2200	fixed_offset = div_u64(fixed_offset, 10000000);
2201
2202	return fixed_offset;
2203}
2204
2205/**
2206 * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
2207 * @hw: pointer to the HW struct
2208 * @port: the PHY port to configure
2209 *
2210 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2211 * adjust Rx timestamps by. This combines calculations from the Vernier offset
2212 * measurements taken in hardware with some data about known fixed delay as
2213 * well as adjusting for multi-lane alignment delay.
2214 *
2215 * This function will not return successfully until the Rx offset calculations
2216 * have been completed, which requires waiting until at least one packet has
2217 * been received by the device. It is safe to call this function periodically
2218 * until calibration succeeds, as it will only program the offset once.
2219 *
2220 * This function must be called only after the offset registers are valid,
2221 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2222 * has measured the offset.
2223 *
2224 * To avoid overflow, when calculating the offset based on the known static
2225 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2226 * the TUs per second up front. This avoids overflow while allowing
2227 * calculation of the adjustment using integer arithmetic.
2228 *
2229 * Returns zero on success, -EBUSY if the hardware vernier offset
2230 * calibration has not completed, or another error code on failure.
2231 */
2232int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
2233{
2234	enum ice_ptp_link_spd link_spd;
2235	enum ice_ptp_fec_mode fec_mode;
2236	u64 total_offset, pmd, val;
2237	int err;
2238	u32 reg;
2239
2240	/* Nothing to do if we've already programmed the offset */
2241	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
2242	if (err) {
2243		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
2244			  port, err);
2245		return err;
2246	}
2247
2248	if (reg)
2249		return 0;
2250
2251	err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
2252	if (err) {
2253		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2254			  port, err);
2255		return err;
2256	}
2257
2258	if (!(reg & P_REG_RX_OV_STATUS_OV_M))
2259		return -EBUSY;
2260
2261	err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
2262	if (err)
2263		return err;
2264
2265	total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
2266
2267	/* Read the first Vernier offset from the PHY register and add it to
2268	 * the total offset.
2269	 */
2270	err = ice_read_64b_phy_reg_e82x(hw, port,
2271					P_REG_PAR_PCS_RX_OFFSET_L,
2272					&val);
2273	if (err)
2274		return err;
2275
2276	total_offset += val;
2277
2278	/* For Rx, all multi-lane link speeds include a second Vernier
2279	 * calibration, because the lanes might not be aligned.
2280	 */
2281	if (link_spd == ICE_PTP_LNK_SPD_40G ||
2282	    link_spd == ICE_PTP_LNK_SPD_50G ||
2283	    link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2284	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2285		err = ice_read_64b_phy_reg_e82x(hw, port,
2286						P_REG_PAR_RX_TIME_L,
2287						&val);
2288		if (err)
2289			return err;
2290
2291		total_offset += val;
2292	}
2293
2294	/* In addition, Rx must account for the PMD alignment */
2295	err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
2296	if (err)
2297		return err;
2298
2299	/* For RS-FEC, this adjustment adds delay, but for other modes, it
2300	 * subtracts delay.
2301	 */
2302	if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2303		total_offset += pmd;
2304	else
2305		total_offset -= pmd;
2306
2307	/* Now that the total offset has been calculated, program it to the
2308	 * PHY and indicate that the Rx offset is ready. After this,
2309	 * timestamps will be enabled.
2310	 */
2311	err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2312					 total_offset);
2313	if (err)
2314		return err;
2315
2316	err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
2317	if (err)
2318		return err;
2319
2320	dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
2321		 port);
2322
2323	return 0;
2324}
2325
2326/**
2327 * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
2328 * @hw: pointer to the HW struct
2329 * @port: the PHY port to read
2330 * @phy_time: on return, the 64bit PHY timer value
2331 * @phc_time: on return, the lower 64bits of PHC time
2332 *
2333 * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
2334 * and PHC timer values.
2335 */
2336static int
2337ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
2338			       u64 *phc_time)
2339{
2340	u64 tx_time, rx_time;
2341	u32 zo, lo;
2342	u8 tmr_idx;
2343	int err;
2344
2345	tmr_idx = ice_get_ptp_src_clock_index(hw);
2346
2347	/* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */
2348	ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
2349
2350	/* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */
2351	err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
2352	if (err)
2353		return err;
2354
2355	/* Issue the sync to start the ICE_PTP_READ_TIME capture */
2356	ice_ptp_exec_tmr_cmd(hw);
2357
2358	/* Read the captured PHC time from the shadow time registers */
2359	zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2360	lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2361	*phc_time = (u64)lo << 32 | zo;
2362
2363	/* Read the captured PHY time from the PHY shadow registers */
2364	err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2365	if (err)
2366		return err;
2367
2368	/* If the PHY Tx and Rx timers don't match, log a warning message.
2369	 * Note that this should not happen in normal circumstances since the
2370	 * driver always programs them together.
2371	 */
2372	if (tx_time != rx_time)
2373		dev_warn(ice_hw_to_dev(hw),
2374			 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2375			 port, (unsigned long long)tx_time,
2376			 (unsigned long long)rx_time);
2377
2378	*phy_time = tx_time;
2379
2380	return 0;
2381}
2382
2383/**
2384 * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
2385 * @hw: pointer to the HW struct
2386 * @port: the PHY port to synchronize
2387 *
2388 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2389 * This is done by issuing a ICE_PTP_READ_TIME command which triggers a
2390 * simultaneous read of the PHY timer and PHC timer. Then we use the
2391 * difference to calculate an appropriate 2s complement addition to add
2392 * to the PHY timer in order to ensure it reads the same value as the
2393 * primary PHC timer.
2394 */
2395static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
2396{
2397	u64 phc_time, phy_time, difference;
2398	int err;
2399
2400	if (!ice_ptp_lock(hw)) {
2401		ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2402		return -EBUSY;
2403	}
2404
2405	err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
2406	if (err)
2407		goto err_unlock;
2408
2409	/* Calculate the amount required to add to the port time in order for
2410	 * it to match the PHC time.
2411	 *
2412	 * Note that the port adjustment is done using 2s complement
2413	 * arithmetic. This is convenient since it means that we can simply
2414	 * calculate the difference between the PHC time and the port time,
2415	 * and it will be interpreted correctly.
2416	 */
2417	difference = phc_time - phy_time;
2418
2419	err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
2420	if (err)
2421		goto err_unlock;
2422
2423	err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
2424	if (err)
2425		goto err_unlock;
2426
2427	/* Do not perform any action on the main timer */
2428	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2429
2430	/* Issue the sync to activate the time adjustment */
2431	ice_ptp_exec_tmr_cmd(hw);
2432
2433	/* Re-capture the timer values to flush the command registers and
2434	 * verify that the time was properly adjusted.
2435	 */
2436	err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
2437	if (err)
2438		goto err_unlock;
2439
2440	dev_info(ice_hw_to_dev(hw),
2441		 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2442		 port, (unsigned long long)phy_time,
2443		 (unsigned long long)phc_time);
2444
2445	ice_ptp_unlock(hw);
2446
2447	return 0;
2448
2449err_unlock:
2450	ice_ptp_unlock(hw);
2451	return err;
2452}
2453
2454/**
2455 * ice_stop_phy_timer_e82x - Stop the PHY clock timer
2456 * @hw: pointer to the HW struct
2457 * @port: the PHY port to stop
2458 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2459 *
2460 * Stop the clock of a PHY port. This must be done as part of the flow to
2461 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2462 * initialized or when link speed changes.
2463 */
2464int
2465ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
2466{
2467	int err;
2468	u32 val;
2469
2470	err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
2471	if (err)
2472		return err;
2473
2474	err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
2475	if (err)
2476		return err;
2477
2478	err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
2479	if (err)
2480		return err;
2481
2482	val &= ~P_REG_PS_START_M;
2483	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2484	if (err)
2485		return err;
2486
2487	val &= ~P_REG_PS_ENA_CLK_M;
2488	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2489	if (err)
2490		return err;
2491
2492	if (soft_reset) {
2493		val |= P_REG_PS_SFT_RESET_M;
2494		err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2495		if (err)
2496			return err;
2497	}
2498
2499	ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2500
2501	return 0;
2502}
2503
2504/**
2505 * ice_start_phy_timer_e82x - Start the PHY clock timer
2506 * @hw: pointer to the HW struct
2507 * @port: the PHY port to start
2508 *
2509 * Start the clock of a PHY port. This must be done as part of the flow to
2510 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2511 * initialized or when link speed changes.
2512 *
2513 * Hardware will take Vernier measurements on Tx or Rx of packets.
2514 */
2515int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
2516{
2517	u32 lo, hi, val;
2518	u64 incval;
2519	u8 tmr_idx;
2520	int err;
2521
2522	tmr_idx = ice_get_ptp_src_clock_index(hw);
2523
2524	err = ice_stop_phy_timer_e82x(hw, port, false);
2525	if (err)
2526		return err;
2527
2528	ice_phy_cfg_lane_e82x(hw, port);
2529
2530	err = ice_phy_cfg_uix_e82x(hw, port);
2531	if (err)
2532		return err;
2533
2534	err = ice_phy_cfg_parpcs_e82x(hw, port);
2535	if (err)
2536		return err;
2537
2538	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2539	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2540	incval = (u64)hi << 32 | lo;
2541
2542	err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
2543	if (err)
2544		return err;
2545
2546	err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2547	if (err)
2548		return err;
2549
2550	/* Do not perform any action on the main timer */
2551	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2552
2553	ice_ptp_exec_tmr_cmd(hw);
2554
2555	err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
2556	if (err)
2557		return err;
2558
2559	val |= P_REG_PS_SFT_RESET_M;
2560	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2561	if (err)
2562		return err;
2563
2564	val |= P_REG_PS_START_M;
2565	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2566	if (err)
2567		return err;
2568
2569	val &= ~P_REG_PS_SFT_RESET_M;
2570	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2571	if (err)
2572		return err;
2573
2574	err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
2575	if (err)
2576		return err;
2577
2578	ice_ptp_exec_tmr_cmd(hw);
2579
2580	val |= P_REG_PS_ENA_CLK_M;
2581	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2582	if (err)
2583		return err;
2584
2585	val |= P_REG_PS_LOAD_OFFSET_M;
2586	err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
2587	if (err)
2588		return err;
2589
2590	ice_ptp_exec_tmr_cmd(hw);
2591
2592	err = ice_sync_phy_timer_e82x(hw, port);
2593	if (err)
2594		return err;
2595
2596	ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2597
2598	return 0;
2599}
2600
2601/**
2602 * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
2603 * @hw: pointer to the HW struct
2604 * @quad: the timestamp quad to read from
2605 * @tstamp_ready: contents of the Tx memory status register
2606 *
2607 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
2608 * the PHY are ready. A set bit means the corresponding timestamp is valid and
2609 * ready to be captured from the PHY timestamp block.
2610 */
2611static int
2612ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
2613{
2614	u32 hi, lo;
2615	int err;
2616
2617	err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
2618	if (err) {
2619		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
2620			  quad, err);
2621		return err;
2622	}
2623
2624	err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
2625	if (err) {
2626		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
2627			  quad, err);
2628		return err;
2629	}
2630
2631	*tstamp_ready = (u64)hi << 32 | (u64)lo;
2632
2633	return 0;
2634}
2635
2636/* E810 functions
2637 *
2638 * The following functions operate on the E810 series devices which use
2639 * a separate external PHY.
2640 */
2641
2642/**
2643 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2644 * @hw: pointer to the HW struct
2645 * @addr: the address to read from
2646 * @val: On return, the value read from the PHY
2647 *
2648 * Read a register from the external PHY on the E810 device.
2649 */
2650static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2651{
2652	struct ice_sbq_msg_input msg = {0};
2653	int err;
2654
2655	msg.msg_addr_low = lower_16_bits(addr);
2656	msg.msg_addr_high = upper_16_bits(addr);
2657	msg.opcode = ice_sbq_msg_rd;
2658	msg.dest_dev = rmn_0;
2659
2660	err = ice_sbq_rw_reg(hw, &msg);
2661	if (err) {
2662		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2663			  err);
2664		return err;
2665	}
2666
2667	*val = msg.data;
2668
2669	return 0;
2670}
2671
2672/**
2673 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2674 * @hw: pointer to the HW struct
2675 * @addr: the address to writem to
2676 * @val: the value to write to the PHY
2677 *
2678 * Write a value to a register of the external PHY on the E810 device.
2679 */
2680static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2681{
2682	struct ice_sbq_msg_input msg = {0};
2683	int err;
2684
2685	msg.msg_addr_low = lower_16_bits(addr);
2686	msg.msg_addr_high = upper_16_bits(addr);
2687	msg.opcode = ice_sbq_msg_wr;
2688	msg.dest_dev = rmn_0;
2689	msg.data = val;
2690
2691	err = ice_sbq_rw_reg(hw, &msg);
2692	if (err) {
2693		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2694			  err);
2695		return err;
2696	}
2697
2698	return 0;
2699}
2700
2701/**
2702 * ice_read_phy_tstamp_ll_e810 - Read a PHY timestamp registers through the FW
2703 * @hw: pointer to the HW struct
2704 * @idx: the timestamp index to read
2705 * @hi: 8 bit timestamp high value
2706 * @lo: 32 bit timestamp low value
2707 *
2708 * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
2709 * timestamp block of the external PHY on the E810 device using the low latency
2710 * timestamp read.
2711 */
2712static int
2713ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2714{
2715	u32 val;
2716	u8 i;
2717
2718	/* Write TS index to read to the PF register so the FW can read it */
2719	val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2720	wr32(hw, PF_SB_ATQBAL, val);
2721
2722	/* Read the register repeatedly until the FW provides us the TS */
2723	for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2724		val = rd32(hw, PF_SB_ATQBAL);
2725
2726		/* When the bit is cleared, the TS is ready in the register */
2727		if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2728			/* High 8 bit value of the TS is on the bits 16:23 */
2729			*hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2730
2731			/* Read the low 32 bit value and set the TS valid bit */
2732			*lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2733			return 0;
2734		}
2735
2736		udelay(10);
2737	}
2738
2739	/* FW failed to provide the TS in time */
2740	ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2741	return -EINVAL;
2742}
2743
2744/**
2745 * ice_read_phy_tstamp_sbq_e810 - Read a PHY timestamp registers through the sbq
2746 * @hw: pointer to the HW struct
2747 * @lport: the lport to read from
2748 * @idx: the timestamp index to read
2749 * @hi: 8 bit timestamp high value
2750 * @lo: 32 bit timestamp low value
2751 *
2752 * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
2753 * timestamp block of the external PHY on the E810 device using sideband queue.
2754 */
2755static int
2756ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2757			     u32 *lo)
2758{
2759	u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2760	u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2761	u32 lo_val, hi_val;
2762	int err;
2763
2764	err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2765	if (err) {
2766		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2767			  err);
2768		return err;
2769	}
2770
2771	err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2772	if (err) {
2773		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2774			  err);
2775		return err;
2776	}
2777
2778	*lo = lo_val;
2779	*hi = (u8)hi_val;
2780
2781	return 0;
2782}
2783
2784/**
2785 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2786 * @hw: pointer to the HW struct
2787 * @lport: the lport to read from
2788 * @idx: the timestamp index to read
2789 * @tstamp: on return, the 40bit timestamp value
2790 *
2791 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2792 * on the E810 device.
2793 */
2794static int
2795ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2796{
2797	u32 lo = 0;
2798	u8 hi = 0;
2799	int err;
2800
2801	if (hw->dev_caps.ts_dev_info.ts_ll_read)
2802		err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2803	else
2804		err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2805
2806	if (err)
2807		return err;
2808
2809	/* For E810 devices, the timestamp is reported with the lower 32 bits
2810	 * in the low register, and the upper 8 bits in the high register.
2811	 */
2812	*tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
2813
2814	return 0;
2815}
2816
2817/**
2818 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2819 * @hw: pointer to the HW struct
2820 * @lport: the lport to read from
2821 * @idx: the timestamp index to reset
2822 *
2823 * Read the timestamp and then forcibly overwrite its value to clear the valid
2824 * bit from the timestamp block of the external PHY on the E810 device.
2825 *
2826 * This function should only be called on an idx whose bit is set according to
2827 * ice_get_phy_tx_tstamp_ready().
2828 */
2829static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2830{
2831	u32 lo_addr, hi_addr;
2832	u64 unused_tstamp;
2833	int err;
2834
2835	err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
2836	if (err) {
2837		ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
2838			  lport, idx, err);
2839		return err;
2840	}
2841
2842	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2843	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2844
2845	err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2846	if (err) {
2847		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
2848			  lport, idx, err);
2849		return err;
2850	}
2851
2852	err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2853	if (err) {
2854		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
2855			  lport, idx, err);
2856		return err;
2857	}
2858
2859	return 0;
2860}
2861
2862/**
2863 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2864 * @hw: pointer to HW struct
2865 *
2866 * Enable the timesync PTP functionality for the external PHY connected to
2867 * this function.
2868 */
2869int ice_ptp_init_phy_e810(struct ice_hw *hw)
2870{
2871	u8 tmr_idx;
2872	int err;
2873
2874	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2875	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2876				     GLTSYN_ENA_TSYN_ENA_M);
2877	if (err)
2878		ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2879			  err);
2880
2881	return err;
2882}
2883
2884/**
2885 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2886 * @hw: pointer to HW struct
2887 *
2888 * Perform E810-specific PTP hardware clock initialization steps.
2889 */
2890static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2891{
2892	/* Ensure synchronization delay is zero */
2893	wr32(hw, GLTSYN_SYNC_DLAY, 0);
2894
2895	/* Initialize the PHY */
2896	return ice_ptp_init_phy_e810(hw);
2897}
2898
2899/**
2900 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2901 * @hw: Board private structure
2902 * @time: Time to initialize the PHY port clock to
2903 *
2904 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the
2905 * initial clock time. The time will not actually be programmed until the
2906 * driver issues an ICE_PTP_INIT_TIME command.
2907 *
2908 * The time value is the upper 32 bits of the PHY timer, usually in units of
2909 * nominal nanoseconds.
2910 */
2911static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2912{
2913	u8 tmr_idx;
2914	int err;
2915
2916	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2917	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2918	if (err) {
2919		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2920			  err);
2921		return err;
2922	}
2923
2924	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2925	if (err) {
2926		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2927			  err);
2928		return err;
2929	}
2930
2931	return 0;
2932}
2933
2934/**
2935 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2936 * @hw: pointer to HW struct
2937 * @adj: adjustment value to program
2938 *
2939 * Prepare the PHY port for an atomic adjustment by programming the PHY
2940 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2941 * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
2942 *
2943 * The adjustment value only contains the portion used for the upper 32bits of
2944 * the PHY timer, usually in units of nominal nanoseconds. Negative
2945 * adjustments are supported using 2s complement arithmetic.
2946 */
2947static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2948{
2949	u8 tmr_idx;
2950	int err;
2951
2952	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2953
2954	/* Adjustments are represented as signed 2's complement values in
2955	 * nanoseconds. Sub-nanosecond adjustment is not supported.
2956	 */
2957	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2958	if (err) {
2959		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2960			  err);
2961		return err;
2962	}
2963
2964	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2965	if (err) {
2966		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2967			  err);
2968		return err;
2969	}
2970
2971	return 0;
2972}
2973
2974/**
2975 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2976 * @hw: pointer to HW struct
2977 * @incval: The new 40bit increment value to prepare
2978 *
2979 * Prepare the PHY port for a new increment value by programming the PHY
2980 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2981 * completed by issuing an ICE_PTP_INIT_INCVAL command.
2982 */
2983static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2984{
2985	u32 high, low;
2986	u8 tmr_idx;
2987	int err;
2988
2989	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2990	low = lower_32_bits(incval);
2991	high = upper_32_bits(incval);
2992
2993	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2994	if (err) {
2995		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2996			  err);
2997		return err;
2998	}
2999
3000	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
3001	if (err) {
3002		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
3003			  err);
3004		return err;
3005	}
3006
3007	return 0;
3008}
3009
3010/**
3011 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
3012 * @hw: pointer to HW struct
3013 * @cmd: Command to be sent to the port
3014 *
3015 * Prepare the external PHYs connected to this device for a timer sync
3016 * command.
3017 */
3018static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3019{
3020	u32 cmd_val, val;
3021	int err;
3022
3023	switch (cmd) {
3024	case ICE_PTP_INIT_TIME:
3025		cmd_val = GLTSYN_CMD_INIT_TIME;
3026		break;
3027	case ICE_PTP_INIT_INCVAL:
3028		cmd_val = GLTSYN_CMD_INIT_INCVAL;
3029		break;
3030	case ICE_PTP_ADJ_TIME:
3031		cmd_val = GLTSYN_CMD_ADJ_TIME;
3032		break;
3033	case ICE_PTP_READ_TIME:
3034		cmd_val = GLTSYN_CMD_READ_TIME;
3035		break;
3036	case ICE_PTP_ADJ_TIME_AT_TIME:
3037		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
3038		break;
3039	case ICE_PTP_NOP:
3040		return 0;
3041	}
3042
3043	/* Read, modify, write */
3044	err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
3045	if (err) {
3046		ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
3047		return err;
3048	}
3049
3050	/* Modify necessary bits only and perform write */
3051	val &= ~TS_CMD_MASK_E810;
3052	val |= cmd_val;
3053
3054	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
3055	if (err) {
3056		ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
3057		return err;
3058	}
3059
3060	return 0;
3061}
3062
3063/**
3064 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
3065 * @hw: pointer to the HW struct
3066 * @port: the PHY port to read
3067 * @tstamp_ready: contents of the Tx memory status register
3068 *
3069 * E810 devices do not use a Tx memory status register. Instead simply
3070 * indicate that all timestamps are currently ready.
3071 */
3072static int
3073ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
3074{
3075	*tstamp_ready = 0xFFFFFFFFFFFFFFFF;
3076	return 0;
3077}
3078
3079/* E810T SMA functions
3080 *
3081 * The following functions operate specifically on E810T hardware and are used
3082 * to access the extended GPIOs available.
3083 */
3084
3085/**
3086 * ice_get_pca9575_handle
3087 * @hw: pointer to the hw struct
3088 * @pca9575_handle: GPIO controller's handle
3089 *
3090 * Find and return the GPIO controller's handle in the netlist.
3091 * When found - the value will be cached in the hw structure and following calls
3092 * will return cached value
3093 */
3094static int
3095ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3096{
3097	struct ice_aqc_get_link_topo *cmd;
3098	struct ice_aq_desc desc;
3099	int status;
3100	u8 idx;
3101
3102	/* If handle was read previously return cached value */
3103	if (hw->io_expander_handle) {
3104		*pca9575_handle = hw->io_expander_handle;
3105		return 0;
3106	}
3107
3108	/* If handle was not detected read it from the netlist */
3109	cmd = &desc.params.get_link_topo;
3110	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3111
3112	/* Set node type to GPIO controller */
3113	cmd->addr.topo_params.node_type_ctx =
3114		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3115		 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3116
3117#define SW_PCA9575_SFP_TOPO_IDX		2
3118#define SW_PCA9575_QSFP_TOPO_IDX	1
3119
3120	/* Check if the SW IO expander controlling SMA exists in the netlist. */
3121	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3122		idx = SW_PCA9575_SFP_TOPO_IDX;
3123	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3124		idx = SW_PCA9575_QSFP_TOPO_IDX;
3125	else
3126		return -EOPNOTSUPP;
3127
3128	cmd->addr.topo_params.index = idx;
3129
3130	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3131	if (status)
3132		return -EOPNOTSUPP;
3133
3134	/* Verify if we found the right IO expander type */
3135	if (desc.params.get_link_topo.node_part_num !=
3136		ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3137		return -EOPNOTSUPP;
3138
3139	/* If present save the handle and return it */
3140	hw->io_expander_handle =
3141		le16_to_cpu(desc.params.get_link_topo.addr.handle);
3142	*pca9575_handle = hw->io_expander_handle;
3143
3144	return 0;
3145}
3146
3147/**
3148 * ice_read_sma_ctrl_e810t
3149 * @hw: pointer to the hw struct
3150 * @data: pointer to data to be read from the GPIO controller
3151 *
3152 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3153 * PCA9575 expander, so only bits 3-7 in data are valid.
3154 */
3155int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3156{
3157	int status;
3158	u16 handle;
3159	u8 i;
3160
3161	status = ice_get_pca9575_handle(hw, &handle);
3162	if (status)
3163		return status;
3164
3165	*data = 0;
3166
3167	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3168		bool pin;
3169
3170		status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3171					 &pin, NULL);
3172		if (status)
3173			break;
3174		*data |= (u8)(!pin) << i;
3175	}
3176
3177	return status;
3178}
3179
3180/**
3181 * ice_write_sma_ctrl_e810t
3182 * @hw: pointer to the hw struct
3183 * @data: data to be written to the GPIO controller
3184 *
3185 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3186 * of the PCA9575 expander, so only bits 3-7 in data are valid.
3187 */
3188int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3189{
3190	int status;
3191	u16 handle;
3192	u8 i;
3193
3194	status = ice_get_pca9575_handle(hw, &handle);
3195	if (status)
3196		return status;
3197
3198	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3199		bool pin;
3200
3201		pin = !(data & (1 << i));
3202		status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3203					 pin, NULL);
3204		if (status)
3205			break;
3206	}
3207
3208	return status;
3209}
3210
3211/**
3212 * ice_read_pca9575_reg_e810t
3213 * @hw: pointer to the hw struct
3214 * @offset: GPIO controller register offset
3215 * @data: pointer to data to be read from the GPIO controller
3216 *
3217 * Read the register from the GPIO controller
3218 */
3219int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3220{
3221	struct ice_aqc_link_topo_addr link_topo;
3222	__le16 addr;
3223	u16 handle;
3224	int err;
3225
3226	memset(&link_topo, 0, sizeof(link_topo));
3227
3228	err = ice_get_pca9575_handle(hw, &handle);
3229	if (err)
3230		return err;
3231
3232	link_topo.handle = cpu_to_le16(handle);
3233	link_topo.topo_params.node_type_ctx =
3234		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3235			   ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3236
3237	addr = cpu_to_le16((u16)offset);
3238
3239	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3240}
3241
3242/* Device agnostic functions
3243 *
3244 * The following functions implement shared behavior common to both E822 and
3245 * E810 devices, possibly calling a device specific implementation where
3246 * necessary.
3247 */
3248
3249/**
3250 * ice_ptp_lock - Acquire PTP global semaphore register lock
3251 * @hw: pointer to the HW struct
3252 *
3253 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
3254 * was acquired, false otherwise.
3255 *
3256 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
3257 * value. If software sees the busy bit cleared, this means that this function
3258 * acquired the lock (and the busy bit is now set). If software sees the busy
3259 * bit set, it means that another function acquired the lock.
3260 *
3261 * Software must clear the busy bit with a write to release the lock for other
3262 * functions when done.
3263 */
3264bool ice_ptp_lock(struct ice_hw *hw)
3265{
3266	u32 hw_lock;
3267	int i;
3268
3269#define MAX_TRIES 15
3270
3271	for (i = 0; i < MAX_TRIES; i++) {
3272		hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
3273		hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
3274		if (hw_lock) {
3275			/* Somebody is holding the lock */
3276			usleep_range(5000, 6000);
3277			continue;
3278		}
3279
3280		break;
3281	}
3282
3283	return !hw_lock;
3284}
3285
3286/**
3287 * ice_ptp_unlock - Release PTP global semaphore register lock
3288 * @hw: pointer to the HW struct
3289 *
3290 * Release the global PTP hardware semaphore lock. This is done by writing to
3291 * the PFTSYN_SEM register.
3292 */
3293void ice_ptp_unlock(struct ice_hw *hw)
3294{
3295	wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
3296}
3297
3298/**
3299 * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
3300 * @hw: pointer to the HW structure
3301 *
3302 * Determine the PHY model for the device, and initialize hw->phy_model
3303 * for use by other functions.
3304 */
3305void ice_ptp_init_phy_model(struct ice_hw *hw)
3306{
3307	if (ice_is_e810(hw))
3308		hw->phy_model = ICE_PHY_E810;
3309	else
3310		hw->phy_model = ICE_PHY_E82X;
3311}
3312
3313/**
3314 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
3315 * @hw: pointer to HW struct
3316 * @cmd: the command to issue
3317 *
3318 * Prepare the source timer and PHY timers and then trigger the requested
3319 * command. This causes the shadow registers previously written in preparation
3320 * for the command to be synchronously applied to both the source and PHY
3321 * timers.
3322 */
3323static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3324{
3325	int err;
3326
3327	/* First, prepare the source timer */
3328	ice_ptp_src_cmd(hw, cmd);
3329
3330	/* Next, prepare the ports */
3331	switch (hw->phy_model) {
3332	case ICE_PHY_E810:
3333		err = ice_ptp_port_cmd_e810(hw, cmd);
3334		break;
3335	case ICE_PHY_E82X:
3336		err = ice_ptp_port_cmd_e82x(hw, cmd);
3337		break;
3338	default:
3339		err = -EOPNOTSUPP;
3340	}
3341
3342	if (err) {
3343		ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
3344			  cmd, err);
3345		return err;
3346	}
3347
3348	/* Write the sync command register to drive both source and PHY timer
3349	 * commands synchronously
3350	 */
3351	ice_ptp_exec_tmr_cmd(hw);
3352
3353	return 0;
3354}
3355
3356/**
3357 * ice_ptp_init_time - Initialize device time to provided value
3358 * @hw: pointer to HW struct
3359 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
3360 *
3361 * Initialize the device to the specified time provided. This requires a three
3362 * step process:
3363 *
3364 * 1) write the new init time to the source timer shadow registers
3365 * 2) write the new init time to the PHY timer shadow registers
3366 * 3) issue an init_time timer command to synchronously switch both the source
3367 *    and port timers to the new init time value at the next clock cycle.
3368 */
3369int ice_ptp_init_time(struct ice_hw *hw, u64 time)
3370{
3371	u8 tmr_idx;
3372	int err;
3373
3374	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3375
3376	/* Source timers */
3377	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
3378	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
3379	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
3380
3381	/* PHY timers */
3382	/* Fill Rx and Tx ports and send msg to PHY */
3383	switch (hw->phy_model) {
3384	case ICE_PHY_E810:
3385		err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
3386		break;
3387	case ICE_PHY_E82X:
3388		err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
3389		break;
3390	default:
3391		err = -EOPNOTSUPP;
3392	}
3393
3394	if (err)
3395		return err;
3396
3397	return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
3398}
3399
3400/**
3401 * ice_ptp_write_incval - Program PHC with new increment value
3402 * @hw: pointer to HW struct
3403 * @incval: Source timer increment value per clock cycle
3404 *
3405 * Program the PHC with a new increment value. This requires a three-step
3406 * process:
3407 *
3408 * 1) Write the increment value to the source timer shadow registers
3409 * 2) Write the increment value to the PHY timer shadow registers
3410 * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
3411 *    the source and port timers to the new increment value at the next clock
3412 *    cycle.
3413 */
3414int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3415{
3416	u8 tmr_idx;
3417	int err;
3418
3419	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3420
3421	/* Shadow Adjust */
3422	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3423	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3424
3425	switch (hw->phy_model) {
3426	case ICE_PHY_E810:
3427		err = ice_ptp_prep_phy_incval_e810(hw, incval);
3428		break;
3429	case ICE_PHY_E82X:
3430		err = ice_ptp_prep_phy_incval_e82x(hw, incval);
3431		break;
3432	default:
3433		err = -EOPNOTSUPP;
3434	}
3435
3436	if (err)
3437		return err;
3438
3439	return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
3440}
3441
3442/**
3443 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3444 * @hw: pointer to HW struct
3445 * @incval: Source timer increment value per clock cycle
3446 *
3447 * Program a new PHC incval while holding the PTP semaphore.
3448 */
3449int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3450{
3451	int err;
3452
3453	if (!ice_ptp_lock(hw))
3454		return -EBUSY;
3455
3456	err = ice_ptp_write_incval(hw, incval);
3457
3458	ice_ptp_unlock(hw);
3459
3460	return err;
3461}
3462
3463/**
3464 * ice_ptp_adj_clock - Adjust PHC clock time atomically
3465 * @hw: pointer to HW struct
3466 * @adj: Adjustment in nanoseconds
3467 *
3468 * Perform an atomic adjustment of the PHC time by the specified number of
3469 * nanoseconds. This requires a three-step process:
3470 *
3471 * 1) Write the adjustment to the source timer shadow registers
3472 * 2) Write the adjustment to the PHY timer shadow registers
3473 * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
3474 *    adjustment to both the source and port timers at the next clock cycle.
3475 */
3476int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3477{
3478	u8 tmr_idx;
3479	int err;
3480
3481	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3482
3483	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3484	 * For an ICE_PTP_ADJ_TIME command, this set of registers represents
3485	 * the value to add to the clock time. It supports subtraction by
3486	 * interpreting the value as a 2's complement integer.
3487	 */
3488	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3489	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3490
3491	switch (hw->phy_model) {
3492	case ICE_PHY_E810:
3493		err = ice_ptp_prep_phy_adj_e810(hw, adj);
3494		break;
3495	case ICE_PHY_E82X:
3496		err = ice_ptp_prep_phy_adj_e82x(hw, adj);
3497		break;
3498	default:
3499		err = -EOPNOTSUPP;
3500	}
3501
3502	if (err)
3503		return err;
3504
3505	return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
3506}
3507
3508/**
3509 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamo block
3510 * @hw: pointer to the HW struct
3511 * @block: the block to read from
3512 * @idx: the timestamp index to read
3513 * @tstamp: on return, the 40bit timestamp value
3514 *
3515 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3516 * the block is the quad to read from. For E810 devices, the block is the
3517 * logical port to read from.
3518 */
3519int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3520{
3521	switch (hw->phy_model) {
3522	case ICE_PHY_E810:
3523		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3524	case ICE_PHY_E82X:
3525		return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
3526	default:
3527		return -EOPNOTSUPP;
3528	}
3529}
3530
3531/**
3532 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3533 * @hw: pointer to the HW struct
3534 * @block: the block to read from
3535 * @idx: the timestamp index to reset
3536 *
3537 * Clear a timestamp from the timestamp block, discarding its value without
3538 * returning it. This resets the memory status bit for the timestamp index
3539 * allowing it to be reused for another timestamp in the future.
3540 *
3541 * For E822 devices, the block number is the PHY quad to clear from. For E810
3542 * devices, the block number is the logical port to clear from.
3543 *
3544 * This function must only be called on a timestamp index whose valid bit is
3545 * set according to ice_get_phy_tx_tstamp_ready().
3546 */
3547int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3548{
3549	switch (hw->phy_model) {
3550	case ICE_PHY_E810:
3551		return ice_clear_phy_tstamp_e810(hw, block, idx);
3552	case ICE_PHY_E82X:
3553		return ice_clear_phy_tstamp_e82x(hw, block, idx);
3554	default:
3555		return -EOPNOTSUPP;
3556	}
3557}
3558
3559/**
3560 * ice_get_pf_c827_idx - find and return the C827 index for the current pf
3561 * @hw: pointer to the hw struct
3562 * @idx: index of the found C827 PHY
3563 * Return:
3564 * * 0 - success
3565 * * negative - failure
3566 */
3567static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
3568{
3569	struct ice_aqc_get_link_topo cmd;
3570	u8 node_part_number;
3571	u16 node_handle;
3572	int status;
3573	u8 ctx;
3574
3575	if (hw->mac_type != ICE_MAC_E810)
3576		return -ENODEV;
3577
3578	if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
3579		*idx = C827_0;
3580		return 0;
3581	}
3582
3583	memset(&cmd, 0, sizeof(cmd));
3584
3585	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
3586	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
3587	cmd.addr.topo_params.node_type_ctx = ctx;
3588
3589	status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
3590					 &node_handle);
3591	if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
3592		return -ENOENT;
3593
3594	if (node_handle == E810C_QSFP_C827_0_HANDLE)
3595		*idx = C827_0;
3596	else if (node_handle == E810C_QSFP_C827_1_HANDLE)
3597		*idx = C827_1;
3598	else
3599		return -EIO;
3600
3601	return 0;
3602}
3603
3604/**
3605 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
3606 * @hw: pointer to the HW struct
3607 */
3608void ice_ptp_reset_ts_memory(struct ice_hw *hw)
3609{
3610	switch (hw->phy_model) {
3611	case ICE_PHY_E82X:
3612		ice_ptp_reset_ts_memory_e82x(hw);
3613		break;
3614	case ICE_PHY_E810:
3615	default:
3616		return;
3617	}
3618}
3619
3620/**
3621 * ice_ptp_init_phc - Initialize PTP hardware clock
3622 * @hw: pointer to the HW struct
3623 *
3624 * Perform the steps required to initialize the PTP hardware clock.
3625 */
3626int ice_ptp_init_phc(struct ice_hw *hw)
3627{
3628	u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3629
3630	/* Enable source clocks */
3631	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3632
3633	/* Clear event err indications for auxiliary pins */
3634	(void)rd32(hw, GLTSYN_STAT(src_idx));
3635
3636	switch (hw->phy_model) {
3637	case ICE_PHY_E810:
3638		return ice_ptp_init_phc_e810(hw);
3639	case ICE_PHY_E82X:
3640		return ice_ptp_init_phc_e82x(hw);
3641	default:
3642		return -EOPNOTSUPP;
3643	}
3644}
3645
3646/**
3647 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
3648 * @hw: pointer to the HW struct
3649 * @block: the timestamp block to check
3650 * @tstamp_ready: storage for the PHY Tx memory status information
3651 *
3652 * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
3653 * which indicates which timestamps in the block may be captured. A set bit
3654 * means the timestamp can be read. An unset bit means the timestamp is not
3655 * ready and software should avoid reading the register.
3656 */
3657int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
3658{
3659	switch (hw->phy_model) {
3660	case ICE_PHY_E810:
3661		return ice_get_phy_tx_tstamp_ready_e810(hw, block,
3662							tstamp_ready);
3663	case ICE_PHY_E82X:
3664		return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
3665							tstamp_ready);
3666		break;
3667	default:
3668		return -EOPNOTSUPP;
3669	}
3670}
3671
3672/**
3673 * ice_cgu_get_pin_desc_e823 - get pin description array
3674 * @hw: pointer to the hw struct
3675 * @input: if request is done against input or output pin
3676 * @size: number of inputs/outputs
3677 *
3678 * Return: pointer to pin description array associated to given hw.
3679 */
3680static const struct ice_cgu_pin_desc *
3681ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
3682{
3683	static const struct ice_cgu_pin_desc *t;
3684
3685	if (hw->cgu_part_number ==
3686	    ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
3687		if (input) {
3688			t = ice_e823_zl_cgu_inputs;
3689			*size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
3690		} else {
3691			t = ice_e823_zl_cgu_outputs;
3692			*size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
3693		}
3694	} else if (hw->cgu_part_number ==
3695		   ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
3696		if (input) {
3697			t = ice_e823_si_cgu_inputs;
3698			*size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
3699		} else {
3700			t = ice_e823_si_cgu_outputs;
3701			*size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
3702		}
3703	} else {
3704		t = NULL;
3705		*size = 0;
3706	}
3707
3708	return t;
3709}
3710
3711/**
3712 * ice_cgu_get_pin_desc - get pin description array
3713 * @hw: pointer to the hw struct
3714 * @input: if request is done against input or output pins
3715 * @size: size of array returned by function
3716 *
3717 * Return: pointer to pin description array associated to given hw.
3718 */
3719static const struct ice_cgu_pin_desc *
3720ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
3721{
3722	const struct ice_cgu_pin_desc *t = NULL;
3723
3724	switch (hw->device_id) {
3725	case ICE_DEV_ID_E810C_SFP:
3726		if (input) {
3727			t = ice_e810t_sfp_cgu_inputs;
3728			*size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
3729		} else {
3730			t = ice_e810t_sfp_cgu_outputs;
3731			*size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
3732		}
3733		break;
3734	case ICE_DEV_ID_E810C_QSFP:
3735		if (input) {
3736			t = ice_e810t_qsfp_cgu_inputs;
3737			*size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
3738		} else {
3739			t = ice_e810t_qsfp_cgu_outputs;
3740			*size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
3741		}
3742		break;
3743	case ICE_DEV_ID_E823L_10G_BASE_T:
3744	case ICE_DEV_ID_E823L_1GBE:
3745	case ICE_DEV_ID_E823L_BACKPLANE:
3746	case ICE_DEV_ID_E823L_QSFP:
3747	case ICE_DEV_ID_E823L_SFP:
3748	case ICE_DEV_ID_E823C_10G_BASE_T:
3749	case ICE_DEV_ID_E823C_BACKPLANE:
3750	case ICE_DEV_ID_E823C_QSFP:
3751	case ICE_DEV_ID_E823C_SFP:
3752	case ICE_DEV_ID_E823C_SGMII:
3753		t = ice_cgu_get_pin_desc_e823(hw, input, size);
3754		break;
3755	default:
3756		break;
3757	}
3758
3759	return t;
3760}
3761
3762/**
3763 * ice_cgu_get_pin_type - get pin's type
3764 * @hw: pointer to the hw struct
3765 * @pin: pin index
3766 * @input: if request is done against input or output pin
3767 *
3768 * Return: type of a pin.
3769 */
3770enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
3771{
3772	const struct ice_cgu_pin_desc *t;
3773	int t_size;
3774
3775	t = ice_cgu_get_pin_desc(hw, input, &t_size);
3776
3777	if (!t)
3778		return 0;
3779
3780	if (pin >= t_size)
3781		return 0;
3782
3783	return t[pin].type;
3784}
3785
3786/**
3787 * ice_cgu_get_pin_freq_supp - get pin's supported frequency
3788 * @hw: pointer to the hw struct
3789 * @pin: pin index
3790 * @input: if request is done against input or output pin
3791 * @num: output number of supported frequencies
3792 *
3793 * Get frequency supported number and array of supported frequencies.
3794 *
3795 * Return: array of supported frequencies for given pin.
3796 */
3797struct dpll_pin_frequency *
3798ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
3799{
3800	const struct ice_cgu_pin_desc *t;
3801	int t_size;
3802
3803	*num = 0;
3804	t = ice_cgu_get_pin_desc(hw, input, &t_size);
3805	if (!t)
3806		return NULL;
3807	if (pin >= t_size)
3808		return NULL;
3809	*num = t[pin].freq_supp_num;
3810
3811	return t[pin].freq_supp;
3812}
3813
3814/**
3815 * ice_cgu_get_pin_name - get pin's name
3816 * @hw: pointer to the hw struct
3817 * @pin: pin index
3818 * @input: if request is done against input or output pin
3819 *
3820 * Return:
3821 * * null terminated char array with name
3822 * * NULL in case of failure
3823 */
3824const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
3825{
3826	const struct ice_cgu_pin_desc *t;
3827	int t_size;
3828
3829	t = ice_cgu_get_pin_desc(hw, input, &t_size);
3830
3831	if (!t)
3832		return NULL;
3833
3834	if (pin >= t_size)
3835		return NULL;
3836
3837	return t[pin].name;
3838}
3839
3840/**
3841 * ice_get_cgu_state - get the state of the DPLL
3842 * @hw: pointer to the hw struct
3843 * @dpll_idx: Index of internal DPLL unit
3844 * @last_dpll_state: last known state of DPLL
3845 * @pin: pointer to a buffer for returning currently active pin
3846 * @ref_state: reference clock state
3847 * @eec_mode: eec mode of the DPLL
3848 * @phase_offset: pointer to a buffer for returning phase offset
3849 * @dpll_state: state of the DPLL (output)
3850 *
3851 * This function will read the state of the DPLL(dpll_idx). Non-null
3852 * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to
3853 * retrieve currently active pin, state, mode and phase_offset respectively.
3854 *
3855 * Return: state of the DPLL
3856 */
3857int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
3858		      enum dpll_lock_status last_dpll_state, u8 *pin,
3859		      u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
3860		      enum dpll_lock_status *dpll_state)
3861{
3862	u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
3863	s64 hw_phase_offset;
3864	int status;
3865
3866	status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
3867					    &hw_dpll_state, &hw_config,
3868					    &hw_phase_offset, &hw_eec_mode);
3869	if (status)
3870		return status;
3871
3872	if (pin)
3873		/* current ref pin in dpll_state_refsel_status_X register */
3874		*pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
3875	if (phase_offset)
3876		*phase_offset = hw_phase_offset;
3877	if (ref_state)
3878		*ref_state = hw_ref_state;
3879	if (eec_mode)
3880		*eec_mode = hw_eec_mode;
3881	if (!dpll_state)
3882		return 0;
3883
3884	/* According to ZL DPLL documentation, once state reach LOCKED_HO_ACQ
3885	 * it would never return to FREERUN. This aligns to ITU-T G.781
3886	 * Recommendation. We cannot report HOLDOVER as HO memory is cleared
3887	 * while switching to another reference.
3888	 * Only for situations where previous state was either: "LOCKED without
3889	 * HO_ACQ" or "HOLDOVER" we actually back to FREERUN.
3890	 */
3891	if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
3892		if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
3893			*dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
3894		else
3895			*dpll_state = DPLL_LOCK_STATUS_LOCKED;
3896	} else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
3897		   last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
3898		*dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
3899	} else {
3900		*dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
3901	}
3902
3903	return 0;
3904}
3905
3906/**
3907 * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
3908 * @hw: pointer to the hw struct
3909 * @base_idx: returns index of first recovered clock pin on device
3910 * @pin_num: returns number of recovered clock pins available on device
3911 *
3912 * Based on hw provide caller info about recovery clock pins available on the
3913 * board.
3914 *
3915 * Return:
3916 * * 0 - success, information is valid
3917 * * negative - failure, information is not valid
3918 */
3919int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
3920{
3921	u8 phy_idx;
3922	int ret;
3923
3924	switch (hw->device_id) {
3925	case ICE_DEV_ID_E810C_SFP:
3926	case ICE_DEV_ID_E810C_QSFP:
3927
3928		ret = ice_get_pf_c827_idx(hw, &phy_idx);
3929		if (ret)
3930			return ret;
3931		*base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
3932		*pin_num = ICE_E810_RCLK_PINS_NUM;
3933		ret = 0;
3934		break;
3935	case ICE_DEV_ID_E823L_10G_BASE_T:
3936	case ICE_DEV_ID_E823L_1GBE:
3937	case ICE_DEV_ID_E823L_BACKPLANE:
3938	case ICE_DEV_ID_E823L_QSFP:
3939	case ICE_DEV_ID_E823L_SFP:
3940	case ICE_DEV_ID_E823C_10G_BASE_T:
3941	case ICE_DEV_ID_E823C_BACKPLANE:
3942	case ICE_DEV_ID_E823C_QSFP:
3943	case ICE_DEV_ID_E823C_SFP:
3944	case ICE_DEV_ID_E823C_SGMII:
3945		*pin_num = ICE_E82X_RCLK_PINS_NUM;
3946		ret = 0;
3947		if (hw->cgu_part_number ==
3948		    ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
3949			*base_idx = ZL_REF1P;
3950		else if (hw->cgu_part_number ==
3951			 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
3952			*base_idx = SI_REF1P;
3953		else
3954			ret = -ENODEV;
3955
3956		break;
3957	default:
3958		ret = -ENODEV;
3959		break;
3960	}
3961
3962	return ret;
3963}
3964
3965/**
3966 * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
3967 * @hw: pointer to the hw struct
3968 * @pin_id: id of a pin
3969 * @caps: capabilities to modify
3970 *
3971 * Return:
3972 * * 0 - success, state capabilities were modified
3973 * * negative - failure, capabilities were not modified
3974 */
3975int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
3976				      unsigned long *caps)
3977{
3978	bool can_change = true;
3979
3980	switch (hw->device_id) {
3981	case ICE_DEV_ID_E810C_SFP:
3982		if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
3983			can_change = false;
3984		break;
3985	case ICE_DEV_ID_E810C_QSFP:
3986		if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
3987			can_change = false;
3988		break;
3989	case ICE_DEV_ID_E823L_10G_BASE_T:
3990	case ICE_DEV_ID_E823L_1GBE:
3991	case ICE_DEV_ID_E823L_BACKPLANE:
3992	case ICE_DEV_ID_E823L_QSFP:
3993	case ICE_DEV_ID_E823L_SFP:
3994	case ICE_DEV_ID_E823C_10G_BASE_T:
3995	case ICE_DEV_ID_E823C_BACKPLANE:
3996	case ICE_DEV_ID_E823C_QSFP:
3997	case ICE_DEV_ID_E823C_SFP:
3998	case ICE_DEV_ID_E823C_SGMII:
3999		if (hw->cgu_part_number ==
4000		    ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
4001		    pin_id == ZL_OUT2)
4002			can_change = false;
4003		else if (hw->cgu_part_number ==
4004			 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
4005			 pin_id == SI_OUT1)
4006			can_change = false;
4007		break;
4008	default:
4009		return -EINVAL;
4010	}
4011	if (can_change)
4012		*caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
4013	else
4014		*caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
4015
4016	return 0;
4017}
4018