// SPDX-License-Identifier: GPL-2.0+

#include "lan966x_main.h"

#define LAN966X_TAPRIO_TIMEOUT_MS		1000
#define LAN966X_TAPRIO_ENTRIES_PER_PORT		2

/* Minimum supported cycle time in nanoseconds */
#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS	NSEC_PER_USEC

/* Maximum supported cycle time in nanoseconds */
#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS	(NSEC_PER_SEC - 1)

/* Total number of TAS GCL entries */
#define LAN966X_TAPRIO_NUM_GCL			256

/* TAPRIO link speeds for calculation of guard band */
enum lan966x_taprio_link_speed {
	LAN966X_TAPRIO_SPEED_NO_GB,
	LAN966X_TAPRIO_SPEED_10,
	LAN966X_TAPRIO_SPEED_100,
	LAN966X_TAPRIO_SPEED_1000,
	LAN966X_TAPRIO_SPEED_2500,
};

/* TAPRIO list states */
enum lan966x_taprio_state {
	LAN966X_TAPRIO_STATE_ADMIN,
	LAN966X_TAPRIO_STATE_ADVANCING,
	LAN966X_TAPRIO_STATE_PENDING,
	LAN966X_TAPRIO_STATE_OPERATING,
	LAN966X_TAPRIO_STATE_TERMINATING,
	LAN966X_TAPRIO_STATE_MAX,
};

/* TAPRIO GCL command */
enum lan966x_taprio_gcl_cmd {
	LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
};

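/* Each port owns LAN966X_TAPRIO_ENTRIES_PER_PORT TAS lists; map a port and a
 * per-port entry to the global list index.
 */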
static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
{
	return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
}

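/* Return the state of the currently selected TAS list */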
static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
{
	struct lan966x *lan966x = port->lan966x;
	u32 val;

	val = lan_rd(lan966x, QSYS_TAS_LST);
	return QSYS_TAS_LST_LIST_STATE_GET(val);
}

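/* Select the given list in QSYS_TAS_CFG_CTRL and return its state */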
static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
					       u32 list)
{
	struct lan966x *lan966x = port->lan966x;

	lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
		QSYS_TAS_CFG_CTRL_LIST_NUM,
		lan966x, QSYS_TAS_CFG_CTRL);

	return lan966x_taprio_list_state_get(port);
}

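/* Set the state of the currently selected TAS list */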
static void lan966x_taprio_list_state_set(struct lan966x_port *port,
					  u32 state)
{
	struct lan966x *lan966x = port->lan966x;

	lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
		QSYS_TAS_LST_LIST_STATE,
		lan966x, QSYS_TAS_LST);
}

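/* Bring a list back to the admin state. An operating list is first moved to
 * terminating, then the function waits for the HW to reach admin.
 */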
static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
					u32 list)
{
	struct lan966x *lan966x = port->lan966x;
	bool pending, operating;
	unsigned long end;
	u32 state;

	end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
	/* It is required to try multiple times to set the state of the list,
	 * because the HW can overwrite it.
	 */
	do {
		state = lan966x_taprio_list_state_get(port);

		pending = false;
		operating = false;

		if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
		    state == LAN966X_TAPRIO_STATE_PENDING) {
			lan966x_taprio_list_state_set(port,
						      LAN966X_TAPRIO_STATE_ADMIN);
			pending = true;
		}

		if (state == LAN966X_TAPRIO_STATE_OPERATING) {
			lan966x_taprio_list_state_set(port,
						      LAN966X_TAPRIO_STATE_TERMINATING);
			operating = true;
		}

		/* If the list was pending and is now in admin, then there is
		 * nothing else to do, so just bail out.
		 */
		state = lan966x_taprio_list_state_get(port);
		if (pending &&
		    state == LAN966X_TAPRIO_STATE_ADMIN)
			return 0;

		/* If the list was operating and is now terminating or admin,
		 * then it is OK to exit, but it still needs to wait until the
		 * list reaches admin. It is not required to set the state
		 * again.
		 */
		if (operating &&
		    (state == LAN966X_TAPRIO_STATE_TERMINATING ||
		     state == LAN966X_TAPRIO_STATE_ADMIN))
			break;

	} while (!time_after(jiffies, end));

	end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
	do {
		state = lan966x_taprio_list_state_get(port);
		if (state == LAN966X_TAPRIO_STATE_ADMIN)
			break;

	} while (!time_after(jiffies, end));

	/* If the list was in operating mode, it could be stopped while some
	 * queues were closed, so make sure to restore "all-queues-open".
	 */
	if (operating) {
		lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
		       lan966x, QSYS_TAS_GS_CTRL);

		lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
		       lan966x, QSYS_TAS_GATE_STATE);
	}

	return 0;
}

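/* Shut down every list of the port that is not already in admin state */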
static int lan966x_taprio_shutdown(struct lan966x_port *port)
{
	u32 i, list, state;
	int err;

	for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
		list = lan966x_taprio_list_index(port, i);
		state = lan966x_taprio_list_index_state_get(port, list);
		if (state == LAN966X_TAPRIO_STATE_ADMIN)
			continue;

		err = lan966x_taprio_list_shutdown(port, list);
		if (err)
			return err;
	}

	return 0;
}

/* Find a suitable list for a new schedule. First priority is a list in state
 * pending. Second priority is a list in state admin.
 */
static int lan966x_taprio_find_list(struct lan966x_port *port,
				    struct tc_taprio_qopt_offload *qopt,
				    int *new_list, int *obs_list)
{
	int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
	int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
	int err, oper = -1;
	u32 i;

	*new_list = -1;
	*obs_list = -1;

	/* If there is already a list in operating mode, return it in
	 * obs_list, so that when the new list gets activated the operating
	 * list is stopped. In this way it is possible to have smooth
	 * transitions between the lists.
	 */
	for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
		list[i] = lan966x_taprio_list_index(port, i);
		state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
		if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
			oper = list[i];
	}

	for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
		if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
			err = lan966x_taprio_shutdown(port);
			if (err)
				return err;

			*new_list = list[i];
			*obs_list = (oper == -1) ? *new_list : oper;
			return 0;
		}
	}

	for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
		if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
			*new_list = list[i];
			*obs_list = (oper == -1) ? *new_list : oper;
			return 0;
		}
	}

	return -ENOSPC;
}

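/* Validate that the requested schedule can be offloaded to the HW */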
static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
{
	u64 total_time = 0;
	u32 i;

	/* This is not supported by the HW */
	if (qopt->cycle_time_extension)
		return -EOPNOTSUPP;

	/* There is a limited number of gcl entries that can be used and they
	 * are shared by all ports
	 */
	if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
		return -EINVAL;

	/* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
	if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
	    qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
		return -EINVAL;

	for (i = 0; i < qopt->num_entries; ++i) {
		struct tc_taprio_sched_entry *entry = &qopt->entries[i];

		/* Don't allow intervals bigger than 1 sec or smaller than 1
		 * usec
		 */
		if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
		    entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
			return -EINVAL;

		if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
			return -EINVAL;

		total_time += qopt->entries[i].interval;
	}

	/* Don't allow the total time of the intervals to be bigger than 1 sec */
	if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
		return -EINVAL;

	/* The HW expects the cycle time to be at least as big as the sum of
	 * the gcl intervals
	 */
	if (qopt->cycle_time < total_time)
		return -EINVAL;

	return 0;
}

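/* Build a bitmap of free gcl entries and return how many are free. Entries
 * used by any list that is not in admin state are marked as busy.
 */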
static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
				       unsigned long *free_list)
{
	struct lan966x *lan966x = port->lan966x;
	u32 num_free, state, list;
	u32 base, next, max_list;

	/* By default everything is free */
	bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
	num_free = LAN966X_TAPRIO_NUM_GCL;

	/* Iterate over all the lists and mark the gcl entries that are in use
	 * as not free.
	 */
	max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
	for (list = 0; list < max_list; ++list) {
		state = lan966x_taprio_list_index_state_get(port, list);
		if (state == LAN966X_TAPRIO_STATE_ADMIN)
			continue;

		base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
		base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
		next = base;

		do {
			clear_bit(next, free_list);
			num_free--;

			lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
				QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
				lan966x, QSYS_TAS_CFG_CTRL);

			next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
			next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
		} while (base != next);
	}

	return num_free;
}

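/* Program a single gcl entry: gate states, owner port and the link to the
 * next entry in the list.
 */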
static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
					   struct tc_taprio_sched_entry *entry,
					   u32 next_entry)
{
	struct lan966x *lan966x = port->lan966x;

	/* Setup a single gcl entry */
	lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
	       QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
	       QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
	       lan966x, QSYS_TAS_GCL_CT_CFG);

	lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
	       QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
	       lan966x, QSYS_TAS_GCL_CT_CFG2);

	lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
}

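/* Allocate free gcl entries for the schedule and link them into a circular
 * list starting at the list base address.
 */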
static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
				    struct tc_taprio_qopt_offload *qopt,
				    int list)
{
	DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
	struct lan966x *lan966x = port->lan966x;
	u32 i, base, next;

	if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
		return -ENOSPC;

	/* Select list */
	lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
		QSYS_TAS_CFG_CTRL_LIST_NUM,
		lan966x, QSYS_TAS_CFG_CTRL);

	/* Setup the address of the first gcl entry */
	base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
	lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
		QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
		lan966x, QSYS_TAS_LIST_CFG);

	/* Iterate over entries and add them to the gcl list */
	next = base;
	for (i = 0; i < qopt->num_entries; ++i) {
		lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
			QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
			lan966x, QSYS_TAS_CFG_CTRL);

		/* If the entry is last, point back to the start of the list */
		if (i == qopt->num_entries - 1)
			next = base;
		else
			next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
					     next + 1);

		lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
	}

	return 0;
}

/* Calculate a new base_time based on cycle_time. The HW recommends the new
 * base time to be at least current time + 2 * cycle_time.
 */
static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
					 const u32 cycle_time,
					 const ktime_t org_base_time,
					 ktime_t *new_base_time)
{
	ktime_t current_time, threshold_time;
	struct timespec64 ts;

	/* Get the current time and calculate the threshold_time */
	lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
	current_time = timespec64_to_ktime(ts);
	threshold_time = current_time + (2 * cycle_time);

	/* If the org_base_time is far enough in the future, just use it */
	if (org_base_time >= threshold_time) {
		*new_base_time = org_base_time;
		return;
	}

	/* If the org_base_time is smaller than current_time, calculate the new
	 * base time as follows.
	 */
	if (org_base_time <= current_time) {
		u64 tmp = current_time - org_base_time;
		u32 rem = 0;

		if (tmp > cycle_time)
			div_u64_rem(tmp, cycle_time, &rem);
		rem = cycle_time - rem;
		*new_base_time = threshold_time + rem;
		return;
	}

	/* The only remaining case is org_base_time between current_time and
	 * threshold_time. In this case the new_base_time is calculated as
	 * org_base_time + 2 * cycle_time.
	 */
	*new_base_time = org_base_time + 2 * cycle_time;
}

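/* Update the port profile link speed, which the HW uses for the guard band
 * calculation.
 */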
int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
{
	struct lan966x *lan966x = port->lan966x;
	u8 taprio_speed;

	switch (speed) {
	case SPEED_10:
		taprio_speed = LAN966X_TAPRIO_SPEED_10;
		break;
	case SPEED_100:
		taprio_speed = LAN966X_TAPRIO_SPEED_100;
		break;
	case SPEED_1000:
		taprio_speed = LAN966X_TAPRIO_SPEED_1000;
		break;
	case SPEED_2500:
		taprio_speed = LAN966X_TAPRIO_SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
		QSYS_TAS_PROFILE_CFG_LINK_SPEED,
		lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));

	return 0;
}

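/* Offload a new taprio schedule: validate it, pick a list, program the gcl
 * entries, set the base time and cycle time, and start list processing.
 */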
int lan966x_taprio_add(struct lan966x_port *port,
		       struct tc_taprio_qopt_offload *qopt)
{
	struct lan966x *lan966x = port->lan966x;
	int err, new_list, obs_list;
	struct timespec64 ts;
	ktime_t base_time;

	err = lan966x_taprio_check(qopt);
	if (err)
		return err;

	err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
	if (err)
		return err;

	err = lan966x_taprio_gcl_setup(port, qopt, new_list);
	if (err)
		return err;

	lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
				     qopt->base_time, &base_time);

	ts = ktime_to_timespec64(base_time);
	lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
	       lan966x, QSYS_TAS_BT_NSEC);

	lan_wr(lower_32_bits(ts.tv_sec),
	       lan966x, QSYS_TAS_BT_SEC_LSB);

	lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
	       lan966x, QSYS_TAS_BT_SEC_MSB);

	lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);

	lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
		QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
		lan966x, QSYS_TAS_STARTUP_CFG);

	/* Start list processing */
	lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
		QSYS_TAS_LST_LIST_STATE,
		lan966x, QSYS_TAS_LST);

	return err;
}

int lan966x_taprio_del(struct lan966x_port *port)
{
	return lan966x_taprio_shutdown(port);
}

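/* One-time TAS setup: configure the revisit delay, the maximum number of
 * lists and assign each port to its own port profile.
 */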
void lan966x_taprio_init(struct lan966x *lan966x)
{
	int num_taprio_lists;
	int p;

	lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
						lan966x_ptp_get_period_ps()),
	       lan966x, QSYS_TAS_STM_CFG);

	num_taprio_lists = lan966x->num_phys_ports *
			   LAN966X_TAPRIO_ENTRIES_PER_PORT;

	/* For now we always use guard band on all queues */
	lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
		QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
		QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
		QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
		lan966x, QSYS_TAS_CFG_CTRL);

	for (p = 0; p < lan966x->num_phys_ports; p++)
		lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
			QSYS_TAS_PROFILE_CFG_PORT_NUM,
			lan966x, QSYS_TAS_PROFILE_CFG(p));
}

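/* Remove any installed schedules on all ports */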
void lan966x_taprio_deinit(struct lan966x *lan966x)
{
	int p;

	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		if (!lan966x->ports[p])
			continue;

		lan966x_taprio_del(lan966x->ports[p]);
	}
}