// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}

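/*
 * Reads the link controller descriptor register. The descriptor tells
 * the number of link controllers and where the port specific LC
 * registers start relative to the LC capability.
 */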
static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

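/*
 * Returns the offset of the port specific link controller registers
 * for @port, or negative errno in case of failure.
 */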
static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}

/**
 * tb_lc_reset_port() - Trigger downstream port reset through LC
 * @port: Port that is reset
 *
 * Triggers downstream port reset through link controller registers.
 * Returns %0 in case of success, negative errno otherwise. Only
 * supports non-USB4 routers with link controller (that's Thunderbolt 2
 * and Thunderbolt 3).
 */
int tb_lc_reset_port(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 mode;

	if (sw->generation < 2)
		return -EINVAL;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
	if (ret)
		return ret;

	mode |= TB_LC_PORT_MODE_DPR;

	ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
	if (ret)
		return ret;

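	/* Keep the downstream port reset asserted for a while before releasing it */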
	fsleep(10000);

	ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
	if (ret)
		return ret;

	mode &= ~TB_LC_PORT_MODE_DPR;

	return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
}

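/*
 * Sets or clears the connected (configured) bit of the lane used by
 * @port in the LC_SX_CTRL register. For the upstream port the upstream
 * bit is updated as well.
 */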
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

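/*
 * Sets or clears the XDomain configured bit of the lane used by @port
 * in the LC_SX_CTRL register.
 */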
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

/**
 * tb_lc_start_lane_initialization() - Start lane initialization
 * @port: Device router lane 0 adapter
 *
 * Starts lane initialization for @port after the router resumed from
 * sleep. Should be called for those downstream lane adapters that were
 * not connected (tb_lc_configure_port() was not called) before sleep.
 *
 * Returns %0 on success and negative errno in case of failure.
 */
int tb_lc_start_lane_initialization(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int ret, cap;
	u32 ctrl;

	if (!tb_route(sw))
		return 0;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLI;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
 * @port: Lane adapter
 *
 * The TB_LC_LINK_ATTR_CPS bit reflects whether the link supports CLx,
 * including active cables (if connected on the link).
 */
bool tb_lc_is_clx_supported(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_LINK_ATTR_CPS);
}

/**
 * tb_lc_is_usb_plugged() - Is there a USB device connected to the port
 * @port: Device router lane 0 adapter
 *
 * Returns true if the @port has a USB Type-C device connected.
 */
bool tb_lc_is_usb_plugged(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	if (sw->generation != 3)
		return false;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_CS_42, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_CS_42_USB_PLUGGED);
}

/**
 * tb_lc_is_xhci_connected() - Is the internal xHCI connected
 * @port: Device router lane 0 adapter
 *
 * Returns true if the internal xHCI has been connected to @port.
 */
bool tb_lc_is_xhci_connected(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	if (sw->generation != 3)
		return false;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_LINK_REQ_XHCI_CONNECT);
}

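/*
 * Sets or clears the xHCI connect bit in the LC_LINK_REQ register of
 * @port. Only Thunderbolt 3 (generation 3) routers have the internal
 * xHCI so anything else returns an error.
 */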
static int __tb_lc_xhci_connect(struct tb_port *port, bool connect)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	if (sw->generation != 3)
		return -EINVAL;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
	if (ret)
		return ret;

	if (connect)
		val |= TB_LC_LINK_REQ_XHCI_CONNECT;
	else
		val &= ~TB_LC_LINK_REQ_XHCI_CONNECT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
}

/**
 * tb_lc_xhci_connect() - Connect internal xHCI
 * @port: Device router lane 0 adapter
 *
 * Tells LC to connect the internal xHCI to @port. Returns %0 on success
 * and negative errno in case of failure. Can be called for Thunderbolt 3
 * routers only.
 */
int tb_lc_xhci_connect(struct tb_port *port)
{
	int ret;

	ret = __tb_lc_xhci_connect(port, true);
	if (ret)
		return ret;

	tb_port_dbg(port, "xHCI connected\n");
	return 0;
}

/**
 * tb_lc_xhci_disconnect() - Disconnect internal xHCI
 * @port: Device router lane 0 adapter
 *
 * Tells LC to disconnect the internal xHCI from @port. Can be called
 * for Thunderbolt 3 routers only.
 */
void tb_lc_xhci_disconnect(struct tb_port *port)
{
	__tb_lc_xhci_connect(port, false);
	tb_port_dbg(port, "xHCI disconnected\n");
}

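/*
 * Programs the wake bits of a single link controller at @offset
 * according to @flags.
 */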
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Clear the wake bits first and then enable the wake sources
	 * requested in @flags: wake on connect, DP, PCIe and USB4 (wake
	 * coming from another router).
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
		  TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;
	if (flags & TB_WAKE_ON_DP)
		ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * Sets the wake bits of each link controller according to @flags.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch whose link controllers are put to sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether the conditions for bonding the lanes between the
 * parent and @sw are met.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

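/*
 * Returns the sink number (0 or 1) used for the given DP IN adapter,
 * or negative errno if the router has no DP IN adapters.
 */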
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

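/*
 * Returns %0 if the given sink is available for the connection manager
 * to use, %-EBUSY if it is allocated to something else, and negative
 * errno if reading the allocation register fails.
 */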
static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned
 * if the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let the authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

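	/* Writing all ones to the LC power register forces the power on */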
	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}