Lines matching refs:tb (cross-reference hits for the identifier tb in drivers/thunderbolt/tb.c, the software connection manager):

15 #include "tb.h"
66 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
68 return ((void *)tcm - sizeof(struct tb));
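The tcm_to_tb() hit above works because the connection manager's private data is allocated directly after struct tb in the same block, so stepping back sizeof(struct tb) from the private pointer recovers the domain. A minimal user-space sketch of that layout trick; the struct members are placeholders, not the driver's real fields:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins; the real structs live in tb.h and have many fields. */
    struct tb    { int index; };
    struct tb_cm { int hotplug_active; };

    /* Private data sits immediately after struct tb in one allocation. */
    static struct tb_cm *tb_priv(struct tb *tb)
    {
        return (struct tb_cm *)(tb + 1);
    }

    /* Reverse of tb_priv(): step back over the leading struct tb. */
    static struct tb *tcm_to_tb(struct tb_cm *tcm)
    {
        return (struct tb *)((char *)tcm - sizeof(struct tb));
    }

    int main(void)
    {
        struct tb *tb = calloc(1, sizeof(struct tb) + sizeof(struct tb_cm));
        struct tb_cm *tcm = tb_priv(tb);

        printf("round trip ok: %d\n", tcm_to_tb(tcm) == tb);
        free(tb);
        return 0;
    }

The same round trip shows up later in the listing: line 1511 stores group->tb = tcm_to_tb(tcm), and line 3055 recovers the domain from the private pointer the same way.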
73 struct tb *tb;
81 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
89 ev->tb = tb;
94 queue_work(tb->wq, &ev->work);
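The tb_queue_hotplug() hits (lines 81-94) show the deferred-event pattern: the notification path packs route, port, and unplug into a heap-allocated event, stashes the domain pointer, and queues it on tb->wq for later handling. A hedged user-space analogue; the "queue" is simulated by a direct call, and the field names follow the listing:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tb { const char *name; };

    /* Event shape as suggested by lines 81-94: context plus payload. */
    struct tb_hotplug_event {
        struct tb *tb;
        uint64_t route;
        uint8_t port;
        bool unplug;
    };

    /* Worker side: consumes and frees the event. */
    static void tb_handle_hotplug(struct tb_hotplug_event *ev)
    {
        printf("%s: hotplug %llx:%u (unplug: %d)\n", ev->tb->name,
               (unsigned long long)ev->route, (unsigned)ev->port, ev->unplug);
        free(ev);
    }

    /* Producer side: package the event; the direct call below stands in
     * for queue_work(tb->wq, &ev->work). */
    static void tb_queue_hotplug(struct tb *tb, uint64_t route, uint8_t port,
                                 bool unplug)
    {
        struct tb_hotplug_event *ev = malloc(sizeof(*ev));

        if (!ev)
            return;
        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        tb_handle_hotplug(ev);
    }

    int main(void)
    {
        struct tb tb = { "domain0" };

        tb_queue_hotplug(&tb, 0x1, 3, false);
        return 0;
    }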
101 struct tb_cm *tcm = tb_priv(sw->tb);
128 struct tb_cm *tcm = tb_priv(sw->tb);
145 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
147 struct tb_cm *tcm = tb_priv(tb);
160 static void tb_discover_dp_resources(struct tb *tb)
162 struct tb_cm *tcm = tb_priv(tb);
167 tb_discover_dp_resource(tb, tunnel->dst_port);
174 struct tb_cm *tcm = tb_priv(sw->tb);
287 sw = tunnel->tb->root_switch;
334 struct tb *tb = sw->tb;
342 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
347 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
351 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
388 struct tb *tb = sw->tb;
396 xd = tb_xdomain_find_by_route(tb, route);
402 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
446 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
450 struct tb_cm *tcm = tb_priv(tb);
464 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
478 if (sw == tb->root_switch)
482 port = tb_port_at(tb_route(sw), tb->root_switch);
484 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
488 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
493 * @tb: Domain structure
504 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
516 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
541 * @tb: Domain structure
556 static int tb_consumed_dp_bandwidth(struct tb *tb,
564 struct tb_cm *tcm = tb_priv(tb);
642 * @tb: Domain structure
656 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
745 * @tb: Domain structure
762 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
779 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
784 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
792 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
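Taken together, the three hits inside tb_available_bandwidth() (lines 779-792) suggest its shape: for each hop of the path, start from the link maximum and subtract what USB3/PCIe and DP tunnels already consume, keeping the minimum across the path. A sketch of that reduction, under that assumption only; the struct and the figures are illustrative, not real driver data:

    #include <stdio.h>

    /* Illustrative per-link figures in Mb/s. */
    struct link_bw {
        int max;        /* cf. tb_maximum_bandwidth() */
        int usb3_pcie;  /* cf. tb_consumed_usb3_pcie_bandwidth() */
        int dp;         /* cf. tb_consumed_dp_bandwidth() */
    };

    /* Path availability: the minimum, over all links, of what remains
     * once already-consumed traffic is subtracted. */
    static int available_bandwidth(const struct link_bw *links, int n)
    {
        int avail = -1;

        for (int i = 0; i < n; i++) {
            int left = links[i].max - links[i].usb3_pcie - links[i].dp;

            if (left < 0)
                left = 0;
            if (avail < 0 || left < avail)
                avail = left;
        }
        return avail;
    }

    int main(void)
    {
        /* Two hops: the second link is the bottleneck. */
        const struct link_bw path[] = {
            { 40000, 0, 8960 },
            { 20000, 0, 8960 },
        };

        printf("available: %d Mb/s\n", available_bandwidth(path, 2));
        return 0;
    }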
813 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
819 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
823 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
829 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
839 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
852 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
857 struct tb_cm *tcm = tb_priv(tb);
861 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
893 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
898 ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
906 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
922 tb_reclaim_usb3_bandwidth(tb, down, up);
930 tb_reclaim_usb3_bandwidth(tb, down, up);
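tb_tunnel_usb3() (lines 852-930) follows a release, measure, allocate, reclaim order, with tb_reclaim_usb3_bandwidth() appearing on both the success path (line 922) and the failure path (line 930). That is the usual goto-unwind idiom; a minimal runnable illustration with stub helpers (the names echo the listing, the bodies do not):

    #include <stdio.h>

    /* Stubs standing in for the helpers named in the listing. */
    static int release_unused_bandwidth(void)            { return 0; }
    static int compute_available(int *up, int *down)     { *up = *down = 1000; return 0; }
    static int alloc_and_activate_tunnel(int up, int dn) { return (up && dn) ? 0 : -1; }
    static void reclaim_bandwidth(void)                  { puts("reclaimed"); }

    static int tunnel_usb3(void)
    {
        int available_up, available_down, ret;

        ret = release_unused_bandwidth();
        if (ret)
            return ret;

        ret = compute_available(&available_up, &available_down);
        if (ret)
            goto err_reclaim;

        ret = alloc_and_activate_tunnel(available_up, available_down);
        if (ret)
            goto err_reclaim;

        /* The success path reclaims as well: any leftover is handed
         * back once the new tunnel has taken its share. */
        reclaim_bandwidth();
        return 0;

    err_reclaim:
        reclaim_bandwidth();
        return ret;
    }

    int main(void) { return tunnel_usb3(); }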
944 ret = tb_tunnel_usb3(sw->tb, sw);
962 * @tb: Domain structure
975 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
999 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1072 * @tb: Domain structure
1081 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1109 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1169 struct tb *tb = sw->tb;
1196 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1197 tb_configure_sym(tb, host_port, up, false);
1227 struct tb_cm *tcm = tb_priv(port->sw->tb);
1238 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1263 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1342 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1359 struct tb *tb = group->tb;
1363 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1375 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1388 ret = tb_release_unused_usb3_bandwidth(tb,
1398 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1433 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1436 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1439 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1441 struct tb_cm *tcm = tb_priv(tb);
1444 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1453 tb_dbg(tb, "bandwidth re-calculation done\n");
1459 tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
1486 tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
1488 tb_configure_sym(group->tb, in, tunnel->dst_port, true);
1495 struct tb *tb = group->tb;
1497 mutex_lock(&tb->lock);
1499 tb_recalc_estimated_bandwidth(tb);
1501 mutex_unlock(&tb->lock);
1511 group->tb = tcm_to_tb(tcm);
1618 static void tb_discover_tunnels(struct tb *tb)
1620 struct tb_cm *tcm = tb_priv(tb);
1623 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
1649 struct tb *tb;
1657 tb = tunnel->tb;
1673 tb_configure_sym(tb, src_port, dst_port, true);
1682 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1699 static void tb_free_invalid_tunnels(struct tb *tb)
1701 struct tb_cm *tcm = tb_priv(tb);
1787 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1790 struct tb_cm *tcm = tb_priv(tb);
1793 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1819 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1830 static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
1834 struct tb_cm *tcm = tb_priv(tb);
1872 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1874 tb_warn(tb, "failed to release unused bandwidth\n");
1878 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1883 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1886 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1905 tb_reclaim_usb3_bandwidth(tb, in, out);
1910 tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1913 tb_recalc_estimated_bandwidth(tb);
1927 tb_reclaim_usb3_bandwidth(tb, in, out);
1941 static void tb_tunnel_dp(struct tb *tb)
1943 struct tb_cm *tcm = tb_priv(tb);
1947 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1955 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1971 out = tb_find_dp_out(tb, port);
1973 tb_tunnel_one_dp(tb, in, out);
1979 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
2025 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
2040 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
2051 tb_recalc_estimated_bandwidth(tb);
2052 tb_tunnel_dp(tb);
2055 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
2057 struct tb_cm *tcm = tb_priv(tb);
2074 tb_tunnel_dp(tb);
2077 static void tb_disconnect_and_release_dp(struct tb *tb)
2079 struct tb_cm *tcm = tb_priv(tb);
2100 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
2109 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
2121 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
2124 struct tb_cm *tcm = tb_priv(tb);
2140 tunnel = tb_tunnel_alloc_pci(tb, up, down);
2165 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2169 struct tb_cm *tcm = tb_priv(tb);
2177 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2179 mutex_lock(&tb->lock);
2187 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2202 mutex_unlock(&tb->lock);
2209 mutex_unlock(&tb->lock);
2214 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2218 struct tb_cm *tcm = tb_priv(tb);
2225 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2246 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2251 mutex_lock(&tb->lock);
2252 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2255 mutex_unlock(&tb->lock);
2265 * Executes on tb->wq.
2270 struct tb *tb = ev->tb;
2271 struct tb_cm *tcm = tb_priv(tb);
2276 pm_runtime_get_sync(&tb->dev);
2278 mutex_lock(&tb->lock);
2282 sw = tb_switch_find_by_route(tb, ev->route);
2284 tb_warn(tb,
2290 tb_warn(tb,
2297 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2310 tb_free_invalid_tunnels(tb);
2321 tb_recalc_estimated_bandwidth(tb);
2322 tb_tunnel_dp(tb);
2337 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2341 tb_dp_resource_unavailable(tb, port);
2361 tb_dp_resource_available(tb, port);
2371 mutex_unlock(&tb->lock);
2373 pm_runtime_mark_last_busy(&tb->dev);
2374 pm_runtime_put_autosuspend(&tb->dev);
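The hotplug worker (lines 2270-2374) brackets everything between pm_runtime_get_sync() and pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend(), taking tb->lock for the body, so the domain can neither autosuspend nor change topology mid-event. A user-space analogue of that bracket, with a plain counter standing in for the runtime-PM usage count:

    #include <pthread.h>
    #include <stdio.h>

    struct tb {
        pthread_mutex_t lock;
        int usage_count;  /* stand-in for the runtime-PM usage count */
    };

    static void handle_hotplug(struct tb *tb)
    {
        tb->usage_count++;             /* pm_runtime_get_sync() */

        pthread_mutex_lock(&tb->lock); /* mutex_lock(&tb->lock) */
        /* ... look up the router by route, handle plug or unplug ... */
        puts("hotplug handled");
        pthread_mutex_unlock(&tb->lock);

        tb->usage_count--;             /* pm_runtime_put_autosuspend() */
    }

    int main(void)
    {
        struct tb tb = { PTHREAD_MUTEX_INITIALIZER, 0 };

        handle_hotplug(&tb);
        return 0;
    }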
2386 struct tb *tb = tunnel->tb;
2474 tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
2498 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
2507 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
2525 ret = tb_configure_asym(tb, in, out, *requested_up,
2528 tb_configure_sym(tb, in, out, true);
2536 tb_configure_sym(tb, in, out, true);
2548 tb_dbg(tb, "group %d released %d total %d Mb/s\n",
2556 tb_reclaim_usb3_bandwidth(tb, in, out);
2577 struct tb *tb = ev->tb;
2578 struct tb_cm *tcm = tb_priv(tb);
2582 pm_runtime_get_sync(&tb->dev);
2584 mutex_lock(&tb->lock);
2588 sw = tb_switch_find_by_route(tb, ev->route);
2590 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
2603 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
2634 tb_recalc_estimated_bandwidth(tb);
2666 tb_recalc_estimated_bandwidth(tb);
2672 mutex_unlock(&tb->lock);
2674 pm_runtime_mark_last_busy(&tb->dev);
2675 pm_runtime_put_autosuspend(&tb->dev);
2680 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
2688 ev->tb = tb;
2692 queue_work(tb->wq, &ev->work);
2695 static void tb_handle_notification(struct tb *tb, u64 route,
2703 if (tb_cfg_ack_notification(tb->ctl, route, error))
2704 tb_warn(tb, "could not ack notification on %llx\n",
2709 if (tb_cfg_ack_notification(tb->ctl, route, error))
2710 tb_warn(tb, "could not ack notification on %llx\n",
2712 tb_queue_dp_bandwidth_request(tb, route, error->port);
2726 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
2734 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
2739 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
2743 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
2744 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
2748 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
2751 static void tb_stop(struct tb *tb)
2753 struct tb_cm *tcm = tb_priv(tb);
2769 tb_switch_remove(tb->root_switch);
2773 static void tb_deinit(struct tb *tb)
2775 struct tb_cm *tcm = tb_priv(tb);
2804 static int tb_start(struct tb *tb, bool reset)
2806 struct tb_cm *tcm = tb_priv(tb);
2810 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2811 if (IS_ERR(tb->root_switch))
2812 return PTR_ERR(tb->root_switch);
2822 tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
2824 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
2826 ret = tb_switch_configure(tb->root_switch);
2828 tb_switch_put(tb->root_switch);
2833 ret = tb_switch_add(tb->root_switch);
2835 tb_switch_put(tb->root_switch);
2843 tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
2845 tb_switch_tmu_enable(tb->root_switch);
2853 if (reset && tb_switch_is_usb4(tb->root_switch)) {
2855 if (usb4_switch_version(tb->root_switch) == 1)
2856 tb_switch_reset(tb->root_switch);
2861 tb_scan_switch(tb->root_switch);
2863 tb_discover_tunnels(tb);
2865 tb_discover_dp_resources(tb);
2872 tb_create_usb3_tunnels(tb->root_switch);
2874 tb_add_dp_resources(tb->root_switch);
2876 device_for_each_child(&tb->root_switch->dev, NULL,
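The tb_start() hits (lines 2810-2876) lay out the bring-up order: allocate the root switch, configure it, add it, enable TMU, optionally reset a USB4 v1 host router, then scan and discover existing tunnels before creating USB3 tunnels and adding DP resources. Note the tb_switch_put() calls on the two early failure paths (lines 2828 and 2835); a condensed sketch of that ordering and unwind, with stubs in place of the real helpers:

    #include <stdio.h>

    static int  root_switch_alloc(void)        { return 0; }
    static int  root_switch_configure(void)    { return 0; }
    static int  root_switch_add(void)          { return 0; }
    static void root_switch_put(void)          { puts("root switch released"); }
    static void tmu_configure_and_enable(void) { }
    static void scan_and_discover(void)        { }

    static int tb_start_sketch(void)
    {
        int ret;

        ret = root_switch_alloc();
        if (ret)
            return ret;

        /* A failure after allocation must drop the reference we hold,
         * matching tb_switch_put() at lines 2828 and 2835. */
        ret = root_switch_configure();
        if (ret)
            goto err_put;

        ret = root_switch_add();
        if (ret)
            goto err_put;

        tmu_configure_and_enable();  /* lines 2843-2845 */
        scan_and_discover();         /* scan, then discover tunnels and
                                      * DP resources (lines 2861-2874) */
        return 0;

    err_put:
        root_switch_put();
        return ret;
    }

    int main(void) { return tb_start_sketch(); }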
2884 static int tb_suspend_noirq(struct tb *tb)
2886 struct tb_cm *tcm = tb_priv(tb);
2888 tb_dbg(tb, "suspending...\n");
2889 tb_disconnect_and_release_dp(tb);
2890 tb_switch_suspend(tb->root_switch, false);
2892 tb_dbg(tb, "suspend finished\n");
2929 static int tb_resume_noirq(struct tb *tb)
2931 struct tb_cm *tcm = tb_priv(tb);
2936 tb_dbg(tb, "resuming...\n");
2942 if (!tb_switch_is_usb4(tb->root_switch))
2943 tb_switch_reset(tb->root_switch);
2945 tb_switch_resume(tb->root_switch, false);
2946 tb_free_invalid_tunnels(tb);
2947 tb_free_unplugged_children(tb->root_switch);
2948 tb_restore_children(tb->root_switch);
2956 tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2979 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2984 tb_dbg(tb, "resume finished\n");
3011 static int tb_freeze_noirq(struct tb *tb)
3013 struct tb_cm *tcm = tb_priv(tb);
3019 static int tb_thaw_noirq(struct tb *tb)
3021 struct tb_cm *tcm = tb_priv(tb);
3027 static void tb_complete(struct tb *tb)
3034 mutex_lock(&tb->lock);
3035 if (tb_free_unplugged_xdomains(tb->root_switch))
3036 tb_scan_switch(tb->root_switch);
3037 mutex_unlock(&tb->lock);
3040 static int tb_runtime_suspend(struct tb *tb)
3042 struct tb_cm *tcm = tb_priv(tb);
3044 mutex_lock(&tb->lock);
3045 tb_switch_suspend(tb->root_switch, true);
3047 mutex_unlock(&tb->lock);
3055 struct tb *tb = tcm_to_tb(tcm);
3057 mutex_lock(&tb->lock);
3058 if (tb->root_switch) {
3059 tb_free_unplugged_children(tb->root_switch);
3060 tb_free_unplugged_xdomains(tb->root_switch);
3062 mutex_unlock(&tb->lock);
3065 static int tb_runtime_resume(struct tb *tb)
3067 struct tb_cm *tcm = tb_priv(tb);
3070 mutex_lock(&tb->lock);
3071 tb_switch_resume(tb->root_switch, true);
3072 tb_free_invalid_tunnels(tb);
3073 tb_restore_children(tb->root_switch);
3077 mutex_unlock(&tb->lock);
3084 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
3174 struct tb *tb_probe(struct tb_nhi *nhi)
3177 struct tb *tb;
3179 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
3180 if (!tb)
3184 tb->security_level = TB_SECURITY_USER;
3186 tb->security_level = TB_SECURITY_NOPCIE;
3188 tb->cm_ops = &tb_cm_ops;
3190 tcm = tb_priv(tb);
3196 tb_dbg(tb, "using software connection manager\n");
3204 tb_warn(tb, "device links to tunneled native ports are missing!\n");
3206 return tb;
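Finally, tb_probe() (lines 3179-3206) allocates the domain with sizeof(*tcm) of trailing private space, which is what makes the tcm_to_tb() arithmetic at the top of the listing valid, and points tb->cm_ops at tb_cm_ops, a table of connection-manager callbacks. A toy version of that ops-table wiring; the two hooks shown are a subset chosen for illustration:

    #include <stdio.h>

    struct tb;

    /* Two hooks picked for illustration; the real table has many more. */
    struct tb_cm_ops {
        int  (*start)(struct tb *tb, int reset);
        void (*stop)(struct tb *tb);
    };

    struct tb {
        const struct tb_cm_ops *cm_ops;
        int security_level;
    };

    static int sw_cm_start(struct tb *tb, int reset)
    {
        (void)tb; (void)reset;
        puts("software connection manager: start");
        return 0;
    }

    static void sw_cm_stop(struct tb *tb)
    {
        (void)tb;
        puts("software connection manager: stop");
    }

    static const struct tb_cm_ops tb_cm_ops = {
        .start = sw_cm_start,
        .stop  = sw_cm_stop,
    };

    int main(void)
    {
        struct tb tb = { .cm_ops = &tb_cm_ops, .security_level = 0 };

        tb.cm_ops->start(&tb, 0);
        tb.cm_ops->stop(&tb);
        return 0;
    }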