Lines Matching defs:in

189 	 * in upstream router is still possible.
312 * clean them up by calling tb_tunnel_deactivate() below in that
414 * @reserved_up: Upstream bandwidth in Mb/s to reserve
415 * @reserved_down: Downstream bandwidth in Mb/s to reserve
418 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
462 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
470 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
657 struct tb_port *in = tunnel->src_port;
664 if (in->sw->generation < 2 || out->sw->generation < 2)
671 ret = tb_dp_cm_handshake(in, out, 3000);
676 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
677 in->cap_adap + DP_LOCAL_CAP, 1);
748 return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
749 in->cap_adap + DP_REMOTE_CAP, 1);
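The fragments above (tb_dp_cm_handshake() through the DP_REMOTE_CAP write) describe the capability exchange done when a DP tunnel comes up: on routers of generation 2 or newer the connection manager handshake is awaited first, then the DP IN adapter's local capabilities are read, and finally the (possibly reduced) DP OUT capabilities are written into the IN adapter's remote-capability register. A condensed sketch of that order, with error paths trimmed and the actual rate/lane reduction only hinted at (that part is an assumption here):

    u32 in_dp_cap, out_dp_cap;
    int ret;

    /* Older generations do not implement the exchange. */
    if (in->sw->generation < 2 || out->sw->generation < 2)
        return 0;

    /* Wait up to 3 s for the CM handshake (no-op on non-USB4 routers). */
    ret = tb_dp_cm_handshake(in, out, 3000);
    if (ret)
        return ret;

    /* Read the DP IN adapter's local capabilities. */
    ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                       in->cap_adap + DP_LOCAL_CAP, 1);
    if (ret)
        return ret;

    /* ... read DP OUT's local caps into out_dp_cap and reduce the
     * advertised rate/lanes as needed (omitted in this sketch) ... */

    /* Expose the (reduced) DP OUT capabilities to the DP IN adapter. */
    return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                         in->cap_adap + DP_REMOTE_CAP, 1);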
756 struct tb_port *in = tunnel->src_port;
764 ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
768 ret = usb4_dp_port_set_group_id(in, in->group->index);
776 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
777 in->cap_adap + DP_LOCAL_CAP, 1);
798 ret = usb4_dp_port_set_nrd(in, rate, lanes);
825 ret = usb4_dp_port_set_granularity(in, granularity);
830 * Bandwidth estimation is pretty much what we have in
841 ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
846 ret = usb4_dp_port_allocate_bandwidth(in, 0);
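Taken together, the usb4_dp_port_set_*() fragments above give the bring-up order for the USB4 DP bandwidth allocation mode on the DP IN adapter: advertise connection-manager support, join a bandwidth group, program the non-reduced rate and lanes (NRD), the allocation granularity and the estimated bandwidth, then clear any stale allocation. A minimal ordering sketch, assuming rate, lanes, granularity and estimated_bw were already derived from DP_LOCAL_CAP (that derivation is not visible in the listing and is omitted):

    int ret;

    ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
    if (ret)
        return ret;

    ret = usb4_dp_port_set_group_id(in, in->group->index);
    if (ret)
        return ret;

    ret = usb4_dp_port_set_nrd(in, rate, lanes);
    if (ret)
        return ret;

    ret = usb4_dp_port_set_granularity(in, granularity);
    if (ret)
        return ret;

    ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
    if (ret)
        return ret;

    /* Start from a clean slate: nothing allocated yet. */
    return usb4_dp_port_allocate_bandwidth(in, 0);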
856 struct tb_port *in = tunnel->src_port;
857 struct tb_switch *sw = in->sw;
858 struct tb *tb = in->sw->tb;
868 if (!usb4_dp_port_bandwidth_mode_supported(in))
873 ret = usb4_dp_port_set_cm_id(in, tb->index);
882 struct tb_port *in = tunnel->src_port;
884 if (!usb4_dp_port_bandwidth_mode_supported(in))
886 if (usb4_dp_port_bandwidth_mode_enabled(in)) {
887 usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
932 * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
934 * Returns maximum possible bandwidth for this tunnel in Mb/s.
939 struct tb_port *in = tunnel->src_port;
950 ret = tb_port_read(in, &cap, TB_CFG_PORT,
951 in->cap_adap + DP_LOCAL_CAP, 1);
961 ret = usb4_dp_port_granularity(in);
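The @max_bw_rounded fragment above indicates the maximum is rounded up to the adapter's allocation granularity. A hedged sketch of that rounding (max_bw is a placeholder local here; roundup() is the standard kernel macro):

    int ret, granularity;

    ret = usb4_dp_port_granularity(in);
    if (ret < 0)
        return ret;
    granularity = ret;

    /* Round the capability-derived maximum up to the next granularity step. */
    *max_bw_rounded = roundup(max_bw, granularity);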
974 struct tb_port *in = tunnel->src_port;
977 if (!usb4_dp_port_bandwidth_mode_enabled(in))
984 ret = usb4_dp_port_allocated_bandwidth(in);
1009 struct tb_port *in = tunnel->src_port;
1015 if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1018 ret = usb4_dp_port_allocated_bandwidth(in);
1047 struct tb_port *in = tunnel->src_port;
1050 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1059 ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1066 ret = usb4_dp_port_allocate_bandwidth(in, tmp);
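The allocation fragments above only apply while the bandwidth allocation mode is enabled on the DP IN adapter: the requested amount is clamped, programmed, and the adapter is then queried for what it actually granted. A hedged sketch of that query-then-allocate pattern (requested_bw and max_bw are made-up caller variables; the real clamping logic is more involved):

    int ret, tmp, allocated_bw;

    if (!usb4_dp_port_bandwidth_mode_enabled(in))
        return -EOPNOTSUPP;

    tmp = min(requested_bw, max_bw);
    ret = usb4_dp_port_allocate_bandwidth(in, tmp);
    if (ret)
        return ret;

    /* Read back how much the adapter actually granted (in Mb/s). */
    ret = usb4_dp_port_allocated_bandwidth(in);
    if (ret < 0)
        return ret;
    allocated_bw = ret;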
1082 struct tb_port *in = tunnel->src_port;
1092 ret = tb_port_read(in, &val, TB_CFG_PORT,
1093 in->cap_adap + DP_COMMON_CAP, 1);
1112 struct tb_port *in = tunnel->src_port;
1131 ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
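The DP_COMMON_CAP read above is what turns the negotiated link into a bandwidth figure: the rate and lane count are decoded from the capability dword and converted to Mb/s. A sketch using tunnel.c's static helpers, whose names do not appear in this listing and are therefore an assumption:

    u32 val;
    int ret, rate, lanes, bw;

    ret = tb_port_read(in, &val, TB_CFG_PORT,
                       in->cap_adap + DP_COMMON_CAP, 1);
    if (ret)
        return ret;

    /* Decode link rate (Mb/s) and lane count, then convert to bandwidth. */
    rate = tb_dp_cap_get_rate(val);
    lanes = tb_dp_cap_get_lanes(val);
    bw = tb_dp_bandwidth(rate, lanes);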
1188 * negotiation shall complete in 5 seconds after the tunnel is
1189 * established. We give it 100ms extra just in case.
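Those two comment lines amount to a wait of 5000 ms plus a 100 ms margin; as a sketch (the constant name is hypothetical, not taken from the driver):

    /* 5 s for DPRX negotiation plus 100 ms of slack. */
    #define TB_DPRX_WAIT_TIMEOUT_MS (5000 + 100)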
1303 struct tb_port *in, *out;
1306 in = tunnel->src_port;
1309 if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1310 in->cap_adap + DP_LOCAL_CAP, 1))
1331 if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1332 in->cap_adap + DP_REMOTE_CAP, 1))
1345 * @in: DP in adapter
1348 * If @in adapter is active, follows the tunnel to the DP out adapter
1354 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1361 if (!tb_dp_port_is_enabled(in))
1375 tunnel->src_port = in;
1377 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1381 tb_dp_port_enable(in, false);
1388 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1395 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1404 tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
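The discovery fragments above (tb_tunnel_discover_dp()) rebuild an existing tunnel from hardware state: nothing is done unless the DP IN adapter is enabled, then the video path and both AUX paths are walked with tb_path_discover(), and the result is discarded if it does not end on a DP adapter. A trimmed sketch of the discovery order; arguments not visible in the listing (last-port pointer, alloc_hopid) and the cleanup labels are assumptions:

    if (!tb_dp_port_is_enabled(in))
        return NULL;

    tunnel->src_port = in;

    /* Video path: the destination is learned while walking the path. */
    path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                            &port, "Video", alloc_hopid);
    if (!path) {
        /* Enabled DP IN without a video path: just disable the adapter. */
        tb_dp_port_enable(in, false);
        goto err_free;
    }

    /* AUX TX runs in the same direction as video ... */
    path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL,
                            "AUX TX", alloc_hopid);

    /* ... while AUX RX runs back from the DP OUT side towards DP IN. */
    path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                            &port, "AUX RX", alloc_hopid);

    /* If any path does not terminate on a DP adapter, warn and clean up. */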
1435 * @in: DP in adapter port
1443 * Allocates a tunnel between @in and @out that is capable of tunneling
1448 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1457 if (WARN_ON(!in->cap_adap || !out->cap_adap))
1471 tunnel->src_port = in;
1477 pm_support = usb4_switch_version(in->sw) >= 2;
1479 path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1486 path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1493 path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
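tb_tunnel_alloc_dp() builds the same three paths from scratch rather than discovering them: video from DP IN to DP OUT, AUX TX in the same direction, and AUX RX back from OUT to IN, all via tb_path_alloc(). A sketch of the allocation order; link_nr and the trailing arguments beyond what the listing shows are assumptions:

    if (WARN_ON(!in->cap_adap || !out->cap_adap))
        return NULL;

    tunnel->src_port = in;

    path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                         link_nr, "Video");
    path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                         TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
    path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                         TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");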
1974 * clean them up by calling tb_tunnel_deactivate() below in that
2178 * Return: 0 on success and negative errno in case of failure
2304 * @max_up: Maximum upstream bandwidth in Mb/s
2305 * @max_down: Maximum downstream bandwidth in Mb/s
2325 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2326 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2347 * @alloc_up: New upstream bandwidth in Mb/s
2348 * @alloc_down: New downstream bandwidth in Mb/s
2353 * in case of failure.
2370 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2372 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2375 * Stores the amount of isochronous bandwidth @tunnel consumes in
2414 * Returns %0 in case of success and negative errno otherwise.
2435 * @available_up: Available upstream bandwidth (in Mb/s)
2436 * @available_down: Available downstream bandwidth (in Mb/s)
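All of the *_up/*_down parameters above follow the same directional convention: "up" is bandwidth flowing from @dst_port towards @src_port and "down" from @src_port towards @dst_port, both in Mb/s. A hedged caller-side sketch of the consumed-bandwidth query that part of this kerneldoc appears to belong to (the local variable names are made up):

    int consumed_up, consumed_down;
    int ret;

    /* "up" = dst_port -> src_port, "down" = src_port -> dst_port */
    ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
    if (ret)
        return ret;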