Lines matching refs:tb — cross-reference hits for the symbol tb in the Thunderbolt software connection manager (drivers/thunderbolt/tb.c)
39 static inline struct tb *tcm_to_tb(struct tb_cm *tcm) in tcm_to_tb()
41 return ((void *)tcm - sizeof(struct tb)); in tcm_to_tb()
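
The tcm_to_tb() helper above (lines 39-41) is the inverse of tb_priv(): the connection-manager private data sits directly behind struct tb in the same allocation, so stepping back sizeof(struct tb) bytes from the private pointer recovers the containing domain. A minimal stand-alone sketch of that layout trick, using toy stand-in types rather than the real kernel structures:

    #include <assert.h>
    #include <stdlib.h>

    struct domain {            /* stands in for struct tb       */
            int index;
            char priv[];       /* stands in for tb->privdata    */
    };

    struct cm {                /* stands in for struct tb_cm    */
            int hotplug_active;
    };

    static struct domain *domain_alloc(size_t privsize)
    {
            /* one allocation: header followed by private area */
            return calloc(1, sizeof(struct domain) + privsize);
    }

    static void *domain_priv(struct domain *d)        /* like tb_priv()   */
    {
            return d->priv;
    }

    static struct domain *priv_to_domain(void *priv)  /* like tcm_to_tb() */
    {
            /* same arithmetic as line 41: step back over the header */
            return (struct domain *)((char *)priv - sizeof(struct domain));
    }

    int main(void)
    {
            struct domain *d = domain_alloc(sizeof(struct cm));
            struct cm *cm = domain_priv(d);

            assert(priv_to_domain(cm) == d);
            free(d);
            return 0;
    }
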
46 struct tb *tb; member
54 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) in tb_queue_hotplug() argument
62 ev->tb = tb; in tb_queue_hotplug()
67 queue_work(tb->wq, &ev->work); in tb_queue_hotplug()
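
Lines 46-67 show the deferred hotplug pattern: tb_queue_hotplug() copies the route, port and unplug flag (plus the tb pointer member from line 46) into a small heap-allocated event and queues its work item on the domain workqueue (tb->wq, line 67), so the actual handling runs later in process context (tb_handle_hotplug(), lines 1166 onwards). A stand-alone model of that copy-and-defer pattern, with a plain linked list standing in for struct work_struct and tb->wq:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct hotplug_event {                  /* everything the handler needs, by value */
            struct hotplug_event *next;     /* stands in for struct work_struct       */
            uint64_t route;
            uint8_t port;
            bool unplug;
    };

    static struct hotplug_event *pending;

    /* models tb_queue_hotplug(): called from the event path, so it only copies */
    static void queue_hotplug(uint64_t route, uint8_t port, bool unplug)
    {
            struct hotplug_event *ev = malloc(sizeof(*ev));

            if (!ev)
                    return;                 /* out of memory: drop the event */
            ev->route = route;
            ev->port = port;
            ev->unplug = unplug;
            ev->next = pending;
            pending = ev;
    }

    /* models the queued work running later (tb_handle_hotplug() in the driver) */
    static void drain_hotplug(void)
    {
            while (pending) {
                    struct hotplug_event *ev = pending;

                    pending = ev->next;
                    printf("hotplug %llx:%u unplug=%d\n",
                           (unsigned long long)ev->route, ev->port, ev->unplug);
                    free(ev);
            }
    }

    int main(void)
    {
            queue_hotplug(0x1, 3, false);   /* plug   */
            queue_hotplug(0x1, 3, true);    /* unplug */
            drain_hotplug();
            return 0;
    }
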
74 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
91 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
110 struct tb *tb = sw->tb; in tb_discover_tunnels() local
111 struct tb_cm *tcm = tb_priv(tb); in tb_discover_tunnels()
119 tunnel = tb_tunnel_discover_dp(tb, port); in tb_discover_tunnels()
123 tunnel = tb_tunnel_discover_pci(tb, port); in tb_discover_tunnels()
127 tunnel = tb_tunnel_discover_usb3(tb, port); in tb_discover_tunnels()
185 struct tb *tb = sw->tb; in tb_scan_xdomain() local
193 xd = tb_xdomain_find_by_route(tb, route); in tb_scan_xdomain()
199 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, in tb_scan_xdomain()
262 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, in tb_find_tunnel() argument
266 struct tb_cm *tcm = tb_priv(tb); in tb_find_tunnel()
280 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, in tb_find_first_usb3_tunnel() argument
294 if (sw == tb->root_switch) in tb_find_first_usb3_tunnel()
298 port = tb_port_at(tb_route(sw), tb->root_switch); in tb_find_first_usb3_tunnel()
300 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); in tb_find_first_usb3_tunnel()
304 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); in tb_find_first_usb3_tunnel()
307 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, in tb_available_bandwidth() argument
311 struct tb_cm *tcm = tb_priv(tb); in tb_available_bandwidth()
317 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_available_bandwidth()
400 static int tb_release_unused_usb3_bandwidth(struct tb *tb, in tb_release_unused_usb3_bandwidth() argument
406 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_release_unused_usb3_bandwidth()
410 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, in tb_reclaim_usb3_bandwidth() argument
416 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_reclaim_usb3_bandwidth()
420 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); in tb_reclaim_usb3_bandwidth()
426 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, in tb_reclaim_usb3_bandwidth()
429 tb_warn(tb, "failed to calculate available bandwidth\n"); in tb_reclaim_usb3_bandwidth()
433 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", in tb_reclaim_usb3_bandwidth()
439 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) in tb_tunnel_usb3() argument
444 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_usb3()
448 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); in tb_tunnel_usb3()
480 ret = tb_release_unused_usb3_bandwidth(tb, down, up); in tb_tunnel_usb3()
485 ret = tb_available_bandwidth(tb, down, up, &available_up, in tb_tunnel_usb3()
493 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, in tb_tunnel_usb3()
509 tb_reclaim_usb3_bandwidth(tb, down, up); in tb_tunnel_usb3()
517 tb_reclaim_usb3_bandwidth(tb, down, up); in tb_tunnel_usb3()
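
Lines 400-517 are the bandwidth bookkeeping around USB3 tunnel setup: tb_tunnel_usb3() first releases bandwidth the existing USB3 tunnel is not actively using, measures the remaining headroom on the path with tb_available_bandwidth(), allocates the new tunnel against that headroom, and finally lets tb_reclaim_usb3_bandwidth() hand whatever is left back to USB3. A toy version of the headroom step, assuming the usual "capacity minus consumed, bottleneck wins" shape rather than the driver's exact per-port accounting:

    #include <stdio.h>

    struct link { int capacity_mbps; int consumed_mbps; };

    /* bottleneck headroom along a path: min over links of (capacity - consumed) */
    static int available_bandwidth(const struct link *path, int nlinks)
    {
            int avail = -1;

            for (int i = 0; i < nlinks; i++) {
                    int free_bw = path[i].capacity_mbps - path[i].consumed_mbps;

                    if (avail < 0 || free_bw < avail)
                            avail = free_bw;
            }
            return avail;
    }

    int main(void)
    {
            struct link path[] = {
                    { 40000, 10000 },   /* host link, 10 Gb/s already in use */
                    { 20000,  5000 },   /* downstream link, tighter budget   */
            };

            printf("available: %d Mb/s\n",
                   available_bandwidth(path, 2));   /* prints 15000 */
            return 0;
    }

Releasing before measuring and reclaiming afterwards keeps the USB3 tunnel from hogging headroom the new tunnel could have used, which is why both the success path (line 509) and the error path (line 517) call tb_reclaim_usb3_bandwidth().
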
531 ret = tb_tunnel_usb3(sw->tb, sw); in tb_create_usb3_tunnels()
570 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port()
580 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, in tb_scan_port()
601 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, in tb_scan_port()
674 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
684 struct tb *tb; in tb_deactivate_and_free_tunnel() local
692 tb = tunnel->tb; in tb_deactivate_and_free_tunnel()
711 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); in tb_deactivate_and_free_tunnel()
728 static void tb_free_invalid_tunnels(struct tb *tb) in tb_free_invalid_tunnels() argument
730 struct tb_cm *tcm = tb_priv(tb); in tb_free_invalid_tunnels()
815 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) in tb_find_dp_out() argument
818 struct tb_cm *tcm = tb_priv(tb); in tb_find_dp_out()
821 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; in tb_find_dp_out()
841 p = tb_port_at(tb_route(port->sw), tb->root_switch); in tb_find_dp_out()
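
In tb_find_dp_out() (lines 815-841), the two tb_port_at(tb_route(...), tb->root_switch) lookups at lines 821 and 841 identify which downstream port of the host router the DP IN adapter and a candidate DP OUT hang off; as I read the comparison, a candidate is only accepted when both sit under the same downstream port, keeping the DP tunnel inside one branch of the topology. A toy sketch of that matching rule, modelled on the 8-bits-per-hop route string with the first hop in the low byte (hypothetical helper names):

    #include <stdint.h>
    #include <stdio.h>

    /* first hop of a route string = the root switch downstream port it uses;
     * a route of 0 means the adapter sits on the host router itself */
    static unsigned int root_downstream_port(uint64_t route)
    {
            return route & 0xff;            /* 8 bits per hop, first hop lowest */
    }

    static int same_branch(uint64_t in_route, uint64_t out_route)
    {
            if (!in_route || !out_route)
                    return 1;               /* host-router adapters pair freely */
            return root_downstream_port(in_route) ==
                   root_downstream_port(out_route);
    }

    int main(void)
    {
            printf("%d\n", same_branch(0x0301, 0x0501));    /* same branch -> 1 */
            printf("%d\n", same_branch(0x0301, 0x0502));    /* different   -> 0 */
            return 0;
    }
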
852 static void tb_tunnel_dp(struct tb *tb) in tb_tunnel_dp() argument
855 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_dp()
860 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); in tb_tunnel_dp()
868 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); in tb_tunnel_dp()
883 out = tb_find_dp_out(tb, port); in tb_tunnel_dp()
891 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); in tb_tunnel_dp()
895 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); in tb_tunnel_dp()
916 ret = tb_release_unused_usb3_bandwidth(tb, in, out); in tb_tunnel_dp()
918 tb_warn(tb, "failed to release unused bandwidth\n"); in tb_tunnel_dp()
922 ret = tb_available_bandwidth(tb, in, out, &available_up, in tb_tunnel_dp()
927 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", in tb_tunnel_dp()
930 tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down); in tb_tunnel_dp()
942 tb_reclaim_usb3_bandwidth(tb, in, out); in tb_tunnel_dp()
948 tb_reclaim_usb3_bandwidth(tb, in, out); in tb_tunnel_dp()
958 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) in tb_dp_resource_unavailable() argument
973 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); in tb_dp_resource_unavailable()
981 tb_tunnel_dp(tb); in tb_dp_resource_unavailable()
984 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) in tb_dp_resource_available() argument
986 struct tb_cm *tcm = tb_priv(tb); in tb_dp_resource_available()
1002 tb_tunnel_dp(tb); in tb_dp_resource_available()
1005 static void tb_disconnect_and_release_dp(struct tb *tb) in tb_disconnect_and_release_dp() argument
1007 struct tb_cm *tcm = tb_priv(tb); in tb_disconnect_and_release_dp()
1028 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) in tb_disconnect_pci() argument
1037 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); in tb_disconnect_pci()
1047 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) in tb_tunnel_pci() argument
1050 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_pci()
1068 tunnel = tb_tunnel_alloc_pci(tb, up, down); in tb_tunnel_pci()
1083 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, in tb_approve_xdomain_paths() argument
1087 struct tb_cm *tcm = tb_priv(tb); in tb_approve_xdomain_paths()
1094 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); in tb_approve_xdomain_paths()
1096 mutex_lock(&tb->lock); in tb_approve_xdomain_paths()
1097 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, in tb_approve_xdomain_paths()
1100 mutex_unlock(&tb->lock); in tb_approve_xdomain_paths()
1108 mutex_unlock(&tb->lock); in tb_approve_xdomain_paths()
1113 mutex_unlock(&tb->lock); in tb_approve_xdomain_paths()
1117 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, in __tb_disconnect_xdomain_paths() argument
1121 struct tb_cm *tcm = tb_priv(tb); in __tb_disconnect_xdomain_paths()
1128 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); in __tb_disconnect_xdomain_paths()
1142 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, in tb_disconnect_xdomain_paths() argument
1147 mutex_lock(&tb->lock); in tb_disconnect_xdomain_paths()
1148 __tb_disconnect_xdomain_paths(tb, xd, transmit_path, in tb_disconnect_xdomain_paths()
1151 mutex_unlock(&tb->lock); in tb_disconnect_xdomain_paths()
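
Lines 1083-1151 manage the DMA (XDomain) tunnels between the host NHI adapter and a peer domain, always under tb->lock. On teardown, the transmit/receive path and ring arguments are effectively treated as "match any" when negative, which is how the hotplug handler's __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1) call at line 1231 removes every DMA path to a vanished peer. A one-liner model of that wildcard match (hypothetical helper, not the driver's tunnel-matching code):

    #include <stdio.h>

    /* -1 matches anything, a concrete value must match exactly */
    static int path_matches(int wanted, int actual)
    {
            return wanted < 0 || wanted == actual;
    }

    int main(void)
    {
            printf("%d\n", path_matches(-1, 8));    /* wildcard    -> 1 */
            printf("%d\n", path_matches(8, 8));     /* exact match -> 1 */
            printf("%d\n", path_matches(8, 9));     /* mismatch    -> 0 */
            return 0;
    }
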
1166 struct tb *tb = ev->tb; in tb_handle_hotplug() local
1167 struct tb_cm *tcm = tb_priv(tb); in tb_handle_hotplug()
1172 pm_runtime_get_sync(&tb->dev); in tb_handle_hotplug()
1174 mutex_lock(&tb->lock); in tb_handle_hotplug()
1178 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_hotplug()
1180 tb_warn(tb, in tb_handle_hotplug()
1186 tb_warn(tb, in tb_handle_hotplug()
1193 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", in tb_handle_hotplug()
1206 tb_free_invalid_tunnels(tb); in tb_handle_hotplug()
1216 tb_tunnel_dp(tb); in tb_handle_hotplug()
1231 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); in tb_handle_hotplug()
1235 tb_dp_resource_unavailable(tb, port); in tb_handle_hotplug()
1249 tb_dp_resource_available(tb, port); in tb_handle_hotplug()
1259 mutex_unlock(&tb->lock); in tb_handle_hotplug()
1261 pm_runtime_mark_last_busy(&tb->dev); in tb_handle_hotplug()
1262 pm_runtime_put_autosuspend(&tb->dev); in tb_handle_hotplug()
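
tb_handle_hotplug() (lines 1166-1262) brackets all of its work between a runtime-PM reference and the domain lock: pm_runtime_get_sync() at line 1172 and mutex_lock() at line 1174 on the way in, a single unlock/put exit path at lines 1259-1262 on the way out. A skeletal stand-alone sketch of that bracket, with printfs standing in for the PM and locking calls; the hotplug_active guard is an assumption carried over from the full function body, not one of the hits above:

    #include <stdbool.h>
    #include <stdio.h>

    static bool hotplug_active = true;      /* mirrors tcm->hotplug_active      */

    static void handle_hotplug(unsigned long long route, int port, bool unplug)
    {
            printf("pm_runtime_get\n");     /* pm_runtime_get_sync(&tb->dev)    */
            printf("lock domain\n");        /* mutex_lock(&tb->lock)            */

            if (!hotplug_active)            /* events racing suspend/teardown   */
                    goto out;

            printf("handle %llx:%d unplug=%d\n", route, port, unplug);
    out:
            printf("unlock domain\n");      /* mutex_unlock(&tb->lock)          */
            printf("pm_runtime_put\n");     /* mark_last_busy + put_autosuspend */
    }

    int main(void)
    {
            handle_hotplug(0x1, 3, true);
            return 0;
    }
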
1272 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, in tb_handle_event() argument
1279 tb_warn(tb, "unexpected event %#x, ignoring\n", type); in tb_handle_event()
1285 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { in tb_handle_event()
1286 tb_warn(tb, "could not ack plug event on %llx:%x\n", route, in tb_handle_event()
1290 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); in tb_handle_event()
1293 static void tb_stop(struct tb *tb) in tb_stop() argument
1295 struct tb_cm *tcm = tb_priv(tb); in tb_stop()
1311 tb_switch_remove(tb->root_switch); in tb_stop()
1336 static int tb_start(struct tb *tb) in tb_start() argument
1338 struct tb_cm *tcm = tb_priv(tb); in tb_start()
1341 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); in tb_start()
1342 if (IS_ERR(tb->root_switch)) in tb_start()
1343 return PTR_ERR(tb->root_switch); in tb_start()
1350 tb->root_switch->no_nvm_upgrade = true; in tb_start()
1352 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); in tb_start()
1354 ret = tb_switch_configure(tb->root_switch); in tb_start()
1356 tb_switch_put(tb->root_switch); in tb_start()
1361 ret = tb_switch_add(tb->root_switch); in tb_start()
1363 tb_switch_put(tb->root_switch); in tb_start()
1368 tb_switch_tmu_enable(tb->root_switch); in tb_start()
1370 tb_scan_switch(tb->root_switch); in tb_start()
1372 tb_discover_tunnels(tb->root_switch); in tb_start()
1377 tb_create_usb3_tunnels(tb->root_switch); in tb_start()
1379 tb_add_dp_resources(tb->root_switch); in tb_start()
1381 device_for_each_child(&tb->root_switch->dev, NULL, in tb_start()
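
tb_start() (lines 1336-1381) allocates and configures the root switch before anything else, and the IS_ERR()/PTR_ERR() checks at lines 1342-1343 are the usual kernel idiom of carrying an errno value inside the returned pointer. A stand-alone model of that idiom, assuming the standard "top 4095 pointer values encode errors" convention:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long err)       { return (void *)(intptr_t)err; }
    static int   is_err(const void *p)   { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
    static long  ptr_err(const void *p)  { return (long)(intptr_t)p; }

    static int the_switch;                          /* dummy "allocated" object */

    static void *switch_alloc(int simulate_failure) /* shaped like tb_switch_alloc() */
    {
            return simulate_failure ? err_ptr(-12 /* ENOMEM */) : &the_switch;
    }

    int main(void)
    {
            void *sw = switch_alloc(1);

            if (is_err(sw)) {                       /* the lines 1342-1343 pattern */
                    printf("alloc failed: %ld\n", ptr_err(sw));
                    return 1;
            }
            return 0;
    }
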
1389 static int tb_suspend_noirq(struct tb *tb) in tb_suspend_noirq() argument
1391 struct tb_cm *tcm = tb_priv(tb); in tb_suspend_noirq()
1393 tb_dbg(tb, "suspending...\n"); in tb_suspend_noirq()
1394 tb_disconnect_and_release_dp(tb); in tb_suspend_noirq()
1395 tb_switch_suspend(tb->root_switch, false); in tb_suspend_noirq()
1397 tb_dbg(tb, "suspend finished\n"); in tb_suspend_noirq()
1428 static int tb_resume_noirq(struct tb *tb) in tb_resume_noirq() argument
1430 struct tb_cm *tcm = tb_priv(tb); in tb_resume_noirq()
1433 tb_dbg(tb, "resuming...\n"); in tb_resume_noirq()
1436 tb_switch_reset(tb->root_switch); in tb_resume_noirq()
1438 tb_switch_resume(tb->root_switch); in tb_resume_noirq()
1439 tb_free_invalid_tunnels(tb); in tb_resume_noirq()
1440 tb_free_unplugged_children(tb->root_switch); in tb_resume_noirq()
1441 tb_restore_children(tb->root_switch); in tb_resume_noirq()
1449 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); in tb_resume_noirq()
1454 tb_dbg(tb, "resume finished\n"); in tb_resume_noirq()
1481 static int tb_freeze_noirq(struct tb *tb) in tb_freeze_noirq() argument
1483 struct tb_cm *tcm = tb_priv(tb); in tb_freeze_noirq()
1489 static int tb_thaw_noirq(struct tb *tb) in tb_thaw_noirq() argument
1491 struct tb_cm *tcm = tb_priv(tb); in tb_thaw_noirq()
1497 static void tb_complete(struct tb *tb) in tb_complete() argument
1504 mutex_lock(&tb->lock); in tb_complete()
1505 if (tb_free_unplugged_xdomains(tb->root_switch)) in tb_complete()
1506 tb_scan_switch(tb->root_switch); in tb_complete()
1507 mutex_unlock(&tb->lock); in tb_complete()
1510 static int tb_runtime_suspend(struct tb *tb) in tb_runtime_suspend() argument
1512 struct tb_cm *tcm = tb_priv(tb); in tb_runtime_suspend()
1514 mutex_lock(&tb->lock); in tb_runtime_suspend()
1515 tb_switch_suspend(tb->root_switch, true); in tb_runtime_suspend()
1517 mutex_unlock(&tb->lock); in tb_runtime_suspend()
1525 struct tb *tb = tcm_to_tb(tcm); in tb_remove_work() local
1527 mutex_lock(&tb->lock); in tb_remove_work()
1528 if (tb->root_switch) { in tb_remove_work()
1529 tb_free_unplugged_children(tb->root_switch); in tb_remove_work()
1530 tb_free_unplugged_xdomains(tb->root_switch); in tb_remove_work()
1532 mutex_unlock(&tb->lock); in tb_remove_work()
1535 static int tb_runtime_resume(struct tb *tb) in tb_runtime_resume() argument
1537 struct tb_cm *tcm = tb_priv(tb); in tb_runtime_resume()
1540 mutex_lock(&tb->lock); in tb_runtime_resume()
1541 tb_switch_resume(tb->root_switch); in tb_runtime_resume()
1542 tb_free_invalid_tunnels(tb); in tb_runtime_resume()
1543 tb_restore_children(tb->root_switch); in tb_runtime_resume()
1547 mutex_unlock(&tb->lock); in tb_runtime_resume()
1554 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); in tb_runtime_resume()
1638 struct tb *tb_probe(struct tb_nhi *nhi) in tb_probe()
1641 struct tb *tb; in tb_probe() local
1643 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); in tb_probe()
1644 if (!tb) in tb_probe()
1648 tb->security_level = TB_SECURITY_USER; in tb_probe()
1650 tb->security_level = TB_SECURITY_NOPCIE; in tb_probe()
1652 tb->cm_ops = &tb_cm_ops; in tb_probe()
1654 tcm = tb_priv(tb); in tb_probe()
1659 tb_dbg(tb, "using software connection manager\n"); in tb_probe()
1664 return tb; in tb_probe()
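
tb_probe() (lines 1638-1664) ties the listing together: tb_domain_alloc() at line 1643 reserves sizeof(*tcm) of private space behind struct tb (the space tb_priv() and tcm_to_tb() convert between), a security level is chosen, and tb->cm_ops at line 1652 is pointed at the software connection manager's operation table so the generic domain code can call back into the hooks listed above (tb_start, tb_stop, tb_handle_event, the suspend/resume callbacks, and so on). A stand-alone model of such an ops table, with made-up hook names rather than the real tb_cm_ops members:

    #include <stdio.h>

    /* the generic core only sees this table; the implementation stays private */
    struct cm_ops {
            int  (*start)(void);
            void (*stop)(void);
            void (*handle_event)(int type);
    };

    static int  sw_start(void)            { printf("start\n");   return 0; }
    static void sw_stop(void)             { printf("stop\n");              }
    static void sw_handle_event(int type) { printf("event %d\n", type);    }

    static const struct cm_ops sw_cm_ops = {
            .start        = sw_start,
            .stop         = sw_stop,
            .handle_event = sw_handle_event,
    };

    int main(void)
    {
            if (sw_cm_ops.start() == 0)
                    sw_cm_ops.handle_event(0);
            sw_cm_ops.stop();
            return 0;
    }
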