Lines matching refs: tb (identifier cross-reference; each entry is "source line, code, enclosing function". The fragments appear to come from the Linux Thunderbolt software connection manager, drivers/thunderbolt/tb.c.)

36 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)  in tcm_to_tb()
38 return ((void *)tcm - sizeof(struct tb)); in tcm_to_tb()
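The pointer arithmetic on line 38 only works because of how the domain lays out its private data. A minimal sketch of that relationship, assuming tb_priv() returns the connection-manager area that tb_domain_alloc() places directly behind struct tb:

/*
 * Sketch of the layout tcm_to_tb() relies on (assumption: the private
 * area requested via tb_domain_alloc(nhi, sizeof(struct tb_cm)) is
 * allocated contiguously after struct tb):
 *
 *   +----------------+  <- struct tb *tb
 *   |   struct tb    |
 *   +----------------+  <- struct tb_cm *tcm == tb_priv(tb)
 *   |  struct tb_cm  |
 *   +----------------+
 */
struct tb_cm *tcm = tb_priv(tb);                        /* forward */
struct tb *same_tb = (void *)tcm - sizeof(struct tb);   /* tcm_to_tb(): reverse */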
43 struct tb *tb; member
51 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) in tb_queue_hotplug() argument
59 ev->tb = tb; in tb_queue_hotplug()
64 queue_work(tb->wq, &ev->work); in tb_queue_hotplug()
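The fragments above show only one assignment and the final queue_work(). A sketch of the whole helper, reconstructed under the assumption that struct tb_hotplug_event carries work/route/port/unplug members alongside the tb member quoted on line 43:

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;                    /* line 59 above */
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);  /* line 64: runs on the domain workqueue */
}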
71 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
88 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
107 struct tb *tb = sw->tb; in tb_discover_tunnels() local
108 struct tb_cm *tcm = tb_priv(tb); in tb_discover_tunnels()
116 tunnel = tb_tunnel_discover_dp(tb, port); in tb_discover_tunnels()
120 tunnel = tb_tunnel_discover_pci(tb, port); in tb_discover_tunnels()
124 tunnel = tb_tunnel_discover_usb3(tb, port); in tb_discover_tunnels()
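Lines 116-124 are the three arms of a dispatch over the adapter type. A condensed sketch of the discovery loop, assuming the usual tb_switch_for_each_port() iteration and that discovered tunnels are collected on the connection manager's tunnel list:

tb_switch_for_each_port(sw, port) {
        struct tb_tunnel *tunnel = NULL;

        switch (port->config.type) {
        case TB_TYPE_DP_HDMI_IN:
                tunnel = tb_tunnel_discover_dp(tb, port);
                break;
        case TB_TYPE_PCIE_DOWN:
                tunnel = tb_tunnel_discover_pci(tb, port);
                break;
        case TB_TYPE_USB3_DOWN:
                tunnel = tb_tunnel_discover_usb3(tb, port);
                break;
        default:
                break;
        }

        if (tunnel)     /* adopt a tunnel the boot firmware set up */
                list_add_tail(&tunnel->list, &tcm->tunnel_list);
}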
182 struct tb *tb = sw->tb; in tb_scan_xdomain() local
187 xd = tb_xdomain_find_by_route(tb, route); in tb_scan_xdomain()
193 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, in tb_scan_xdomain()
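This is a find-or-allocate pattern: an XDomain already known for the route short-circuits, otherwise a new one is allocated against the root switch UUID. A sketch; the tb_downstream_route() lookup, the reference drop, and recording the result on the port are assumptions around the quoted lines:

u64 route = tb_downstream_route(port);
struct tb_xdomain *xd;

xd = tb_xdomain_find_by_route(tb, route);       /* line 187 */
if (xd) {
        tb_xdomain_put(xd);     /* already known, drop the reference */
        return;
}

xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, NULL);
if (xd) {
        port->xdomain = xd;     /* assumption: remembered on the port */
        tb_xdomain_add(xd);     /* register with the XDomain core */
}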
256 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, in tb_find_tunnel() argument
260 struct tb_cm *tcm = tb_priv(tb); in tb_find_tunnel()
274 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, in tb_find_first_usb3_tunnel() argument
288 if (sw == tb->root_switch) in tb_find_first_usb3_tunnel()
292 port = tb_port_at(tb_route(sw), tb->root_switch); in tb_find_first_usb3_tunnel()
294 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); in tb_find_first_usb3_tunnel()
298 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); in tb_find_first_usb3_tunnel()
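tb_find_tunnel() is a linear scan of the domain's tunnel list. A sketch of the matching logic; the NULL arguments at the call sites on lines 298 and 1092 suggest either endpoint acts as a wildcard when not given:

list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
        if (tunnel->type == type &&
            ((src_port && src_port == tunnel->src_port) ||
             (dst_port && dst_port == tunnel->dst_port)))
                return tunnel;
}
return NULL;    /* no tunnel of this type touches either endpoint */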
301 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, in tb_available_bandwidth() argument
305 struct tb_cm *tcm = tb_priv(tb); in tb_available_bandwidth()
311 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_available_bandwidth()
394 static int tb_release_unused_usb3_bandwidth(struct tb *tb, in tb_release_unused_usb3_bandwidth() argument
400 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_release_unused_usb3_bandwidth()
404 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, in tb_reclaim_usb3_bandwidth() argument
410 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); in tb_reclaim_usb3_bandwidth()
414 tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); in tb_reclaim_usb3_bandwidth()
420 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, in tb_reclaim_usb3_bandwidth()
423 tb_warn(tb, "failed to calculate available bandwidth\n"); in tb_reclaim_usb3_bandwidth()
427 tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", in tb_reclaim_usb3_bandwidth()
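Lines 404-427 quote most of tb_reclaim_usb3_bandwidth(). A reconstruction; the final give-back step is assumed to be tb_tunnel_reclaim_available_bandwidth():

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
                                      struct tb_port *dst_port)
{
        int ret, available_up, available_down;
        struct tb_tunnel *tunnel;

        tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
        if (!tunnel)
                return;

        tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

        /* The first-hop USB3 tunnel caps the whole branch */
        ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
                                     &available_up, &available_down);
        if (ret) {
                tb_warn(tb, "failed to calculate available bandwidth\n");
                return;
        }

        tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
               available_up, available_down);

        /* assumption: hand the measured headroom back to the tunnel */
        tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
                                              &available_down);
}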
433 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) in tb_tunnel_usb3() argument
438 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_usb3()
469 ret = tb_release_unused_usb3_bandwidth(tb, down, up); in tb_tunnel_usb3()
474 ret = tb_available_bandwidth(tb, down, up, &available_up, in tb_tunnel_usb3()
482 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, in tb_tunnel_usb3()
498 tb_reclaim_usb3_bandwidth(tb, down, up); in tb_tunnel_usb3()
506 tb_reclaim_usb3_bandwidth(tb, down, up); in tb_tunnel_usb3()
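Lines 469-506 trace the characteristic release → measure → allocate → activate → reclaim sequence, with reclaim sites on both the success and the error paths. A condensed sketch of the function body; 'up' and 'down' are the USB3 adapter pair located earlier in the function:

ret = tb_release_unused_usb3_bandwidth(tb, down, up);   /* line 469 */
if (ret)
        return ret;

ret = tb_available_bandwidth(tb, down, up, &available_up,
                             &available_down);          /* line 474 */
if (ret)
        goto err_reclaim;

tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
                              available_down);          /* line 482 */
if (!tunnel) {
        ret = -ENOMEM;
        goto err_reclaim;
}

if (tb_tunnel_activate(tunnel)) {
        ret = -EIO;
        goto err_free;
}

list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, down, up);        /* line 498: success */
return 0;

err_free:
tb_tunnel_free(tunnel);
err_reclaim:
tb_reclaim_usb3_bandwidth(tb, down, up);        /* line 506: failure */
return ret;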
517 ret = tb_tunnel_usb3(sw->tb, sw); in tb_create_usb3_tunnels()
556 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port()
566 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, in tb_scan_port()
587 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, in tb_scan_port()
660 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
670 struct tb *tb; in tb_deactivate_and_free_tunnel() local
678 tb = tunnel->tb; in tb_deactivate_and_free_tunnel()
697 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); in tb_deactivate_and_free_tunnel()
714 static void tb_free_invalid_tunnels(struct tb *tb) in tb_free_invalid_tunnels() argument
716 struct tb_cm *tcm = tb_priv(tb); in tb_free_invalid_tunnels()
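A likely shape for tb_free_invalid_tunnels(): a safe-iteration sweep that tears down any tunnel whose path went away with an unplug. Sketch:

static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        /* _safe variant: tb_deactivate_and_free_tunnel() unlinks entries */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel))
                        tb_deactivate_and_free_tunnel(tunnel);
        }
}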
801 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) in tb_find_dp_out() argument
804 struct tb_cm *tcm = tb_priv(tb); in tb_find_dp_out()
807 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; in tb_find_dp_out()
827 p = tb_port_at(tb_route(port->sw), tb->root_switch); in tb_find_dp_out()
838 static void tb_tunnel_dp(struct tb *tb) in tb_tunnel_dp() argument
841 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_dp()
849 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); in tb_tunnel_dp()
864 out = tb_find_dp_out(tb, port); in tb_tunnel_dp()
872 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); in tb_tunnel_dp()
876 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); in tb_tunnel_dp()
897 ret = tb_release_unused_usb3_bandwidth(tb, in, out); in tb_tunnel_dp()
899 tb_warn(tb, "failed to release unused bandwidth\n"); in tb_tunnel_dp()
903 ret = tb_available_bandwidth(tb, in, out, &available_up, in tb_tunnel_dp()
908 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", in tb_tunnel_dp()
911 tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down); in tb_tunnel_dp()
923 tb_reclaim_usb3_bandwidth(tb, in, out); in tb_tunnel_dp()
929 tb_reclaim_usb3_bandwidth(tb, in, out); in tb_tunnel_dp()
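DP tunneling follows the same bandwidth discipline as USB3 above: temporarily release unused USB3 headroom, size the DP tunnel from what is left, then let USB3 reclaim whatever DP did not take, on both the success and failure paths. A condensed sketch of lines 897-929, as a hypothetical standalone helper:

static void tb_tunnel_dp_sketch(struct tb *tb, struct tb_port *in,
                                struct tb_port *out)
{
        struct tb_cm *tcm = tb_priv(tb);
        int available_up, available_down;
        struct tb_tunnel *tunnel;

        if (tb_release_unused_usb3_bandwidth(tb, in, out)) {
                tb_warn(tb, "failed to release unused bandwidth\n");
                return;
        }

        if (tb_available_bandwidth(tb, in, out, &available_up,
                                   &available_down))
                goto err_reclaim;

        tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
               available_up, available_down);

        tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up,
                                    available_down);    /* line 911 */
        if (!tunnel)
                goto err_reclaim;

        if (tb_tunnel_activate(tunnel)) {
                tb_tunnel_free(tunnel);
                goto err_reclaim;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        tb_reclaim_usb3_bandwidth(tb, in, out);         /* line 923: success */
        return;

err_reclaim:
        tb_reclaim_usb3_bandwidth(tb, in, out);         /* line 929: failure */
}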
939 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) in tb_dp_resource_unavailable() argument
954 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); in tb_dp_resource_unavailable()
962 tb_tunnel_dp(tb); in tb_dp_resource_unavailable()
965 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) in tb_dp_resource_available() argument
967 struct tb_cm *tcm = tb_priv(tb); in tb_dp_resource_available()
983 tb_tunnel_dp(tb); in tb_dp_resource_available()
986 static void tb_disconnect_and_release_dp(struct tb *tb) in tb_disconnect_and_release_dp() argument
988 struct tb_cm *tcm = tb_priv(tb); in tb_disconnect_and_release_dp()
1009 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) in tb_tunnel_pci() argument
1012 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_pci()
1030 tunnel = tb_tunnel_alloc_pci(tb, up, down); in tb_tunnel_pci()
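PCIe tunneling is the simplest of the three tunnel types: no bandwidth accounting, just pair the switch's PCIe upstream adapter with a downstream adapter on the parent and activate. A sketch; tb_find_pcie_down() is assumed to be a helper in the same file:

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *up, *down, *port;
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;       /* nothing to tunnel */

        /* The matching down adapter sits on the parent switch */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);      /* assumed helper */
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);     /* line 1030 */
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}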
1045 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in tb_approve_xdomain_paths() argument
1047 struct tb_cm *tcm = tb_priv(tb); in tb_approve_xdomain_paths()
1054 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); in tb_approve_xdomain_paths()
1056 mutex_lock(&tb->lock); in tb_approve_xdomain_paths()
1057 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring, in tb_approve_xdomain_paths()
1061 mutex_unlock(&tb->lock); in tb_approve_xdomain_paths()
1069 mutex_unlock(&tb->lock); in tb_approve_xdomain_paths()
1074 mutex_unlock(&tb->lock); in tb_approve_xdomain_paths()
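The lock/unlock fragments at lines 1056-1074 bracket a DMA tunnel setup with three exits, one unlock per exit. A sketch; the xd ring/path arguments quoted on line 1057 are assumed to continue with receive_ring/receive_path:

mutex_lock(&tb->lock);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                             xd->transmit_path, xd->receive_ring,
                             xd->receive_path);
if (!tunnel) {
        mutex_unlock(&tb->lock);        /* line 1061: alloc failure */
        return -ENOMEM;
}

if (tb_tunnel_activate(tunnel)) {
        tb_tunnel_free(tunnel);
        mutex_unlock(&tb->lock);        /* line 1069: activation failure */
        return -EIO;
}

list_add_tail(&tunnel->list, &tcm->tunnel_list);
mutex_unlock(&tb->lock);                /* line 1074: success */
return 0;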
1078 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in __tb_disconnect_xdomain_paths() argument
1092 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); in __tb_disconnect_xdomain_paths()
1096 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in tb_disconnect_xdomain_paths() argument
1099 mutex_lock(&tb->lock); in tb_disconnect_xdomain_paths()
1100 __tb_disconnect_xdomain_paths(tb, xd); in tb_disconnect_xdomain_paths()
1101 mutex_unlock(&tb->lock); in tb_disconnect_xdomain_paths()
1116 struct tb *tb = ev->tb; in tb_handle_hotplug() local
1117 struct tb_cm *tcm = tb_priv(tb); in tb_handle_hotplug()
1122 pm_runtime_get_sync(&tb->dev); in tb_handle_hotplug()
1124 mutex_lock(&tb->lock); in tb_handle_hotplug()
1128 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_hotplug()
1130 tb_warn(tb, in tb_handle_hotplug()
1136 tb_warn(tb, in tb_handle_hotplug()
1143 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", in tb_handle_hotplug()
1156 tb_free_invalid_tunnels(tb); in tb_handle_hotplug()
1166 tb_tunnel_dp(tb); in tb_handle_hotplug()
1181 __tb_disconnect_xdomain_paths(tb, xd); in tb_handle_hotplug()
1185 tb_dp_resource_unavailable(tb, port); in tb_handle_hotplug()
1199 tb_dp_resource_available(tb, port); in tb_handle_hotplug()
1209 mutex_unlock(&tb->lock); in tb_handle_hotplug()
1211 pm_runtime_mark_last_busy(&tb->dev); in tb_handle_hotplug()
1212 pm_runtime_put_autosuspend(&tb->dev); in tb_handle_hotplug()
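The PM and locking fragments (lines 1122, 1124, 1209-1212) form a fixed bracket around the event processing; every exit from the handler has to unwind both. In outline:

static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;

        /* Keep the domain powered while the event is processed */
        pm_runtime_get_sync(&tb->dev);

        mutex_lock(&tb->lock);
        /*
         * ... resolve ev->route/ev->port to a switch and port, then
         * the plug/unplug handling shown in lines 1128-1199 ...
         */
        mutex_unlock(&tb->lock);

        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);

        kfree(ev);      /* allocated in tb_queue_hotplug() */
}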
1222 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, in tb_handle_event() argument
1229 tb_warn(tb, "unexpected event %#x, ignoring\n", type); in tb_handle_event()
1235 if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { in tb_handle_event()
1236 tb_warn(tb, "could not ack plug event on %llx:%x\n", route, in tb_handle_event()
1240 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); in tb_handle_event()
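A reconstruction of the event callback, assuming the payload is a struct cfg_event_pkg and the route comes from tb_cfg_get_route(). The plug event is acknowledged before being queued so the hardware does not keep re-sending it:

static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}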
1243 static void tb_stop(struct tb *tb) in tb_stop() argument
1245 struct tb_cm *tcm = tb_priv(tb); in tb_stop()
1261 tb_switch_remove(tb->root_switch); in tb_stop()
1286 static int tb_start(struct tb *tb) in tb_start() argument
1288 struct tb_cm *tcm = tb_priv(tb); in tb_start()
1291 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); in tb_start()
1292 if (IS_ERR(tb->root_switch)) in tb_start()
1293 return PTR_ERR(tb->root_switch); in tb_start()
1300 tb->root_switch->no_nvm_upgrade = true; in tb_start()
1302 tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); in tb_start()
1304 ret = tb_switch_configure(tb->root_switch); in tb_start()
1306 tb_switch_put(tb->root_switch); in tb_start()
1311 ret = tb_switch_add(tb->root_switch); in tb_start()
1313 tb_switch_put(tb->root_switch); in tb_start()
1318 tb_switch_tmu_enable(tb->root_switch); in tb_start()
1320 tb_scan_switch(tb->root_switch); in tb_start()
1322 tb_discover_tunnels(tb->root_switch); in tb_start()
1327 tb_create_usb3_tunnels(tb->root_switch); in tb_start()
1329 tb_add_dp_resources(tb->root_switch); in tb_start()
1331 device_for_each_child(&tb->root_switch->dev, NULL, in tb_start()
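Lines 1291-1331 are the whole bring-up sequence in order. Condensed, with the error unwinding via tb_switch_put() as quoted; the final hotplug enable is an assumption:

static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        tb->root_switch->no_nvm_upgrade = true; /* host NVM not handled here */
        tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        tb_switch_tmu_enable(tb->root_switch);   /* time sync first */
        tb_scan_switch(tb->root_switch);         /* full topology walk */
        tb_discover_tunnels(tb->root_switch);    /* adopt boot-time tunnels */
        tb_create_usb3_tunnels(tb->root_switch); /* USB3 down the chain */
        tb_add_dp_resources(tb->root_switch);    /* register DP IN pool */
        /* line 1331: finalize children via device_for_each_child() */

        tcm->hotplug_active = true;              /* assumption: events on */
        return 0;
}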
1339 static int tb_suspend_noirq(struct tb *tb) in tb_suspend_noirq() argument
1341 struct tb_cm *tcm = tb_priv(tb); in tb_suspend_noirq()
1343 tb_dbg(tb, "suspending...\n"); in tb_suspend_noirq()
1344 tb_disconnect_and_release_dp(tb); in tb_suspend_noirq()
1345 tb_switch_suspend(tb->root_switch, false); in tb_suspend_noirq()
1347 tb_dbg(tb, "suspend finished\n"); in tb_suspend_noirq()
1378 static int tb_resume_noirq(struct tb *tb) in tb_resume_noirq() argument
1380 struct tb_cm *tcm = tb_priv(tb); in tb_resume_noirq()
1383 tb_dbg(tb, "resuming...\n"); in tb_resume_noirq()
1386 tb_switch_reset(tb->root_switch); in tb_resume_noirq()
1388 tb_switch_resume(tb->root_switch); in tb_resume_noirq()
1389 tb_free_invalid_tunnels(tb); in tb_resume_noirq()
1390 tb_free_unplugged_children(tb->root_switch); in tb_resume_noirq()
1391 tb_restore_children(tb->root_switch); in tb_resume_noirq()
1399 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); in tb_resume_noirq()
1404 tb_dbg(tb, "resume finished\n"); in tb_resume_noirq()
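The resume path mirrors tb_start() but must first invalidate anything the firmware set up and anything unplugged during sleep; surviving tunnels are restarted and the quoted 100ms sleep gives PCIe links time to train. A sketch of the body, with the restart loop and the final hotplug enable assumed:

tb_switch_reset(tb->root_switch);       /* drop firmware-made config */
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);

list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
        tb_tunnel_restart(tunnel);      /* re-activate surviving paths */

if (!list_empty(&tcm->tunnel_list)) {
        /* PCIe links need a moment to come back up */
        tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
        msleep(100);
}

tcm->hotplug_active = true;             /* let tb_handle_hotplug() proceed */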
1431 static int tb_freeze_noirq(struct tb *tb) in tb_freeze_noirq() argument
1433 struct tb_cm *tcm = tb_priv(tb); in tb_freeze_noirq()
1439 static int tb_thaw_noirq(struct tb *tb) in tb_thaw_noirq() argument
1441 struct tb_cm *tcm = tb_priv(tb); in tb_thaw_noirq()
1447 static void tb_complete(struct tb *tb) in tb_complete() argument
1454 mutex_lock(&tb->lock); in tb_complete()
1455 if (tb_free_unplugged_xdomains(tb->root_switch)) in tb_complete()
1456 tb_scan_switch(tb->root_switch); in tb_complete()
1457 mutex_unlock(&tb->lock); in tb_complete()
1460 static int tb_runtime_suspend(struct tb *tb) in tb_runtime_suspend() argument
1462 struct tb_cm *tcm = tb_priv(tb); in tb_runtime_suspend()
1464 mutex_lock(&tb->lock); in tb_runtime_suspend()
1465 tb_switch_suspend(tb->root_switch, true); in tb_runtime_suspend()
1467 mutex_unlock(&tb->lock); in tb_runtime_suspend()
1475 struct tb *tb = tcm_to_tb(tcm); in tb_remove_work() local
1477 mutex_lock(&tb->lock); in tb_remove_work()
1478 if (tb->root_switch) { in tb_remove_work()
1479 tb_free_unplugged_children(tb->root_switch); in tb_remove_work()
1480 tb_free_unplugged_xdomains(tb->root_switch); in tb_remove_work()
1482 mutex_unlock(&tb->lock); in tb_remove_work()
1485 static int tb_runtime_resume(struct tb *tb) in tb_runtime_resume() argument
1487 struct tb_cm *tcm = tb_priv(tb); in tb_runtime_resume()
1490 mutex_lock(&tb->lock); in tb_runtime_resume()
1491 tb_switch_resume(tb->root_switch); in tb_runtime_resume()
1492 tb_free_invalid_tunnels(tb); in tb_runtime_resume()
1493 tb_restore_children(tb->root_switch); in tb_runtime_resume()
1497 mutex_unlock(&tb->lock); in tb_runtime_resume()
1504 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); in tb_runtime_resume()
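The delayed remove_work on line 1504 exists to avoid a deadlock: removing an unplugged device from inside runtime resume could itself runtime-resume that device, so the cleanup is deferred to tb_remove_work() (lines 1475-1482) on the domain workqueue. In outline, with the tunnel restart elided:

mutex_lock(&tb->lock);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
/* ... restart tunnels as in tb_resume_noirq() ... */
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);

/* Defer unplug cleanup; doing it here could recurse into resume */
queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));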
1524 struct tb *tb_probe(struct tb_nhi *nhi) in tb_probe()
1527 struct tb *tb; in tb_probe() local
1529 tb = tb_domain_alloc(nhi, sizeof(*tcm)); in tb_probe()
1530 if (!tb) in tb_probe()
1533 tb->security_level = TB_SECURITY_USER; in tb_probe()
1534 tb->cm_ops = &tb_cm_ops; in tb_probe()
1536 tcm = tb_priv(tb); in tb_probe()
1541 return tb; in tb_probe()
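tb_probe() ties the above together: the domain is allocated with room for the private struct tb_cm behind it (the layout tcm_to_tb() depends on), the software connection-manager ops are installed, and the private state is initialized. A reconstruction; the list and delayed-work initialization is an assumption consistent with the tcm->remove_work queued on line 1504:

struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(*tcm)); /* private area follows tb */
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;   /* user approves devices */
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);       /* assumption */
        INIT_LIST_HEAD(&tcm->dp_resources);      /* assumption */
        INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

        return tb;
}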