Lines matching full:tcm — every hit below uses the struct tb_cm connection-manager state; each entry shows the source line number, the matching line, and its enclosing function.

42 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)  in tcm_to_tb()  argument
44 return ((void *)tcm - sizeof(struct tb)); in tcm_to_tb()
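The two hits above are the whole back-pointer helper: tcm_to_tb() walks from the private area back to the owning struct tb with plain pointer arithmetic. That only works because the domain is allocated as one block with the connection-manager state placed directly after struct tb (see the tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)) hit in tb_probe() at the end of this listing). A minimal userspace sketch of that layout trick, with dummy struct fields standing in for the real ones:

    #include <stdio.h>
    #include <stdlib.h>

    struct tb { int index; };              /* stand-in for the domain */
    struct tb_cm { int hotplug_active; };  /* stand-in for the CM state */

    /* The private area starts right after struct tb in the same block. */
    static void *tb_priv(struct tb *tb)
    {
            return (char *)tb + sizeof(*tb);
    }

    /* So walking back by sizeof(struct tb) recovers the domain. */
    static struct tb *tcm_to_tb(struct tb_cm *tcm)
    {
            return (struct tb *)((char *)tcm - sizeof(struct tb));
    }

    int main(void)
    {
            struct tb *tb = calloc(1, sizeof(*tb) + sizeof(struct tb_cm));
            struct tb_cm *tcm = tb_priv(tb);

            printf("round trip ok: %d\n", tcm_to_tb(tcm) == tb);
            free(tb);
            return 0;
    }
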
55 static void tb_init_bandwidth_groups(struct tb_cm *tcm) in tb_init_bandwidth_groups() argument
59 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { in tb_init_bandwidth_groups()
60 struct tb_bandwidth_group *group = &tcm->groups[i]; in tb_init_bandwidth_groups()
62 group->tb = tcm_to_tb(tcm); in tb_init_bandwidth_groups()
80 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm) in tb_find_free_bandwidth_group() argument
84 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { in tb_find_free_bandwidth_group()
85 struct tb_bandwidth_group *group = &tcm->groups[i]; in tb_find_free_bandwidth_group()
95 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, in tb_attach_bandwidth_group() argument
107 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_attach_bandwidth_group()
122 group = tb_find_free_bandwidth_group(tcm); in tb_attach_bandwidth_group()
131 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, in tb_discover_bandwidth_group() argument
138 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { in tb_discover_bandwidth_group()
139 if (tcm->groups[i].index == index) { in tb_discover_bandwidth_group()
140 tb_bandwidth_group_attach_port(&tcm->groups[i], in); in tb_discover_bandwidth_group()
146 tb_attach_bandwidth_group(tcm, in, out); in tb_discover_bandwidth_group()
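Taken together, the bandwidth-group hits show a fixed-array pattern: tcm->groups is iterated with ARRAY_SIZE(), tb_discover_bandwidth_group() first tries to rejoin a group whose saved index matches, and otherwise tb_attach_bandwidth_group() ends up taking a slot from tb_find_free_bandwidth_group(). A simplified standalone sketch of that rejoin-or-fall-back logic (the field names and the "free means no attached DP IN ports" rule are assumptions, not the driver's exact criteria):

    #include <stddef.h>

    struct bw_group_sketch {
            int index;   /* saved group id */
            int nports;  /* attached DP IN ports; 0 treated as free here */
    };

    /* First group with nothing attached is considered free. */
    static struct bw_group_sketch *
    find_free_group(struct bw_group_sketch *g, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    if (!g[i].nports)
                            return &g[i];
            return NULL;
    }

    /* Rejoin by saved index if possible, else grab a free slot. */
    static struct bw_group_sketch *
    discover_group(struct bw_group_sketch *g, size_t n, int index)
    {
            for (size_t i = 0; i < n; i++)
                    if (g[i].index == index)
                            return &g[i];
            return find_free_group(g, n);
    }
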
183 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources() local
193 list_add_tail(&port->list, &tcm->dp_resources); in tb_add_dp_resources()
200 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources() local
209 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { in tb_remove_dp_resources()
219 struct tb_cm *tcm = tb_priv(tb); in tb_discover_dp_resource() local
222 list_for_each_entry(p, &tcm->dp_resources, list) { in tb_discover_dp_resource()
229 list_add_tail(&port->list, &tcm->dp_resources); in tb_discover_dp_resource()
234 struct tb_cm *tcm = tb_priv(tb); in tb_discover_dp_resources() local
237 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_discover_dp_resources()
246 struct tb_cm *tcm = tb_priv(sw->tb); in tb_enable_clx() local
271 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_enable_clx()
426 struct tb_cm *tcm = tb_priv(tb); in tb_discover_tunnels() local
429 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); in tb_discover_tunnels()
431 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_discover_tunnels()
447 tb_discover_bandwidth_group(tcm, in, out); in tb_discover_tunnels()
534 struct tb_cm *tcm = tb_priv(tb); in tb_find_tunnel() local
537 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_find_tunnel()
579 struct tb_cm *tcm = tb_priv(tb); in tb_available_bandwidth() local
658 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_available_bandwidth()
756 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_usb3() local
819 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_tunnel_usb3()
882 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port() local
951 if (!tcm->hotplug_active) { in tb_scan_port()
1007 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
1070 struct tb_cm *tcm = tb_priv(tb); in tb_free_invalid_tunnels() local
1074 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in tb_free_invalid_tunnels()
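Every tunnel type in this file lands on the same tcm->tunnel_list once built (see the list_add_tail() hits for USB3, DP, PCIe, and XDomain tunnels), and teardown paths such as tb_free_invalid_tunnels() walk it with list_for_each_entry_safe() because they unlink entries mid-walk. A compact sketch of why the _safe variant matters, using a bare singly linked list instead of the kernel's list_head:

    #include <stdlib.h>

    struct tunnel_sketch {
            struct tunnel_sketch *next;
            int valid;
    };

    static struct tunnel_sketch *free_invalid_sketch(struct tunnel_sketch *head)
    {
            struct tunnel_sketch **link = &head, *t, *n;

            for (t = head; t; t = n) {
                    n = t->next;            /* save next first: the "_safe" part */
                    if (!t->valid) {
                            *link = n;      /* unlink before freeing */
                            free(t);
                    } else {
                            link = &t->next;
                    }
            }
            return head;
    }
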
1233 struct tb_cm *tcm = tb_priv(tb); in tb_recalc_estimated_bandwidth() local
1238 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { in tb_recalc_estimated_bandwidth()
1239 struct tb_bandwidth_group *group = &tcm->groups[i]; in tb_recalc_estimated_bandwidth()
1251 struct tb_cm *tcm = tb_priv(tb); in tb_find_dp_out() local
1256 list_for_each_entry(port, &tcm->dp_resources, list) { in tb_find_dp_out()
1288 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_dp() local
1305 list_for_each_entry(port, &tcm->dp_resources, list) { in tb_tunnel_dp()
1339 list_for_each_entry(tunnel, &tcm->tunnel_list, list) { in tb_tunnel_dp()
1362 if (!tb_attach_bandwidth_group(tcm, in, out)) in tb_tunnel_dp()
1391 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_tunnel_dp()
1448 struct tb_cm *tcm = tb_priv(tb); in tb_dp_resource_available() local
1454 list_for_each_entry(p, &tcm->dp_resources, list) { in tb_dp_resource_available()
1461 list_add_tail(&port->list, &tcm->dp_resources); in tb_dp_resource_available()
1469 struct tb_cm *tcm = tb_priv(tb); in tb_disconnect_and_release_dp() local
1476 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { in tb_disconnect_and_release_dp()
1481 while (!list_empty(&tcm->dp_resources)) { in tb_disconnect_and_release_dp()
1484 port = list_first_entry(&tcm->dp_resources, in tb_disconnect_and_release_dp()
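tb_disconnect_and_release_dp() drains tcm->dp_resources with a pop-first loop rather than an iterator, the usual idiom when every element is being removed. Sketched with a plain pointer list (names are illustrative):

    #include <stdlib.h>

    struct dp_port_sketch {
            struct dp_port_sketch *next;
    };

    /* Pop-first until empty, mirroring while (!list_empty(...)) above. */
    static void drain_dp_resources_sketch(struct dp_port_sketch **head)
    {
            while (*head) {
                    struct dp_port_sketch *port = *head;  /* list_first_entry */
                    *head = port->next;                   /* unlink */
                    free(port);                           /* release resource */
            }
    }
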
1514 struct tb_cm *tcm = tb_priv(tb); in tb_tunnel_pci() local
1551 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_tunnel_pci()
1559 struct tb_cm *tcm = tb_priv(tb); in tb_approve_xdomain_paths() local
1591 list_add_tail(&tunnel->list, &tcm->tunnel_list); in tb_approve_xdomain_paths()
1608 struct tb_cm *tcm = tb_priv(tb); in __tb_disconnect_xdomain_paths() local
1617 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in __tb_disconnect_xdomain_paths()
1661 struct tb_cm *tcm = tb_priv(tb); in tb_handle_hotplug() local
1669 if (!tcm->hotplug_active) in tb_handle_hotplug()
1891 struct tb_cm *tcm = tb_priv(tb); in tb_handle_dp_bandwidth_request() local
1897 if (!tcm->hotplug_active) in tb_handle_dp_bandwidth_request()
2046 struct tb_cm *tcm = tb_priv(tb); in tb_stop() local
2050 cancel_delayed_work(&tcm->remove_work); in tb_stop()
2052 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in tb_stop()
2063 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ in tb_stop()
2089 struct tb_cm *tcm = tb_priv(tb); in tb_start() local
2146 tcm->hotplug_active = true; in tb_start()
2152 struct tb_cm *tcm = tb_priv(tb); in tb_suspend_noirq() local
2157 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ in tb_suspend_noirq()
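The hotplug_active hits trace one flag through the whole driver lifecycle: event handlers (tb_handle_hotplug(), tb_handle_dp_bandwidth_request()) and tb_scan_port() bail out when it is clear, stop/suspend paths clear it before tearing tunnels down, and start/resume paths set it again last. A userspace sketch of that gate, with a pthread mutex standing in for whatever lock the driver actually holds (an assumption here):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool hotplug_active;

    /* Deferred event work: quit early if stop/suspend already ran. */
    static void handle_hotplug_sketch(void)
    {
            pthread_mutex_lock(&domain_lock);
            if (!hotplug_active) {
                    pthread_mutex_unlock(&domain_lock);
                    return;
            }
            /* ... scan ports, build or free tunnels ... */
            pthread_mutex_unlock(&domain_lock);
    }

    /* Suspend/stop: clear the flag first so queued work cannot race. */
    static void suspend_sketch(void)
    {
            pthread_mutex_lock(&domain_lock);
            hotplug_active = false;   /* "signal tb_handle_hotplug to quit" */
            pthread_mutex_unlock(&domain_lock);
            /* ... tear down tunnels ... */
    }
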
2196 struct tb_cm *tcm = tb_priv(tb); in tb_resume_noirq() local
2226 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { in tb_resume_noirq()
2235 if (!list_empty(&tcm->tunnel_list)) { in tb_resume_noirq()
2244 tcm->hotplug_active = true; in tb_resume_noirq()
2274 struct tb_cm *tcm = tb_priv(tb); in tb_freeze_noirq() local
2276 tcm->hotplug_active = false; in tb_freeze_noirq()
2282 struct tb_cm *tcm = tb_priv(tb); in tb_thaw_noirq() local
2284 tcm->hotplug_active = true; in tb_thaw_noirq()
2303 struct tb_cm *tcm = tb_priv(tb); in tb_runtime_suspend() local
2307 tcm->hotplug_active = false; in tb_runtime_suspend()
2315 struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); in tb_remove_work() local
2316 struct tb *tb = tcm_to_tb(tcm); in tb_remove_work()
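tb_remove_work() is the one place that has to recover its context from a work item rather than from a struct tb argument: container_of() hops from the embedded work to the tb_cm, and tcm_to_tb() then hops to the domain. A freestanding sketch of the first hop, with a local container_of and stand-in types:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };
    struct delayed_work { struct work work; };

    struct tb_cm_sketch {
            struct delayed_work remove_work;
    };

    /* The callback receives the inner work and climbs back out. */
    static void remove_work_sketch(struct work *work)
    {
            struct tb_cm_sketch *tcm =
                    container_of(work, struct tb_cm_sketch, remove_work.work);
            (void)tcm;  /* ... tcm_to_tb(tcm) would then yield the domain ... */
    }
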
2328 struct tb_cm *tcm = tb_priv(tb); in tb_runtime_resume() local
2335 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) in tb_runtime_resume()
2337 tcm->hotplug_active = true; in tb_runtime_resume()
2345 queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); in tb_runtime_resume()
2436 struct tb_cm *tcm; in tb_probe() local
2439 tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); in tb_probe()
2450 tcm = tb_priv(tb); in tb_probe()
2451 INIT_LIST_HEAD(&tcm->tunnel_list); in tb_probe()
2452 INIT_LIST_HEAD(&tcm->dp_resources); in tb_probe()
2453 INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); in tb_probe()
2454 tb_init_bandwidth_groups(tcm); in tb_probe()
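tb_probe() ties the earlier hits together: the domain is allocated with sizeof(*tcm) bytes of private space (which is what makes tcm_to_tb() legal), tb_priv() hands that space back, and the lists, delayed work, and bandwidth groups are initialised before tb_start() later enables hotplug. A sketch of the same initialisation order with minimal stand-in types:

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h;
            h->prev = h;
    }

    struct tb_cm_sketch {
            struct list_head tunnel_list;   /* active tunnels */
            struct list_head dp_resources;  /* DP ports available for tunnels */
            int hotplug_active;
    };

    /* Mirror of the tb_probe() tail: empty lists first, hotplug off
     * until start time. */
    static void cm_init_sketch(struct tb_cm_sketch *tcm)
    {
            INIT_LIST_HEAD(&tcm->tunnel_list);
            INIT_LIST_HEAD(&tcm->dp_resources);
            tcm->hotplug_active = 0;
    }
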