
Lines Matching +full:sw +full:- +full:reset +full:- +full:number

1 // SPDX-License-Identifier: GPL-2.0
4 * the Thunderbolt host controller performing most of the low-level
53 * struct icm - Internal connection manager private data
62 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
67 * @cio_reset: Trigger CIO reset
75 * @xdomain_connected: Handle XDomain connected ICM message
76 * @xdomain_disconnected: Handle XDomain disconnected ICM message
142 if (!ep->len) in parse_intel_vss()
144 if (ep_name + ep->len > end) in parse_intel_vss()
147 if (ep->type == EP_NAME_INTEL_VSS) in parse_intel_vss()
148 return (const struct intel_vss *)ep->data; in parse_intel_vss()
150 ep_name += ep->len; in parse_intel_vss()
162 return !!(vss->flags & INTEL_VSS_FLAGS_RTD3); in intel_vss_is_rtd3()
169 return ((void *)icm - sizeof(struct tb)); in icm_to_tb()
176 link = depth ? route >> ((depth - 1) * 8) : route; in phy_port_from_route()
182 return link ? ((link - 1) ^ 0x01) + 1 : 0; in dual_link_from_link()
193 return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; in get_parent_route()
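The three helpers above are pure route-string arithmetic: a route is a 64-bit value carrying one downstream-port number per hop, one byte per depth level (the excerpt shifts by (depth - 1) * 8 and masks a byte per hop). The stand-alone sketch below reproduces that math outside the driver, assuming TB_ROUTE_SHIFT is 8 and masking explicitly where the driver relies on u8 truncation; it is an illustration, not driver code.

#include <stdint.h>
#include <stdio.h>

#define TB_ROUTE_SHIFT 8	/* assumption: one byte of route per hop */

static uint8_t link_from_route(uint64_t route, unsigned int depth)
{
	/* phy_port_from_route() extracts this link number and then converts
	 * it to a physical port with tb_phy_port_from_link() */
	return depth ? (route >> ((depth - 1) * TB_ROUTE_SHIFT)) & 0xff : route & 0xff;
}

static uint8_t dual_link_from_link(uint8_t link)
{
	/* links come in pairs (1,2), (3,4), ...; XOR of the zero-based index
	 * flips to the sibling link of the same physical port */
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static uint64_t get_parent_route(uint64_t route, unsigned int depth)
{
	/* clearing the byte of the deepest hop yields the parent's route */
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

int main(void)
{
	uint64_t route = 0x0301;	/* hop 1 -> port 1, hop 2 -> port 3 */

	printf("link at depth 2:   %u\n", (unsigned)link_from_route(route, 2));
	printf("sibling of link 3: %u\n", (unsigned)dual_link_from_link(3));
	printf("parent route:      0x%llx\n",
	       (unsigned long long)get_parent_route(route, 2));
	return 0;
}

Compiled as ordinary userspace C, this prints link 3, sibling link 4 and parent route 0x1 for the two-hop route 0x0301.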
202 pci_read_config_dword(icm->upstream_port, in pci2cio_wait_completion()
203 icm->vnd_cap + PCIE2CIO_CMD, &cmd); in pci2cio_wait_completion()
213 return -ETIMEDOUT; in pci2cio_wait_completion()
219 struct pci_dev *pdev = icm->upstream_port; in pcie2cio_read()
220 int ret, vnd_cap = icm->vnd_cap; in pcie2cio_read()
240 struct pci_dev *pdev = icm->upstream_port; in pcie2cio_write()
241 int vnd_cap = icm->vnd_cap; in pcie2cio_write()
258 const struct icm_pkg_header *res_hdr = pkg->buffer; in icm_match()
259 const struct icm_pkg_header *req_hdr = req->request; in icm_match()
261 if (pkg->frame.eof != req->response_type) in icm_match()
263 if (res_hdr->code != req_hdr->code) in icm_match()
271 const struct icm_pkg_header *hdr = pkg->buffer; in icm_copy()
273 if (hdr->packet_id < req->npackets) { in icm_copy()
274 size_t offset = hdr->packet_id * req->response_size; in icm_copy()
276 memcpy(req->response + offset, pkg->buffer, req->response_size); in icm_copy()
279 return hdr->packet_id == hdr->total_packets - 1; in icm_copy()
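icm_match() above accepts a response only when its end-of-frame type and ICM code match the pending request; icm_copy() then reassembles multi-frame responses by writing each packet at packet_id * response_size and signalling completion when packet_id reaches total_packets - 1. The sketch below isolates that indexing rule with hypothetical stand-in types (struct pkt_hdr and copy_packet() are not driver names):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for the header fields icm_copy() reads */
struct pkt_hdr {
	unsigned int packet_id;		/* 0-based index of this packet */
	unsigned int total_packets;	/* packets making up the response */
};

/* Copy one packet into its slot; returns true once the response is complete. */
static bool copy_packet(unsigned char *response, size_t response_size,
			size_t npackets, const struct pkt_hdr *hdr,
			const void *buffer)
{
	if (hdr->packet_id < npackets) {
		size_t offset = hdr->packet_id * response_size;

		memcpy(response + offset, buffer, response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}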
294 return -ENOMEM; in icm_request()
296 req->match = icm_match; in icm_request()
297 req->copy = icm_copy; in icm_request()
298 req->request = request; in icm_request()
299 req->request_size = request_size; in icm_request()
300 req->request_type = TB_CFG_PKG_ICM_CMD; in icm_request()
301 req->response = response; in icm_request()
302 req->npackets = npackets; in icm_request()
303 req->response_size = response_size; in icm_request()
304 req->response_type = TB_CFG_PKG_ICM_RESP; in icm_request()
306 mutex_lock(&icm->request_lock); in icm_request()
307 res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); in icm_request()
308 mutex_unlock(&icm->request_lock); in icm_request()
312 if (res.err != -ETIMEDOUT) in icm_request()
313 return res.err == 1 ? -EIO : res.err; in icm_request()
316 } while (retries--); in icm_request()
318 return -ETIMEDOUT; in icm_request()
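icm_request() serializes every firmware message through request_lock and retries the whole exchange only when the transport reports -ETIMEDOUT; any other outcome is final, and a firmware status of 1 is mapped to -EIO. Reduced to a generic skeleton, with the hypothetical issue_once() callback standing in for the mutex-protected tb_cfg_request_sync() call above:

#include <errno.h>

/* Retry only on timeout; any other result (success or error) is final. */
static int retry_on_timeout(int (*issue_once)(void *ctx), void *ctx,
			    unsigned int retries)
{
	do {
		int err = issue_once(ctx);

		if (err != -ETIMEDOUT)
			return err;
	} while (retries--);

	return -ETIMEDOUT;
}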
330 if (delayed_work_pending(&icm->rescan_work)) in icm_postpone_rescan()
331 mod_delayed_work(tb->wq, &icm->rescan_work, in icm_postpone_rescan()
339 if (!icm->veto) { in icm_veto_begin()
340 icm->veto = true; in icm_veto_begin()
342 pm_runtime_get(&tb->dev); in icm_veto_begin()
350 if (icm->veto) { in icm_veto_end()
351 icm->veto = false; in icm_veto_end()
353 pm_runtime_mark_last_busy(&tb->dev); in icm_veto_end()
354 pm_runtime_put_autosuspend(&tb->dev); in icm_veto_end()
362 val = ioread32(nhi->iobase + REG_FW_STS); in icm_firmware_running()
384 struct icm_fr_pkg_get_topology_response *switches, *sw; in icm_fr_get_route() local
394 return -ENOMEM; in icm_fr_get_route()
401 sw = &switches[0]; in icm_fr_get_route()
402 index = icm_fr_get_switch_index(sw->ports[link]); in icm_fr_get_route()
404 ret = -ENODEV; in icm_fr_get_route()
408 sw = &switches[index]; in icm_fr_get_route()
412 if (!(sw->first_data & ICM_SWITCH_USED)) { in icm_fr_get_route()
413 ret = -ENODEV; in icm_fr_get_route()
417 for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { in icm_fr_get_route()
418 index = icm_fr_get_switch_index(sw->ports[j]); in icm_fr_get_route()
419 if (index > sw->switch_index) { in icm_fr_get_route()
420 sw = &switches[index]; in icm_fr_get_route()
426 *route = get_route(sw->route_hi, sw->route_lo); in icm_fr_get_route()
435 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); in icm_fr_save_devices()
460 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) in icm_fr_approve_switch() argument
467 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); in icm_fr_approve_switch()
469 request.connection_id = sw->connection_id; in icm_fr_approve_switch()
470 request.connection_key = sw->connection_key; in icm_fr_approve_switch()
481 return -EIO; in icm_fr_approve_switch()
487 static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) in icm_fr_add_switch_key() argument
494 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); in icm_fr_add_switch_key()
496 request.connection_id = sw->connection_id; in icm_fr_add_switch_key()
497 request.connection_key = sw->connection_key; in icm_fr_add_switch_key()
498 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); in icm_fr_add_switch_key()
508 return -EIO; in icm_fr_add_switch_key()
514 static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, in icm_fr_challenge_switch_key() argument
522 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); in icm_fr_challenge_switch_key()
524 request.connection_id = sw->connection_id; in icm_fr_challenge_switch_key()
525 request.connection_key = sw->connection_key; in icm_fr_challenge_switch_key()
535 return -EKEYREJECTED; in icm_fr_challenge_switch_key()
537 return -ENOKEY; in icm_fr_challenge_switch_key()
552 request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; in icm_fr_approve_xdomain_paths()
553 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); in icm_fr_approve_xdomain_paths()
555 request.transmit_path = xd->transmit_path; in icm_fr_approve_xdomain_paths()
556 request.transmit_ring = xd->transmit_ring; in icm_fr_approve_xdomain_paths()
557 request.receive_path = xd->receive_path; in icm_fr_approve_xdomain_paths()
558 request.receive_ring = xd->receive_ring; in icm_fr_approve_xdomain_paths()
567 return -EIO; in icm_fr_approve_xdomain_paths()
577 phy_port = tb_phy_port_from_link(xd->link); in icm_fr_disconnect_xdomain_paths()
583 nhi_mailbox_cmd(tb->nhi, cmd, 1); in icm_fr_disconnect_xdomain_paths()
585 nhi_mailbox_cmd(tb->nhi, cmd, 2); in icm_fr_disconnect_xdomain_paths()
592 struct tb *tb = parent_sw->tb; in alloc_switch()
593 struct tb_switch *sw; in alloc_switch() local
595 sw = tb_switch_alloc(tb, &parent_sw->dev, route); in alloc_switch()
596 if (IS_ERR(sw)) { in alloc_switch()
598 return sw; in alloc_switch()
601 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); in alloc_switch()
602 if (!sw->uuid) { in alloc_switch()
603 tb_switch_put(sw); in alloc_switch()
604 return ERR_PTR(-ENOMEM); in alloc_switch()
607 init_completion(&sw->rpm_complete); in alloc_switch()
608 return sw; in alloc_switch()
611 static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw) in add_switch() argument
613 u64 route = tb_route(sw); in add_switch()
617 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); in add_switch()
618 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); in add_switch()
620 ret = tb_switch_add(sw); in add_switch()
622 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; in add_switch()
627 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, in update_switch() argument
632 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; in update_switch()
633 /* Re-connect via updated port */ in update_switch()
634 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); in update_switch()
637 sw->config.route_hi = upper_32_bits(route); in update_switch()
638 sw->config.route_lo = lower_32_bits(route); in update_switch()
639 sw->connection_id = connection_id; in update_switch()
640 sw->connection_key = connection_key; in update_switch()
641 sw->link = link; in update_switch()
642 sw->depth = depth; in update_switch()
643 sw->boot = boot; in update_switch()
646 sw->is_unplugged = false; in update_switch()
649 complete(&sw->rpm_complete); in update_switch()
652 static void remove_switch(struct tb_switch *sw) in remove_switch() argument
656 parent_sw = tb_to_switch(sw->dev.parent); in remove_switch()
657 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; in remove_switch()
658 tb_switch_remove(sw); in remove_switch()
661 static void add_xdomain(struct tb_switch *sw, u64 route, in add_xdomain() argument
667 pm_runtime_get_sync(&sw->dev); in add_xdomain()
669 xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); in add_xdomain()
673 xd->link = link; in add_xdomain()
674 xd->depth = depth; in add_xdomain()
676 tb_port_at(route, sw)->xdomain = xd; in add_xdomain()
681 pm_runtime_mark_last_busy(&sw->dev); in add_xdomain()
682 pm_runtime_put_autosuspend(&sw->dev); in add_xdomain()
687 xd->link = link; in update_xdomain()
688 xd->route = route; in update_xdomain()
689 xd->is_unplugged = false; in update_xdomain()
694 struct tb_switch *sw; in remove_xdomain() local
696 sw = tb_to_switch(xd->dev.parent); in remove_xdomain()
697 tb_port_at(xd->route, sw)->xdomain = NULL; in remove_xdomain()
707 struct tb_switch *sw, *parent_sw; in icm_fr_device_connected() local
718 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; in icm_fr_device_connected()
719 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> in icm_fr_device_connected()
721 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; in icm_fr_device_connected()
722 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> in icm_fr_device_connected()
724 boot = pkg->link_info & ICM_LINK_INFO_BOOT; in icm_fr_device_connected()
725 dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; in icm_fr_device_connected()
726 speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; in icm_fr_device_connected()
728 if (pkg->link_info & ICM_LINK_INFO_REJECTED) { in icm_fr_device_connected()
734 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); in icm_fr_device_connected()
735 if (sw) { in icm_fr_device_connected()
738 parent_sw = tb_to_switch(sw->dev.parent); in icm_fr_device_connected()
739 sw_phy_port = tb_phy_port_from_link(sw->link); in icm_fr_device_connected()
746 * fact that a switch on a dual-link connection might in icm_fr_device_connected()
750 if (sw->depth == depth && sw_phy_port == phy_port && in icm_fr_device_connected()
751 !!sw->authorized == authorized) { in icm_fr_device_connected()
756 if (sw->link != link) { in icm_fr_device_connected()
757 ret = icm->get_route(tb, link, depth, &route); in icm_fr_device_connected()
761 tb_switch_put(sw); in icm_fr_device_connected()
765 route = tb_route(sw); in icm_fr_device_connected()
768 update_switch(parent_sw, sw, route, pkg->connection_id, in icm_fr_device_connected()
769 pkg->connection_key, link, depth, boot); in icm_fr_device_connected()
770 tb_switch_put(sw); in icm_fr_device_connected()
779 remove_switch(sw); in icm_fr_device_connected()
780 tb_switch_put(sw); in icm_fr_device_connected()
789 sw = tb_switch_find_by_link_depth(tb, link, depth); in icm_fr_device_connected()
790 if (!sw) { in icm_fr_device_connected()
795 sw = tb_switch_find_by_link_depth(tb, dual_link, depth); in icm_fr_device_connected()
797 if (sw) { in icm_fr_device_connected()
798 remove_switch(sw); in icm_fr_device_connected()
799 tb_switch_put(sw); in icm_fr_device_connected()
809 parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); in icm_fr_device_connected()
816 ret = icm->get_route(tb, link, depth, &route); in icm_fr_device_connected()
824 pm_runtime_get_sync(&parent_sw->dev); in icm_fr_device_connected()
826 sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); in icm_fr_device_connected()
827 if (!IS_ERR(sw)) { in icm_fr_device_connected()
828 sw->connection_id = pkg->connection_id; in icm_fr_device_connected()
829 sw->connection_key = pkg->connection_key; in icm_fr_device_connected()
830 sw->link = link; in icm_fr_device_connected()
831 sw->depth = depth; in icm_fr_device_connected()
832 sw->authorized = authorized; in icm_fr_device_connected()
833 sw->security_level = security_level; in icm_fr_device_connected()
834 sw->boot = boot; in icm_fr_device_connected()
835 sw->link_speed = speed_gen3 ? 20 : 10; in icm_fr_device_connected()
836 sw->link_width = dual_lane ? 2 : 1; in icm_fr_device_connected()
837 sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name)); in icm_fr_device_connected()
839 if (add_switch(parent_sw, sw)) in icm_fr_device_connected()
840 tb_switch_put(sw); in icm_fr_device_connected()
843 pm_runtime_mark_last_busy(&parent_sw->dev); in icm_fr_device_connected()
844 pm_runtime_put_autosuspend(&parent_sw->dev); in icm_fr_device_connected()
854 struct tb_switch *sw; in icm_fr_device_disconnected() local
857 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; in icm_fr_device_disconnected()
858 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> in icm_fr_device_disconnected()
866 sw = tb_switch_find_by_link_depth(tb, link, depth); in icm_fr_device_disconnected()
867 if (!sw) { in icm_fr_device_disconnected()
873 remove_switch(sw); in icm_fr_device_disconnected()
874 tb_switch_put(sw); in icm_fr_device_disconnected()
883 struct tb_switch *sw; in icm_fr_xdomain_connected() local
887 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; in icm_fr_xdomain_connected()
888 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> in icm_fr_xdomain_connected()
896 route = get_route(pkg->local_route_hi, pkg->local_route_lo); in icm_fr_xdomain_connected()
898 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); in icm_fr_xdomain_connected()
902 xd_phy_port = phy_port_from_route(xd->route, xd->depth); in icm_fr_xdomain_connected()
905 if (xd->depth == depth && xd_phy_port == phy_port) { in icm_fr_xdomain_connected()
914 * everything anyway to be able to re-establish the in icm_fr_xdomain_connected()
945 sw = tb_switch_find_by_route(tb, route); in icm_fr_xdomain_connected()
946 if (sw) { in icm_fr_xdomain_connected()
947 remove_switch(sw); in icm_fr_xdomain_connected()
948 tb_switch_put(sw); in icm_fr_xdomain_connected()
951 sw = tb_switch_find_by_link_depth(tb, link, depth); in icm_fr_xdomain_connected()
952 if (!sw) { in icm_fr_xdomain_connected()
958 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, in icm_fr_xdomain_connected()
960 tb_switch_put(sw); in icm_fr_xdomain_connected()
975 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); in icm_fr_xdomain_disconnected()
1014 static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) in icm_tr_approve_switch() argument
1021 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); in icm_tr_approve_switch()
1023 request.route_lo = sw->config.route_lo; in icm_tr_approve_switch()
1024 request.route_hi = sw->config.route_hi; in icm_tr_approve_switch()
1025 request.connection_id = sw->connection_id; in icm_tr_approve_switch()
1035 return -EIO; in icm_tr_approve_switch()
1041 static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) in icm_tr_add_switch_key() argument
1048 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); in icm_tr_add_switch_key()
1050 request.route_lo = sw->config.route_lo; in icm_tr_add_switch_key()
1051 request.route_hi = sw->config.route_hi; in icm_tr_add_switch_key()
1052 request.connection_id = sw->connection_id; in icm_tr_add_switch_key()
1053 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); in icm_tr_add_switch_key()
1063 return -EIO; in icm_tr_add_switch_key()
1069 static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, in icm_tr_challenge_switch_key() argument
1077 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); in icm_tr_challenge_switch_key()
1079 request.route_lo = sw->config.route_lo; in icm_tr_challenge_switch_key()
1080 request.route_hi = sw->config.route_hi; in icm_tr_challenge_switch_key()
1081 request.connection_id = sw->connection_id; in icm_tr_challenge_switch_key()
1091 return -EKEYREJECTED; in icm_tr_challenge_switch_key()
1093 return -ENOKEY; in icm_tr_challenge_switch_key()
1108 request.route_hi = upper_32_bits(xd->route); in icm_tr_approve_xdomain_paths()
1109 request.route_lo = lower_32_bits(xd->route); in icm_tr_approve_xdomain_paths()
1110 request.transmit_path = xd->transmit_path; in icm_tr_approve_xdomain_paths()
1111 request.transmit_ring = xd->transmit_ring; in icm_tr_approve_xdomain_paths()
1112 request.receive_path = xd->receive_path; in icm_tr_approve_xdomain_paths()
1113 request.receive_ring = xd->receive_ring; in icm_tr_approve_xdomain_paths()
1114 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); in icm_tr_approve_xdomain_paths()
1123 return -EIO; in icm_tr_approve_xdomain_paths()
1138 request.route_hi = upper_32_bits(xd->route); in icm_tr_xdomain_tear_down()
1139 request.route_lo = lower_32_bits(xd->route); in icm_tr_xdomain_tear_down()
1140 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); in icm_tr_xdomain_tear_down()
1149 return -EIO; in icm_tr_xdomain_tear_down()
1174 struct tb_switch *sw, *parent_sw; in __icm_tr_device_connected() local
1185 if (pkg->hdr.packet_id) in __icm_tr_device_connected()
1188 route = get_route(pkg->route_hi, pkg->route_lo); in __icm_tr_device_connected()
1189 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; in __icm_tr_device_connected()
1190 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> in __icm_tr_device_connected()
1192 boot = pkg->link_info & ICM_LINK_INFO_BOOT; in __icm_tr_device_connected()
1193 dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; in __icm_tr_device_connected()
1194 speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; in __icm_tr_device_connected()
1196 if (pkg->link_info & ICM_LINK_INFO_REJECTED) { in __icm_tr_device_connected()
1202 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); in __icm_tr_device_connected()
1203 if (sw) { in __icm_tr_device_connected()
1205 if (tb_route(sw) == route && !!sw->authorized == authorized) { in __icm_tr_device_connected()
1206 parent_sw = tb_to_switch(sw->dev.parent); in __icm_tr_device_connected()
1207 update_switch(parent_sw, sw, route, pkg->connection_id, in __icm_tr_device_connected()
1209 tb_switch_put(sw); in __icm_tr_device_connected()
1213 remove_switch(sw); in __icm_tr_device_connected()
1214 tb_switch_put(sw); in __icm_tr_device_connected()
1218 sw = tb_switch_find_by_route(tb, route); in __icm_tr_device_connected()
1219 if (sw) { in __icm_tr_device_connected()
1220 remove_switch(sw); in __icm_tr_device_connected()
1221 tb_switch_put(sw); in __icm_tr_device_connected()
1237 pm_runtime_get_sync(&parent_sw->dev); in __icm_tr_device_connected()
1239 sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); in __icm_tr_device_connected()
1240 if (!IS_ERR(sw)) { in __icm_tr_device_connected()
1241 sw->connection_id = pkg->connection_id; in __icm_tr_device_connected()
1242 sw->authorized = authorized; in __icm_tr_device_connected()
1243 sw->security_level = security_level; in __icm_tr_device_connected()
1244 sw->boot = boot; in __icm_tr_device_connected()
1245 sw->link_speed = speed_gen3 ? 20 : 10; in __icm_tr_device_connected()
1246 sw->link_width = dual_lane ? 2 : 1; in __icm_tr_device_connected()
1247 sw->rpm = force_rtd3; in __icm_tr_device_connected()
1248 if (!sw->rpm) in __icm_tr_device_connected()
1249 sw->rpm = intel_vss_is_rtd3(pkg->ep_name, in __icm_tr_device_connected()
1250 sizeof(pkg->ep_name)); in __icm_tr_device_connected()
1252 if (add_switch(parent_sw, sw)) in __icm_tr_device_connected()
1253 tb_switch_put(sw); in __icm_tr_device_connected()
1256 pm_runtime_mark_last_busy(&parent_sw->dev); in __icm_tr_device_connected()
1257 pm_runtime_put_autosuspend(&parent_sw->dev); in __icm_tr_device_connected()
1273 struct tb_switch *sw; in icm_tr_device_disconnected() local
1276 route = get_route(pkg->route_hi, pkg->route_lo); in icm_tr_device_disconnected()
1278 sw = tb_switch_find_by_route(tb, route); in icm_tr_device_disconnected()
1279 if (!sw) { in icm_tr_device_disconnected()
1284 remove_switch(sw); in icm_tr_device_disconnected()
1285 tb_switch_put(sw); in icm_tr_device_disconnected()
1294 struct tb_switch *sw; in icm_tr_xdomain_connected() local
1297 if (!tb->root_switch) in icm_tr_xdomain_connected()
1300 route = get_route(pkg->local_route_hi, pkg->local_route_lo); in icm_tr_xdomain_connected()
1302 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); in icm_tr_xdomain_connected()
1304 if (xd->route == route) { in icm_tr_xdomain_connected()
1326 sw = tb_switch_find_by_route(tb, route); in icm_tr_xdomain_connected()
1327 if (sw) { in icm_tr_xdomain_connected()
1328 remove_switch(sw); in icm_tr_xdomain_connected()
1329 tb_switch_put(sw); in icm_tr_xdomain_connected()
1332 sw = tb_switch_find_by_route(tb, get_parent_route(route)); in icm_tr_xdomain_connected()
1333 if (!sw) { in icm_tr_xdomain_connected()
1338 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); in icm_tr_xdomain_connected()
1339 tb_switch_put(sw); in icm_tr_xdomain_connected()
1350 route = get_route(pkg->route_hi, pkg->route_lo); in icm_tr_xdomain_disconnected()
1375 switch (parent->device) { in get_upstream_port()
1396 * as well. We just need to reset and re-enable it first. in icm_ar_is_supported()
1399 if (icm_firmware_running(tb->nhi)) in icm_ar_is_supported()
1405 * Find the upstream PCIe port in case we need to do reset in icm_ar_is_supported()
1408 upstream_port = get_upstream_port(tb->nhi->pdev); in icm_ar_is_supported()
1415 icm->upstream_port = upstream_port; in icm_ar_is_supported()
1416 icm->vnd_cap = cap; in icm_ar_is_supported()
1432 struct tb_nhi *nhi = tb->nhi; in icm_ar_get_mode()
1437 val = ioread32(nhi->iobase + REG_FW_STS); in icm_ar_get_mode()
1441 } while (--retries); in icm_ar_get_mode()
1444 dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); in icm_ar_get_mode()
1445 return -ENODEV; in icm_ar_get_mode()
1494 return -EIO; in icm_ar_get_route()
1515 return -EIO; in icm_ar_get_boot_acl()
1562 return -EINVAL; in icm_ar_set_boot_acl()
1576 return -EIO; in icm_ar_set_boot_acl()
1606 struct tb_nhi *nhi = tb->nhi; in icm_icl_set_uuid()
1609 pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); in icm_icl_set_uuid()
1610 pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); in icm_icl_set_uuid()
1614 tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); in icm_icl_set_uuid()
1628 tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); in icm_icl_rtd3_veto()
1630 if (pkg->veto_reason) in icm_icl_rtd3_veto()
1644 val = ioread32(tb->nhi->iobase + REG_FW_STS); in icm_tgl_is_supported()
1651 struct tb *tb = n->tb; in icm_handle_notification()
1654 mutex_lock(&tb->lock); in icm_handle_notification()
1661 if (tb->root_switch) { in icm_handle_notification()
1662 switch (n->pkg->code) { in icm_handle_notification()
1664 icm->device_connected(tb, n->pkg); in icm_handle_notification()
1667 icm->device_disconnected(tb, n->pkg); in icm_handle_notification()
1670 icm->xdomain_connected(tb, n->pkg); in icm_handle_notification()
1673 icm->xdomain_disconnected(tb, n->pkg); in icm_handle_notification()
1676 icm->rtd3_veto(tb, n->pkg); in icm_handle_notification()
1681 mutex_unlock(&tb->lock); in icm_handle_notification()
1683 kfree(n->pkg); in icm_handle_notification()
1696 INIT_WORK(&n->work, icm_handle_notification); in icm_handle_event()
1697 n->pkg = kmemdup(buf, size, GFP_KERNEL); in icm_handle_event()
1698 n->tb = tb; in icm_handle_event()
1700 queue_work(tb->wq, &n->work); in icm_handle_event()
1711 ret = icm->driver_ready(tb, security_level, nboot_acl, rpm); in __icm_driver_ready()
1725 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, in __icm_driver_ready()
1731 } while (--retries); in __icm_driver_ready()
1734 return -ETIMEDOUT; in __icm_driver_ready()
1742 if (!icm->upstream_port) in icm_firmware_reset()
1743 return -ENODEV; in icm_firmware_reset()
1745 /* Put ARC to wait for CIO reset event to happen */ in icm_firmware_reset()
1746 val = ioread32(nhi->iobase + REG_FW_STS); in icm_firmware_reset()
1748 iowrite32(val, nhi->iobase + REG_FW_STS); in icm_firmware_reset()
1750 /* Re-start ARC */ in icm_firmware_reset()
1751 val = ioread32(nhi->iobase + REG_FW_STS); in icm_firmware_reset()
1754 iowrite32(val, nhi->iobase + REG_FW_STS); in icm_firmware_reset()
1756 /* Trigger CIO reset now */ in icm_firmware_reset()
1757 return icm->cio_reset(tb); in icm_firmware_reset()
1770 dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); in icm_firmware_start()
1779 val = ioread32(nhi->iobase + REG_FW_STS); in icm_firmware_start()
1784 } while (--retries); in icm_firmware_start()
1786 return -ETIMEDOUT; in icm_firmware_start()
1797 if (!icm->upstream_port) in icm_reset_phy_port()
1824 /* If they are both up we need to reset them now */ in icm_reset_phy_port()
1838 /* Wait a bit and then re-enable both ports */ in icm_reset_phy_port()
1860 struct tb_nhi *nhi = tb->nhi; in icm_firmware_init()
1865 dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); in icm_firmware_init()
1869 if (icm->get_mode) { in icm_firmware_init()
1870 ret = icm->get_mode(tb); in icm_firmware_init()
1874 icm->safe_mode = true; in icm_firmware_init()
1887 return -ENODEV; in icm_firmware_init()
1892 * Reset both physical ports if there is anything connected to in icm_firmware_init()
1897 dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); in icm_firmware_init()
1900 dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); in icm_firmware_init()
1914 if (icm->safe_mode) { in icm_driver_ready()
1921 ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl, in icm_driver_ready()
1922 &icm->rpm); in icm_driver_ready()
1927 * Make sure the number of supported preboot ACL matches what we in icm_driver_ready()
1930 if (tb->nboot_acl > icm->max_boot_acl) in icm_driver_ready()
1931 tb->nboot_acl = 0; in icm_driver_ready()
1940 if (icm->save_devices) in icm_suspend()
1941 icm->save_devices(tb); in icm_suspend()
1943 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); in icm_suspend()
1953 static void icm_unplug_children(struct tb_switch *sw) in icm_unplug_children() argument
1957 if (tb_route(sw)) in icm_unplug_children()
1958 sw->is_unplugged = true; in icm_unplug_children()
1960 tb_switch_for_each_port(sw, port) { in icm_unplug_children()
1961 if (port->xdomain) in icm_unplug_children()
1962 port->xdomain->is_unplugged = true; in icm_unplug_children()
1964 icm_unplug_children(port->remote->sw); in icm_unplug_children()
1970 struct tb_switch *sw = tb_to_switch(dev); in complete_rpm() local
1972 if (sw) in complete_rpm()
1973 complete(&sw->rpm_complete); in complete_rpm()
1977 static void remove_unplugged_switch(struct tb_switch *sw) in remove_unplugged_switch() argument
1979 struct device *parent = get_device(sw->dev.parent); in remove_unplugged_switch()
1988 complete_rpm(&sw->dev, NULL); in remove_unplugged_switch()
1989 bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); in remove_unplugged_switch()
1990 tb_switch_remove(sw); in remove_unplugged_switch()
1998 static void icm_free_unplugged_children(struct tb_switch *sw) in icm_free_unplugged_children() argument
2002 tb_switch_for_each_port(sw, port) { in icm_free_unplugged_children()
2003 if (port->xdomain && port->xdomain->is_unplugged) { in icm_free_unplugged_children()
2004 tb_xdomain_remove(port->xdomain); in icm_free_unplugged_children()
2005 port->xdomain = NULL; in icm_free_unplugged_children()
2007 if (port->remote->sw->is_unplugged) { in icm_free_unplugged_children()
2008 remove_unplugged_switch(port->remote->sw); in icm_free_unplugged_children()
2009 port->remote = NULL; in icm_free_unplugged_children()
2011 icm_free_unplugged_children(port->remote->sw); in icm_free_unplugged_children()
2022 mutex_lock(&tb->lock); in icm_rescan_work()
2023 if (tb->root_switch) in icm_rescan_work()
2024 icm_free_unplugged_children(tb->root_switch); in icm_rescan_work()
2025 mutex_unlock(&tb->lock); in icm_rescan_work()
2032 if (tb->nhi->going_away) in icm_complete()
2042 icm_unplug_children(tb->root_switch); in icm_complete()
2055 queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); in icm_complete()
2060 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); in icm_runtime_suspend()
2064 static int icm_runtime_suspend_switch(struct tb_switch *sw) in icm_runtime_suspend_switch() argument
2066 if (tb_route(sw)) in icm_runtime_suspend_switch()
2067 reinit_completion(&sw->rpm_complete); in icm_runtime_suspend_switch()
2071 static int icm_runtime_resume_switch(struct tb_switch *sw) in icm_runtime_resume_switch() argument
2073 if (tb_route(sw)) { in icm_runtime_resume_switch()
2074 if (!wait_for_completion_timeout(&sw->rpm_complete, in icm_runtime_resume_switch()
2076 dev_dbg(&sw->dev, "runtime resuming timed out\n"); in icm_runtime_resume_switch()
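The rpm_complete completion connects three of the fragments in this listing: runtime suspend re-arms it, runtime resume waits on it, and update_switch() completes it once the firmware has re-announced the device after resume. A condensed, non-compilable view of that handshake using only the calls shown above (the actual timeout value is not visible in this excerpt, so timeout is a placeholder):

/* icm_runtime_suspend_switch(): arm the completion before suspending */
reinit_completion(&sw->rpm_complete);

/* icm_runtime_resume_switch(): wait for the device-connected notification */
if (!wait_for_completion_timeout(&sw->rpm_complete, timeout))
	dev_dbg(&sw->dev, "runtime resuming timed out\n");

/* update_switch(), called from the notification path: wake the waiter */
complete(&sw->rpm_complete);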
2097 if (icm->safe_mode) in icm_start()
2098 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); in icm_start()
2100 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); in icm_start()
2101 if (IS_ERR(tb->root_switch)) in icm_start()
2102 return PTR_ERR(tb->root_switch); in icm_start()
2104 tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; in icm_start()
2105 tb->root_switch->rpm = icm->rpm; in icm_start()
2107 if (icm->set_uuid) in icm_start()
2108 icm->set_uuid(tb); in icm_start()
2110 ret = tb_switch_add(tb->root_switch); in icm_start()
2112 tb_switch_put(tb->root_switch); in icm_start()
2113 tb->root_switch = NULL; in icm_start()
2123 cancel_delayed_work(&icm->rescan_work); in icm_stop()
2124 tb_switch_remove(tb->root_switch); in icm_stop()
2125 tb->root_switch = NULL; in icm_stop()
2126 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); in icm_stop()
2131 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); in icm_disconnect_pcie_paths()
2217 INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); in icm_probe()
2218 mutex_init(&icm->request_lock); in icm_probe()
2220 switch (nhi->pdev->device) { in icm_probe()
2223 icm->can_upgrade_nvm = true; in icm_probe()
2224 icm->is_supported = icm_fr_is_supported; in icm_probe()
2225 icm->get_route = icm_fr_get_route; in icm_probe()
2226 icm->save_devices = icm_fr_save_devices; in icm_probe()
2227 icm->driver_ready = icm_fr_driver_ready; in icm_probe()
2228 icm->device_connected = icm_fr_device_connected; in icm_probe()
2229 icm->device_disconnected = icm_fr_device_disconnected; in icm_probe()
2230 icm->xdomain_connected = icm_fr_xdomain_connected; in icm_probe()
2231 icm->xdomain_disconnected = icm_fr_xdomain_disconnected; in icm_probe()
2232 tb->cm_ops = &icm_fr_ops; in icm_probe()
2240 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; in icm_probe()
2247 icm->can_upgrade_nvm = !x86_apple_machine; in icm_probe()
2248 icm->is_supported = icm_ar_is_supported; in icm_probe()
2249 icm->cio_reset = icm_ar_cio_reset; in icm_probe()
2250 icm->get_mode = icm_ar_get_mode; in icm_probe()
2251 icm->get_route = icm_ar_get_route; in icm_probe()
2252 icm->save_devices = icm_fr_save_devices; in icm_probe()
2253 icm->driver_ready = icm_ar_driver_ready; in icm_probe()
2254 icm->device_connected = icm_fr_device_connected; in icm_probe()
2255 icm->device_disconnected = icm_fr_device_disconnected; in icm_probe()
2256 icm->xdomain_connected = icm_fr_xdomain_connected; in icm_probe()
2257 icm->xdomain_disconnected = icm_fr_xdomain_disconnected; in icm_probe()
2258 tb->cm_ops = &icm_ar_ops; in icm_probe()
2263 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; in icm_probe()
2264 icm->can_upgrade_nvm = !x86_apple_machine; in icm_probe()
2265 icm->is_supported = icm_ar_is_supported; in icm_probe()
2266 icm->cio_reset = icm_tr_cio_reset; in icm_probe()
2267 icm->get_mode = icm_ar_get_mode; in icm_probe()
2268 icm->driver_ready = icm_tr_driver_ready; in icm_probe()
2269 icm->device_connected = icm_tr_device_connected; in icm_probe()
2270 icm->device_disconnected = icm_tr_device_disconnected; in icm_probe()
2271 icm->xdomain_connected = icm_tr_xdomain_connected; in icm_probe()
2272 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; in icm_probe()
2273 tb->cm_ops = &icm_tr_ops; in icm_probe()
2278 icm->is_supported = icm_fr_is_supported; in icm_probe()
2279 icm->driver_ready = icm_icl_driver_ready; in icm_probe()
2280 icm->set_uuid = icm_icl_set_uuid; in icm_probe()
2281 icm->device_connected = icm_icl_device_connected; in icm_probe()
2282 icm->device_disconnected = icm_tr_device_disconnected; in icm_probe()
2283 icm->xdomain_connected = icm_tr_xdomain_connected; in icm_probe()
2284 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; in icm_probe()
2285 icm->rtd3_veto = icm_icl_rtd3_veto; in icm_probe()
2286 tb->cm_ops = &icm_icl_ops; in icm_probe()
2293 icm->is_supported = icm_tgl_is_supported; in icm_probe()
2294 icm->driver_ready = icm_icl_driver_ready; in icm_probe()
2295 icm->set_uuid = icm_icl_set_uuid; in icm_probe()
2296 icm->device_connected = icm_icl_device_connected; in icm_probe()
2297 icm->device_disconnected = icm_tr_device_disconnected; in icm_probe()
2298 icm->xdomain_connected = icm_tr_xdomain_connected; in icm_probe()
2299 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; in icm_probe()
2300 icm->rtd3_veto = icm_icl_rtd3_veto; in icm_probe()
2301 tb->cm_ops = &icm_icl_ops; in icm_probe()
2306 icm->is_supported = icm_tgl_is_supported; in icm_probe()
2307 icm->get_mode = icm_ar_get_mode; in icm_probe()
2308 icm->driver_ready = icm_tr_driver_ready; in icm_probe()
2309 icm->device_connected = icm_tr_device_connected; in icm_probe()
2310 icm->device_disconnected = icm_tr_device_disconnected; in icm_probe()
2311 icm->xdomain_connected = icm_tr_xdomain_connected; in icm_probe()
2312 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; in icm_probe()
2313 tb->cm_ops = &icm_tr_ops; in icm_probe()
2317 if (!icm->is_supported || !icm->is_supported(tb)) { in icm_probe()
2318 dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); in icm_probe()