// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well, so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires
		 * power cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot
	 * trigger PME themselves. To be on the safe side keep the root
	 * port in D0 during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING.
			 * Give it some time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

static int tb_port_reset(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		return port->cap_usb4 ? usb4_port_reset(port) : 0;
	return tb_lc_reset_port(port);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
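
/*
 * Typical usage, a minimal sketch: start with @prev set to %NULL and
 * keep feeding the returned port back in until %NULL is returned. The
 * tb_for_each_port_on_path() convenience macro in tb.h wraps this same
 * pattern.
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p))) {
 *		... act on each port along the path ...
 *	}
 */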

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port,
				       unsigned int width_mask)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return widths & width_mask;
}

static bool is_gen4_link(struct tb_port *port)
{
	return tb_port_get_link_speed(port) > 20;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (is_gen4_link(port))
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case TB_LINK_WIDTH_DUAL:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in the expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by
	 * for example the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * avoids the lane adapter re-entering the bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}
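
/*
 * Typical calling sequence, a sketch only (the connection manager code
 * drives the real flow): bond the link and then wait until the new
 * width is actually reflected in the lane adapter before setting up
 * any paths. The 100 ms timeout is an illustrative value.
 *
 *	ret = tb_port_lane_bonding_enable(port);
 *	if (!ret)
 *		ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL,
 *						  100);
 */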

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width_mask: Expected link width mask
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it was. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width_mask) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Also updates the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has the HDP bit set, this function can be used to
 * clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
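
/*
 * Illustrative bring-up order for a legacy (non-USB4) DP adapter, a
 * sketch only; the variable names below are hypothetical and the DP
 * tunnel code drives the real sequence:
 *
 *	ret = tb_dp_port_set_hops(port, video_hopid, aux_tx_hopid,
 *				  aux_rx_hopid);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */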

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

static int tb_switch_reset_host(struct tb_switch *sw)
{
	if (sw->generation > 1) {
		struct tb_port *port;

		tb_switch_for_each_port(sw, port) {
			int i, ret;

			/*
			 * For lane adapters we issue downstream port
			 * reset and clear up path config spaces.
			 *
			 * For protocol adapters we disable the path and
			 * clear path config space one by one (from 8 to
			 * Max Input HopID of the adapter).
			 */
			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
				ret = tb_port_reset(port);
				if (ret)
					return ret;
			} else if (tb_port_is_usb3_down(port) ||
				   tb_port_is_usb3_up(port)) {
				tb_usb3_port_enable(port, false);
			} else if (tb_port_is_dpin(port) ||
				   tb_port_is_dpout(port)) {
				tb_dp_port_enable(port, false);
			} else if (tb_port_is_pcie_down(port) ||
				   tb_port_is_pcie_up(port)) {
				tb_pci_port_enable(port, false);
			} else {
				continue;
			}

			/* Cleanup path config space of protocol adapter */
			for (i = TB_PATH_MIN_HOPID;
			     i <= port->config.max_in_hop_id; i++) {
				ret = tb_path_deactivate_hop(port, i);
				if (ret)
					return ret;
			}
		}
	} else {
		struct tb_cfg_result res;

		/* Thunderbolt 1 uses the "reset" config space packet */
		res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
				      TB_CFG_SWITCH, 2, 2);
		if (res.err)
			return res.err;
		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
		if (res.err > 0)
			return -EIO;
		else if (res.err < 0)
			return res.err;
	}

	return 0;
}

static int tb_switch_reset_device(struct tb_switch *sw)
{
	return tb_port_reset(tb_switch_downstream_port(sw));
}

static bool tb_switch_enumerated(struct tb_switch *sw)
{
	u32 val;
	int ret;

	/*
	 * Read directly from the hardware because we use this also
	 * during system sleep where sw->config.enabled is already set
	 * by us.
	 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
	if (ret)
		return false;

	return !!(val & ROUTER_CS_3_V);
}

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up path config
 * spaces accordingly. For device routers issues downstream port reset
 * through the parent router, so as a side effect there will be an
 * unplug soon after this is finished.
 *
 * If the router is not enumerated this does nothing.
 *
 * Returns %0 on success or negative errno in case of failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	int ret;

	/*
	 * We cannot access the port config spaces unless the router is
	 * already enumerated. If the router is not enumerated it is
	 * equal to being reset so we can skip that here.
	 */
	if (!tb_switch_enumerated(sw))
		return 0;

	tb_sw_dbg(sw, "resetting\n");

	if (tb_route(sw))
		ret = tb_switch_reset_device(sw);
	else
		ret = tb_switch_reset_host(sw);

	if (ret)
		tb_sw_warn(sw, "failed to reset\n");

	return ret;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait till the specified bits in specified offset reach specified value.
 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
 * within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
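
/*
 * Example, illustrative only (the offset and bit below are
 * hypothetical, not real register definitions): poll a router config
 * space register until bit 0 is set, giving up after 100 ms:
 *
 *	ret = tb_switch_wait_for_bit(sw, offset, BIT(0), BIT(0), 100);
 *
 * A return of %-ETIMEDOUT means the bit never reached the expected
 * value within the timeout.
 */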

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_TX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int width;

	switch (sw->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_RX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
1961
nvm_authenticate_show(struct device * dev,struct device_attribute * attr,char * buf)1962 static ssize_t nvm_authenticate_show(struct device *dev,
1963 struct device_attribute *attr, char *buf)
1964 {
1965 struct tb_switch *sw = tb_to_switch(dev);
1966 u32 status;
1967
1968 nvm_get_auth_status(sw, &status);
1969 return sysfs_emit(buf, "%#x\n", status);
1970 }
1971
nvm_authenticate_sysfs(struct device * dev,const char * buf,bool disconnect)1972 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1973 bool disconnect)
1974 {
1975 struct tb_switch *sw = tb_to_switch(dev);
1976 int val, ret;
1977
1978 pm_runtime_get_sync(&sw->dev);
1979
1980 if (!mutex_trylock(&sw->tb->lock)) {
1981 ret = restart_syscall();
1982 goto exit_rpm;
1983 }
1984
1985 if (sw->no_nvm_upgrade) {
1986 ret = -EOPNOTSUPP;
1987 goto exit_unlock;
1988 }
1989
1990 /* If NVMem devices are not yet added */
1991 if (!sw->nvm) {
1992 ret = -EAGAIN;
1993 goto exit_unlock;
1994 }
1995
1996 ret = kstrtoint(buf, 10, &val);
1997 if (ret)
1998 goto exit_unlock;
1999
2000 /* Always clear the authentication status */
2001 nvm_clear_auth_status(sw);
2002
2003 if (val > 0) {
2004 if (val == AUTHENTICATE_ONLY) {
2005 if (disconnect)
2006 ret = -EINVAL;
2007 else
2008 ret = nvm_authenticate(sw, true);
2009 } else {
2010 if (!sw->nvm->flushed) {
2011 if (!sw->nvm->buf) {
2012 ret = -EINVAL;
2013 goto exit_unlock;
2014 }
2015
2016 ret = nvm_validate_and_write(sw);
2017 if (ret || val == WRITE_ONLY)
2018 goto exit_unlock;
2019 }
2020 if (val == WRITE_AND_AUTHENTICATE) {
2021 if (disconnect)
2022 ret = tb_lc_force_power(sw);
2023 else
2024 ret = nvm_authenticate(sw, false);
2025 }
2026 }
2027 }
2028
2029 exit_unlock:
2030 mutex_unlock(&sw->tb->lock);
2031 exit_rpm:
2032 pm_runtime_mark_last_busy(&sw->dev);
2033 pm_runtime_put_autosuspend(&sw->dev);
2034
2035 return ret;
2036 }
2037
nvm_authenticate_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2038 static ssize_t nvm_authenticate_store(struct device *dev,
2039 struct device_attribute *attr, const char *buf, size_t count)
2040 {
2041 int ret = nvm_authenticate_sysfs(dev, buf, false);
2042 if (ret)
2043 return ret;
2044 return count;
2045 }
2046 static DEVICE_ATTR_RW(nvm_authenticate);
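
/*
 * Values written to nvm_authenticate follow the WRITE_AND_AUTHENTICATE,
 * WRITE_ONLY and AUTHENTICATE_ONLY constants used above:
 * WRITE_AND_AUTHENTICATE flushes a pending NVM image and starts
 * authentication, WRITE_ONLY just flushes the image, and
 * AUTHENTICATE_ONLY authenticates whatever is already in the non-active
 * NVM. Every write first clears the cached authentication status. For
 * the on-disconnect variant below, AUTHENTICATE_ONLY is rejected with
 * -EINVAL and authentication is deferred via tb_lc_force_power().
 */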

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (tb_switch_is_usb4(sw)) {
		if (add_uevent_var(env, "USB4_VERSION=%u.0",
				   usb4_switch_version(sw)))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;
	return 0;
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return 4;

	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
			return 1;

		case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
			return 2;

		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
			return 3;
		}
	}

	/*
	 * For unknown switches assume generation to be 1 to be on the
	 * safe side.
	 */
	tb_sw_warn(sw, "unsupported switch device id %#x\n",
		   sw->config.device_id);
	return 1;
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
	if (ret > 0)
		sw->cap_vsec_tmu = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	if (ret > 0)
		sw->cap_lp = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}
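
/*
 * Minimal usage sketch (illustrative only, not part of the driver): a
 * connection manager would allocate, configure and add a newly found
 * router roughly like this. The helper name is hypothetical and error
 * handling is trimmed to the essentials.
 */
static int __maybe_unused example_enumerate_switch(struct tb *tb,
						   struct device *parent,
						   u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	ret = tb_switch_configure(sw);		/* upload config space */
	if (!ret)
		ret = tb_switch_add(sw);	/* expose to userspace */
	if (ret)
		tb_switch_put(sw);		/* drop the last reference */
	return ret;
}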

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called again after resume from low
 * power states to re-initialize the switch.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities. Program it according to the
		 * USB4 version to avoid changing the behaviour of
		 * existing (v1) routers.
		 */
		if (usb4_switch_version(sw) < 2)
			sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
		else
			sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
		sw->config.plug_events_delay = 0xa;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be set up through the
 * router. Can be called for any router.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_configuration_valid(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_configuration_valid(sw);
	return 0;
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}
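
/*
 * For example (illustrative values): a router with UID
 * 0x0123456789abcdef that lacks a fused UUID ends up with the words
 * { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff }, matching what
 * ICM would have generated for the same UID.
 */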

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here which causes adding the switch to
	 * fail. It should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}
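
/*
 * For example, on a router where ports 1 and 2 are adjacent lane
 * adapters that the DROM did not pair, the loop above makes port 1
 * lane 0 and port 2 lane 1 of the same dual-link pair.
 */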

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	unsigned int width_mask;
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
	    !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
		return 0;

	/*
	 * Both lanes need to be in CL0. Here we assume lane 0 is
	 * already in CL0 and check just for lane 1.
	 */
	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	/* Any of these widths means the link is bonded */
	width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		     TB_LINK_WIDTH_ASYM_RX;

	ret = tb_port_wait_for_link_width(down, width_mask, 100);
	if (ret) {
		tb_port_warn(down, "timeout enabling lane bonding\n");
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and its parent. This can be called
 * even if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_switch_downstream_port(sw);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
	if (ret == -ETIMEDOUT)
		tb_sw_warn(sw, "timeout disabling lane bonding\n");

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}
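
/*
 * Minimal usage sketch (illustrative only, not part of the driver): per
 * the recommendation above, a connection manager bonds the lanes first
 * and only then marks the link configured. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_bring_up_link(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_lane_bonding_enable(sw);
	if (ret)
		return ret;

	return tb_switch_configure_link(sw);
}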

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	/*
	 * Unconfigure downstream port so that wake-on-connect can be
	 * configured after router unplug. No need to unconfigure upstream port
	 * since its router is unplugged.
	 */
	up = tb_upstream_port(sw);
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);

	if (sw->is_unplugged)
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);
}

static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}

static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		int res;

		if (!port->cap_usb4)
			continue;

		res = usb4_port_hotplug_enable(port);
		if (res)
			return res;
	}
	return 0;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch is exposed to
 * userspace when this function successfully returns. To remove and
 * release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_check_quirks(sw);

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_clx_init(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_port_hotplug_enable(sw);
	if (ret)
		return ret;

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

static void tb_switch_check_wakes(struct tb_switch *sw)
{
	if (device_may_wakeup(&sw->dev)) {
		if (tb_switch_is_usb4(sw))
			usb4_switch_check_wakes(sw);
	}
}

/**
 * tb_switch_resume() - Resume a switch after sleep
 * @sw: Switch to resume
 * @runtime: Is this resume from runtime suspend or system sleep
 *
 * Resumes and re-enumerates the router (and all its children) if it is
 * still plugged after suspend. Does not enumerate a device router whose
 * UID changed during suspend. If this is a resume from system sleep,
 * notifies the PM core about the wakes that occurred during suspend.
 * Disables all wakes except the USB4 wake of the upstream port, which
 * must always stay enabled for USB4 routers.
 */
int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	if (!runtime)
		tb_switch_check_wakes(sw);

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote &&
			    tb_switch_resume(port->remote->sw, runtime)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to
 * the value of @runtime and then sets the sleep bit for the router. If
 * @sw is the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
	 */
	tb_switch_clx_disable(sw);

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw,
			   "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
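
/*
 * Minimal usage sketch (illustrative only, not part of the driver): the
 * DP resource helpers above are used as query -> alloc -> tunnel
 * lifetime -> dealloc. The helper name is hypothetical.
 */
static int __maybe_unused example_reserve_dp_sink(struct tb_switch *sw,
						  struct tb_port *in)
{
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;	/* sink already in use */

	/* Paired with tb_switch_dealloc_dp_resource() at teardown */
	return tb_switch_alloc_dp_resource(sw, in);
}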

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
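
/*
 * Minimal usage sketch (illustrative only, not part of the driver): the
 * lookups above return a referenced switch, so every successful call
 * must be balanced with tb_switch_put(). The helper name is
 * hypothetical.
 */
static bool __maybe_unused example_switch_exists(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return false;

	tb_switch_put(sw);	/* drop the reference taken by the lookup */
	return true;
}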

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
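
/*
 * For example, tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN) returns the
 * first DP IN adapter of @sw, which is how a connection manager would
 * typically locate a tunneling endpoint.
 */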

/*
 * Can be used to write to a specified PCIe bridge of any Thunderbolt 3
 * device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For the Titan Ridge switch to enter the CLx state, its PCIe bridges
 * must enable entry to the PCIe L1 state. Should be called after the
 * upstream PCIe tunnel has been configured. Due to Intel platform
 * limitations, this should be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs the special flows that bring the xHCI functional for any
 * device connected to the type-C port. Call only after the PCIe tunnel
 * has been established. The function only does the connect if it has
 * not been done already, so it can be called several times for the
 * same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	struct tb_port *port1, *port3;
	int ret;

	if (sw->generation != 3)
		return 0;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		bool usb_port1, usb_port3, xhci_port1, xhci_port3;

		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}