1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Thunderbolt driver - bus logic (NHI independent)
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
7 */
8
9 #ifndef TB_H_
10 #define TB_H_
11
12 #include <linux/nvmem-provider.h>
13 #include <linux/pci.h>
14 #include <linux/thunderbolt.h>
15 #include <linux/uuid.h>
16 #include <linux/bitfield.h>
17
18 #include "tb_regs.h"
19 #include "ctl.h"
20 #include "dma_port.h"
21
22 /* Keep link controller awake during update */
23 #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
24 /* Disable CLx if not supported */
25 #define QUIRK_NO_CLX BIT(1)
26 /* Need to keep power on while USB4 port is in redrive mode */
27 #define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
28
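/*
 * The quirks above are plain bit flags stored in &tb_switch.quirks (set by
 * tb_check_quirks()) and are meant to be tested with a simple mask. A
 * minimal sketch; example_sw_needs_clx_disabled() is a hypothetical helper,
 * not part of this header:
 *
 *	static bool example_sw_needs_clx_disabled(const struct tb_switch *sw)
 *	{
 *		return sw->quirks & QUIRK_NO_CLX;
 *	}
 */
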
29 /**
30 * struct tb_nvm - Structure holding NVM information
31 * @dev: Owner of the NVM
32 * @major: Major version number of the active NVM portion
33 * @minor: Minor version number of the active NVM portion
34 * @id: Identifier used with both NVM portions
35 * @active: Active portion NVMem device
36 * @active_size: Size in bytes of the active NVM
37 * @non_active: Non-active portion NVMem device
38 * @buf: Buffer where the NVM image is stored before it is written to
39 * the actual NVM flash device
40 * @buf_data_start: Where the actual image starts after skipping
41 * possible headers
42 * @buf_data_size: Number of bytes actually consumed by the new NVM
43 * image
44 * @authenticating: The device is authenticating the new NVM
45 * @flushed: The image has been flushed to the storage area
46 * @vops: Router vendor specific NVM operations (optional)
47 *
48 * The user of this structure needs to handle serialization of possible
49 * concurrent access.
50 */
51 struct tb_nvm {
52 struct device *dev;
53 u32 major;
54 u32 minor;
55 int id;
56 struct nvmem_device *active;
57 size_t active_size;
58 struct nvmem_device *non_active;
59 void *buf;
60 void *buf_data_start;
61 size_t buf_data_size;
62 bool authenticating;
63 bool flushed;
64 const struct tb_nvm_vendor_ops *vops;
65 };
66
67 enum tb_nvm_write_ops {
68 WRITE_AND_AUTHENTICATE = 1,
69 WRITE_ONLY = 2,
70 AUTHENTICATE_ONLY = 3,
71 };
72
73 #define TB_SWITCH_KEY_SIZE 32
74 #define TB_SWITCH_MAX_DEPTH 6
75 #define USB4_SWITCH_MAX_DEPTH 5
76
77 /**
78 * enum tb_switch_tmu_mode - TMU mode
79 * @TB_SWITCH_TMU_MODE_OFF: TMU is off
80 * @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode
81 * @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode
82 * @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode
83 * @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode
84 *
85 * Ordering is based on TMU accuracy level (highest last).
86 */
87 enum tb_switch_tmu_mode {
88 TB_SWITCH_TMU_MODE_OFF,
89 TB_SWITCH_TMU_MODE_LOWRES,
90 TB_SWITCH_TMU_MODE_HIFI_UNI,
91 TB_SWITCH_TMU_MODE_HIFI_BI,
92 TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI,
93 };
94
95 /**
96 * struct tb_switch_tmu - Structure holding router TMU configuration
97 * @cap: Offset to the TMU capability (%0 if not found)
98 * @has_ucap: Does the switch support uni-directional mode
99 * @mode: TMU mode related to the upstream router. Reflects the HW
100 * setting. Not used for the host router.
101 * @mode_request: TMU mode requested to be set. Related to the upstream router.
102 * Not used for the host router.
103 */
104 struct tb_switch_tmu {
105 int cap;
106 bool has_ucap;
107 enum tb_switch_tmu_mode mode;
108 enum tb_switch_tmu_mode mode_request;
109 };
110
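/*
 * The TMU is driven in two steps: tb_switch_tmu_configure() records the
 * requested mode in @mode_request and tb_switch_tmu_enable() programs the
 * hardware to match it (both are declared later in this header). A minimal
 * sketch with error handling trimmed:
 *
 *	ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
 *	if (!ret)
 *		ret = tb_switch_tmu_enable(sw);
 */
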
111 /**
112 * struct tb_switch - a thunderbolt switch
113 * @dev: Device for the switch
114 * @config: Switch configuration
115 * @ports: Ports in this switch
116 * @dma_port: If the switch has a port supporting DMA configuration based
117 * mailbox this will hold the pointer to that (%NULL
118 * otherwise). If set it also means the switch has
119 * upgradeable NVM.
120 * @tmu: The switch TMU configuration
121 * @tb: Pointer to the domain the switch belongs to
122 * @uid: Unique ID of the switch
123 * @uuid: UUID of the switch (or %NULL if not supported)
124 * @vendor: Vendor ID of the switch
125 * @device: Device ID of the switch
126 * @vendor_name: Name of the vendor (or %NULL if not known)
127 * @device_name: Name of the device (or %NULL if not known)
128 * @link_speed: Speed of the link in Gb/s
129 * @link_width: Width of the upstream facing link
130 * @link_usb4: Upstream link is USB4
131 * @generation: Switch Thunderbolt generation
132 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
133 * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
134 * @cap_lc: Offset to the link controller capability (%0 if not found)
135 * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
136 * @is_unplugged: The switch is going away
137 * @drom: DROM of the switch (%NULL if not found)
138 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
139 * @no_nvm_upgrade: Prevent NVM upgrade of this switch
140 * @safe_mode: The switch is in safe-mode
141 * @boot: Whether the switch was already authorized on boot or not
142 * @rpm: The switch supports runtime PM
143 * @authorized: Whether the switch is authorized by user or policy
144 * @security_level: Switch supported security level
145 * @debugfs_dir: Pointer to the debugfs structure
146 * @key: Contains the key used to challenge the device or %NULL if not
147 * supported. Size of the key is %TB_SWITCH_KEY_SIZE.
148 * @connection_id: Connection ID used with ICM messaging
149 * @connection_key: Connection key used with ICM messaging
150 * @link: Root switch link this switch is connected to (ICM only)
151 * @depth: Depth in the chain this switch is connected at (ICM only)
152 * @rpm_complete: Completion used to wait for runtime resume to
153 * complete (ICM only)
154 * @quirks: Quirks used for this Thunderbolt switch
155 * @credit_allocation: Are the below buffer allocation parameters valid
156 * @max_usb3_credits: Router preferred number of buffers for USB 3.x
157 * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX
158 * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
159 * @max_pcie_credits: Router preferred number of buffers for PCIe
160 * @max_dma_credits: Router preferred number of buffers for DMA/P2P
161 * @clx: CLx states on the upstream link of the router
162 *
163 * When the switch is being added to or removed from the domain (other
164 * switches) you need to hold the domain lock.
165 *
166 * In USB4 terminology this structure represents a router.
167 *
168 * Note @link_width is not the same as whether the link is bonded or not.
169 * For Gen 4 links the link is also bonded when it is asymmetric. The
170 * correct way to find out whether the link is bonded or not is to look
171 * at the @bonded field of the upstream port.
172 */
173 struct tb_switch {
174 struct device dev;
175 struct tb_regs_switch_header config;
176 struct tb_port *ports;
177 struct tb_dma_port *dma_port;
178 struct tb_switch_tmu tmu;
179 struct tb *tb;
180 u64 uid;
181 uuid_t *uuid;
182 u16 vendor;
183 u16 device;
184 const char *vendor_name;
185 const char *device_name;
186 unsigned int link_speed;
187 enum tb_link_width link_width;
188 bool link_usb4;
189 unsigned int generation;
190 int cap_plug_events;
191 int cap_vsec_tmu;
192 int cap_lc;
193 int cap_lp;
194 bool is_unplugged;
195 u8 *drom;
196 struct tb_nvm *nvm;
197 bool no_nvm_upgrade;
198 bool safe_mode;
199 bool boot;
200 bool rpm;
201 unsigned int authorized;
202 enum tb_security_level security_level;
203 struct dentry *debugfs_dir;
204 u8 *key;
205 u8 connection_id;
206 u8 connection_key;
207 u8 link;
208 u8 depth;
209 struct completion rpm_complete;
210 unsigned long quirks;
211 bool credit_allocation;
212 unsigned int max_usb3_credits;
213 unsigned int min_dp_aux_credits;
214 unsigned int min_dp_main_credits;
215 unsigned int max_pcie_credits;
216 unsigned int max_dma_credits;
217 unsigned int clx;
218 };
219
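/*
 * As the note above says, bonding cannot be inferred from @link_width
 * alone. A sketch of the recommended check for a device router whose
 * upstream port is connected (example_upstream_link_bonded() is a
 * hypothetical helper):
 *
 *	static bool example_upstream_link_bonded(struct tb_switch *sw)
 *	{
 *		return tb_upstream_port(sw)->bonded;
 *	}
 */
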
220 /**
221 * struct tb_bandwidth_group - Bandwidth management group
222 * @tb: Pointer to the domain the group belongs to
223 * @index: Index of the group (aka Group_ID). Valid values %1-%7
224 * @ports: DP IN adapters belonging to this group are linked here
225 *
226 * Any tunnel that requires isochronous bandwidth (that's DP for now) is
227 * attached to a bandwidth group. All tunnels going through the same
228 * USB4 links share the same group and can dynamically distribute the
229 * bandwidth within the group.
230 */
231 struct tb_bandwidth_group {
232 struct tb *tb;
233 int index;
234 struct list_head ports;
235 };
236
237 /**
238 * struct tb_port - a thunderbolt port, part of a tb_switch
239 * @config: Cached port configuration read from registers
240 * @sw: Switch the port belongs to
241 * @remote: Remote port (%NULL if not connected)
242 * @xdomain: Remote host (%NULL if not connected)
243 * @cap_phy: Offset to the lane adapter (PHY) capability (%0 if not found)
244 * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
245 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
246 * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
247 * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0)
248 * @port: Port number on switch
249 * @disabled: Disabled by eeprom or enabled but not implemented
250 * @bonded: true if the port is bonded (two lanes combined as one)
251 * @dual_link_port: If the switch is connected using two ports, points
252 * to the other port.
253 * @link_nr: Is this primary or secondary port on the dual_link.
254 * @in_hopids: Currently allocated input HopIDs
255 * @out_hopids: Currently allocated output HopIDs
256 * @list: Used to link ports to DP resources list
257 * @total_credits: Total number of buffers available for this port
258 * @ctl_credits: Buffers reserved for control path
259 * @dma_credits: Number of credits allocated for DMA tunneling for all
260 * DMA paths through this port.
261 * @group: Bandwidth allocation group the adapter is assigned to. Only
262 * used for DP IN adapters for now.
263 * @group_list: The adapter is linked to the group's list of ports through this
264 * @max_bw: Maximum possible bandwidth through this adapter if set to
265 * non-zero.
266 * @redrive: For DP IN, if true the adapter is in redrive mode.
267 *
268 * In USB4 terminology this structure represents an adapter (protocol or
269 * lane adapter).
270 */
271 struct tb_port {
272 struct tb_regs_port_header config;
273 struct tb_switch *sw;
274 struct tb_port *remote;
275 struct tb_xdomain *xdomain;
276 int cap_phy;
277 int cap_tmu;
278 int cap_adap;
279 int cap_usb4;
280 struct usb4_port *usb4;
281 u8 port;
282 bool disabled;
283 bool bonded;
284 struct tb_port *dual_link_port;
285 u8 link_nr:1;
286 struct ida in_hopids;
287 struct ida out_hopids;
288 struct list_head list;
289 unsigned int total_credits;
290 unsigned int ctl_credits;
291 unsigned int dma_credits;
292 struct tb_bandwidth_group *group;
293 struct list_head group_list;
294 unsigned int max_bw;
295 bool redrive;
296 };
297
298 /**
299 * struct usb4_port - USB4 port device
300 * @dev: Device for the port
301 * @port: Pointer to the lane 0 adapter
302 * @can_offline: Does the port have the necessary platform support to move
303 * it into offline mode and back
304 * @offline: The port is currently in offline mode
305 * @margining: Pointer to margining structure if enabled
306 */
307 struct usb4_port {
308 struct device dev;
309 struct tb_port *port;
310 bool can_offline;
311 bool offline;
312 #ifdef CONFIG_USB4_DEBUGFS_MARGINING
313 struct tb_margining *margining;
314 #endif
315 };
316
317 /**
318 * struct tb_retimer - Thunderbolt retimer
319 * @dev: Device for the retimer
320 * @tb: Pointer to the domain the retimer belongs to
321 * @index: Retimer index facing the router USB4 port
322 * @vendor: Vendor ID of the retimer
323 * @device: Device ID of the retimer
324 * @port: Pointer to the lane 0 adapter
325 * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
326 * @no_nvm_upgrade: Prevent NVM upgrade of this retimer
327 * @auth_status: Status of last NVM authentication
328 */
329 struct tb_retimer {
330 struct device dev;
331 struct tb *tb;
332 u8 index;
333 u32 vendor;
334 u32 device;
335 struct tb_port *port;
336 struct tb_nvm *nvm;
337 bool no_nvm_upgrade;
338 u32 auth_status;
339 };
340
341 /**
342 * struct tb_path_hop - routing information for a tb_path
343 * @in_port: Ingress port of a switch
344 * @out_port: Egress port of a switch where the packet is routed out
345 * (must be on the same switch as @in_port)
346 * @in_hop_index: HopID where the path configuration entry is placed in
347 * the path config space of @in_port.
348 * @in_counter_index: Used counter index (not used in the driver
349 * currently, %-1 to disable)
350 * @next_hop_index: HopID of the packet when it is routed out from @out_port
351 * @initial_credits: Number of initial flow control credits allocated for
352 * the path
353 * @nfc_credits: Number of non-flow controlled buffers allocated for the
354 * @in_port.
355 *
356 * Hop configuration is always done on the IN port of a switch.
357 * in_port and out_port have to be on the same switch. Packets arriving on
358 * in_port with "hop" = in_hop_index will get routed out through out_port. The
359 * next hop to take (on out_port->remote) is determined by
360 * next_hop_index. When routing packet to another switch (out->remote is
361 * set) the @next_hop_index must match the @in_hop_index of that next
362 * hop to make routing possible.
363 *
364 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
365 * port.
366 */
367 struct tb_path_hop {
368 struct tb_port *in_port;
369 struct tb_port *out_port;
370 int in_hop_index;
371 int in_counter_index;
372 int next_hop_index;
373 unsigned int initial_credits;
374 unsigned int nfc_credits;
375 };
376
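/*
 * The chaining rule described above in code form: when a hop exits towards
 * another router, its @next_hop_index must equal the @in_hop_index that the
 * next hop was programmed with. Illustrative check only
 * (example_hops_chain() is hypothetical):
 *
 *	static bool example_hops_chain(const struct tb_path_hop *a,
 *				       const struct tb_path_hop *b)
 *	{
 *		return a->out_port->remote == b->in_port &&
 *		       a->next_hop_index == b->in_hop_index;
 *	}
 */
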
377 /**
378 * enum tb_path_port - path options mask
379 * @TB_PATH_NONE: Do not activate on any hop on path
380 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
381 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
382 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
383 * @TB_PATH_ALL: Activate on all hops on the path
384 */
385 enum tb_path_port {
386 TB_PATH_NONE = 0,
387 TB_PATH_SOURCE = 1,
388 TB_PATH_INTERNAL = 2,
389 TB_PATH_DESTINATION = 4,
390 TB_PATH_ALL = 7,
391 };
392
393 /**
394 * struct tb_path - a unidirectional path between two ports
395 * @tb: Pointer to the domain structure
396 * @name: Name of the path (used for debugging)
397 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
398 * @egress_shared_buffer: Shared buffering used for egress ports on the path
399 * @ingress_fc_enable: Flow control for ingress ports on the path
400 * @egress_fc_enable: Flow control for egress ports on the path
401 * @priority: Priority group of the path
402 * @weight: Weight of the path inside the priority group
403 * @drop_packages: Drop packets from queue tail or head
404 * @activated: Is the path active
405 * @clear_fc: Clear all flow control from the path config space entries
406 * when deactivating this path
407 * @hops: Path hops
408 * @path_length: How many hops the path uses
409 * @alloc_hopid: Does this path consume port HopID
410 *
411 * A path consists of a number of hops (see &struct tb_path_hop). To
412 * establish a PCIe tunnel two paths have to be created between the two
413 * PCIe ports.
414 */
415 struct tb_path {
416 struct tb *tb;
417 const char *name;
418 enum tb_path_port ingress_shared_buffer;
419 enum tb_path_port egress_shared_buffer;
420 enum tb_path_port ingress_fc_enable;
421 enum tb_path_port egress_fc_enable;
422
423 unsigned int priority:3;
424 int weight:4;
425 bool drop_packages;
426 bool activated;
427 bool clear_fc;
428 struct tb_path_hop *hops;
429 int path_length;
430 bool alloc_hopid;
431 };
432
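/*
 * A path is built with tb_path_alloc() and programmed to the routers with
 * tb_path_activate() (declared later in this header); a PCIe tunnel needs
 * one such path per direction. A rough sketch, assuming @down and @up are
 * the PCIe adapters and HopID 8 is usable on both ends:
 *
 *	path = tb_path_alloc(tb, down, 8, up, 8, 0, "PCIe Down");
 *	if (!path)
 *		return -ENOMEM;
 *	ret = tb_path_activate(path);
 */
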
433 /* HopIDs 0-7 are reserved by the Thunderbolt protocol */
434 #define TB_PATH_MIN_HOPID 8
435 /*
436 * Support paths from the farthest (depth 6) router to the host and back
437 * to the same level (not necessarily to the same router).
438 */
439 #define TB_PATH_MAX_HOPS (7 * 2)
440
441 /* Possible wake types */
442 #define TB_WAKE_ON_CONNECT BIT(0)
443 #define TB_WAKE_ON_DISCONNECT BIT(1)
444 #define TB_WAKE_ON_USB4 BIT(2)
445 #define TB_WAKE_ON_USB3 BIT(3)
446 #define TB_WAKE_ON_PCIE BIT(4)
447 #define TB_WAKE_ON_DP BIT(5)
448
449 /* CL states */
450 #define TB_CL0S BIT(0)
451 #define TB_CL1 BIT(1)
452 #define TB_CL2 BIT(2)
453
454 /**
455 * struct tb_cm_ops - Connection manager specific operations vector
456 * @driver_ready: Called right after control channel is started. Used by
457 * ICM to send driver ready message to the firmware.
458 * @start: Starts the domain
459 * @stop: Stops the domain
460 * @suspend_noirq: Connection manager specific suspend_noirq
461 * @resume_noirq: Connection manager specific resume_noirq
462 * @suspend: Connection manager specific suspend
463 * @freeze_noirq: Connection manager specific freeze_noirq
464 * @thaw_noirq: Connection manager specific thaw_noirq
465 * @complete: Connection manager specific complete
466 * @runtime_suspend: Connection manager specific runtime_suspend
467 * @runtime_resume: Connection manager specific runtime_resume
468 * @runtime_suspend_switch: Runtime suspend a switch
469 * @runtime_resume_switch: Runtime resume a switch
470 * @handle_event: Handle thunderbolt event
471 * @get_boot_acl: Get boot ACL list
472 * @set_boot_acl: Set boot ACL list
473 * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel)
474 * @approve_switch: Approve switch
475 * @add_switch_key: Add key to switch
476 * @challenge_switch_key: Challenge switch using key
477 * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
478 * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
479 * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
480 * @usb4_switch_op: Optional proxy for USB4 router operations. If set
481 * this will be called whenever USB4 router operation is
482 * performed. If this returns %-EOPNOTSUPP then the
483 * native USB4 router operation is called.
484 * @usb4_switch_nvm_authenticate_status: Optional callback that the CM
485 * implementation can use to
486 * return the status of the USB4 NVM_AUTH
487 * router operation.
488 */
489 struct tb_cm_ops {
490 int (*driver_ready)(struct tb *tb);
491 int (*start)(struct tb *tb, bool reset);
492 void (*stop)(struct tb *tb);
493 int (*suspend_noirq)(struct tb *tb);
494 int (*resume_noirq)(struct tb *tb);
495 int (*suspend)(struct tb *tb);
496 int (*freeze_noirq)(struct tb *tb);
497 int (*thaw_noirq)(struct tb *tb);
498 void (*complete)(struct tb *tb);
499 int (*runtime_suspend)(struct tb *tb);
500 int (*runtime_resume)(struct tb *tb);
501 int (*runtime_suspend_switch)(struct tb_switch *sw);
502 int (*runtime_resume_switch)(struct tb_switch *sw);
503 void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
504 const void *buf, size_t size);
505 int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
506 int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
507 int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw);
508 int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
509 int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
510 int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
511 const u8 *challenge, u8 *response);
512 int (*disconnect_pcie_paths)(struct tb *tb);
513 int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
514 int transmit_path, int transmit_ring,
515 int receive_path, int receive_ring);
516 int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
517 int transmit_path, int transmit_ring,
518 int receive_path, int receive_ring);
519 int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
520 u8 *status, const void *tx_data, size_t tx_data_len,
521 void *rx_data, size_t rx_data_len);
522 int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw,
523 u32 *status);
524 };
525
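/*
 * A connection manager fills in only the callbacks it needs; the software
 * CM (tb.c) and the firmware based CM (icm.c) each provide their own
 * vector. A minimal sketch with hypothetical callbacks:
 *
 *	static const struct tb_cm_ops example_cm_ops = {
 *		.start = example_start,
 *		.stop = example_stop,
 *		.handle_event = example_handle_event,
 *	};
 */
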
526 static inline void *tb_priv(struct tb *tb)
527 {
528 return (void *)tb->privdata;
529 }
530
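/*
 * The connection manager private data lives right after struct tb. It is
 * sized via the @privsize argument of tb_domain_alloc() and retrieved with
 * tb_priv(). Sketch, assuming a hypothetical struct example_cm:
 *
 *	tb = tb_domain_alloc(nhi, timeout_msec, sizeof(struct example_cm));
 *	if (tb) {
 *		struct example_cm *ecm = tb_priv(tb);
 *		...
 *	}
 */
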
531 #define TB_AUTOSUSPEND_DELAY 15000 /* ms */
532
533 /* helper functions & macros */
534
535 /**
536 * tb_upstream_port() - return the upstream port of a switch
537 *
538 * Every switch has an upstream port (for the root switch it is the NHI).
539 *
540 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
541 * non root switches (on the NHI port remote is always NULL).
542 *
543 * Return: Returns the upstream port of the switch.
544 */
545 static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
546 {
547 return &sw->ports[sw->config.upstream_port_number];
548 }
549
550 /**
551 * tb_is_upstream_port() - Is the port upstream facing
552 * @port: Port to check
553 *
554 * Returns true if @port is an upstream facing port. In case of dual link
555 * ports both ports return true.
556 */
557 static inline bool tb_is_upstream_port(const struct tb_port *port)
558 {
559 const struct tb_port *upstream_port = tb_upstream_port(port->sw);
560 return port == upstream_port || port->dual_link_port == upstream_port;
561 }
562
563 static inline u64 tb_route(const struct tb_switch *sw)
564 {
565 return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
566 }
567
568 static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
569 {
570 u8 port;
571
572 port = route >> (sw->config.depth * 8);
573 if (WARN_ON(port > sw->config.max_port_number))
574 return NULL;
575 return &sw->ports[port];
576 }
577
578 /**
579 * tb_port_has_remote() - Does the port have a switch connected downstream
580 * @port: Port to check
581 *
582 * Returns true only when the port is a primary port and has its remote set.
583 */
584 static inline bool tb_port_has_remote(const struct tb_port *port)
585 {
586 if (tb_is_upstream_port(port))
587 return false;
588 if (!port->remote)
589 return false;
590 if (port->dual_link_port && port->link_nr)
591 return false;
592
593 return true;
594 }
595
596 static inline bool tb_port_is_null(const struct tb_port *port)
597 {
598 return port && port->port && port->config.type == TB_TYPE_PORT;
599 }
600
601 static inline bool tb_port_is_nhi(const struct tb_port *port)
602 {
603 return port && port->config.type == TB_TYPE_NHI;
604 }
605
606 static inline bool tb_port_is_pcie_down(const struct tb_port *port)
607 {
608 return port && port->config.type == TB_TYPE_PCIE_DOWN;
609 }
610
611 static inline bool tb_port_is_pcie_up(const struct tb_port *port)
612 {
613 return port && port->config.type == TB_TYPE_PCIE_UP;
614 }
615
616 static inline bool tb_port_is_dpin(const struct tb_port *port)
617 {
618 return port && port->config.type == TB_TYPE_DP_HDMI_IN;
619 }
620
621 static inline bool tb_port_is_dpout(const struct tb_port *port)
622 {
623 return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
624 }
625
626 static inline bool tb_port_is_usb3_down(const struct tb_port *port)
627 {
628 return port && port->config.type == TB_TYPE_USB3_DOWN;
629 }
630
631 static inline bool tb_port_is_usb3_up(const struct tb_port *port)
632 {
633 return port && port->config.type == TB_TYPE_USB3_UP;
634 }
635
636 static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
637 enum tb_cfg_space space, u32 offset, u32 length)
638 {
639 if (sw->is_unplugged)
640 return -ENODEV;
641 return tb_cfg_read(sw->tb->ctl,
642 buffer,
643 tb_route(sw),
644 0,
645 space,
646 offset,
647 length);
648 }
649
650 static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
651 enum tb_cfg_space space, u32 offset, u32 length)
652 {
653 if (sw->is_unplugged)
654 return -ENODEV;
655 return tb_cfg_write(sw->tb->ctl,
656 buffer,
657 tb_route(sw),
658 0,
659 space,
660 offset,
661 length);
662 }
663
664 static inline int tb_port_read(struct tb_port *port, void *buffer,
665 enum tb_cfg_space space, u32 offset, u32 length)
666 {
667 if (port->sw->is_unplugged)
668 return -ENODEV;
669 return tb_cfg_read(port->sw->tb->ctl,
670 buffer,
671 tb_route(port->sw),
672 port->port,
673 space,
674 offset,
675 length);
676 }
677
678 static inline int tb_port_write(struct tb_port *port, const void *buffer,
679 enum tb_cfg_space space, u32 offset, u32 length)
680 {
681 if (port->sw->is_unplugged)
682 return -ENODEV;
683 return tb_cfg_write(port->sw->tb->ctl,
684 buffer,
685 tb_route(port->sw),
686 port->port,
687 space,
688 offset,
689 length);
690 }
691
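/*
 * The four helpers above transfer @length 32-bit dwords from/to the given
 * config space and return %-ENODEV once the router has been marked
 * unplugged. A sketch reading a single dword from an adapter config space
 * (the offset is illustrative):
 *
 *	u32 val;
 *	int ret = tb_port_read(port, &val, TB_CFG_PORT, 5, 1);
 *	if (ret)
 *		return ret;
 */
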
692 #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
693 #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
694 #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
695 #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
696 #define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
697
698 #define __TB_SW_PRINT(level, sw, fmt, arg...) \
699 do { \
700 const struct tb_switch *__sw = (sw); \
701 level(__sw->tb, "%llx: " fmt, \
702 tb_route(__sw), ## arg); \
703 } while (0)
704 #define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
705 #define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
706 #define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
707 #define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)
708
709 #define __TB_PORT_PRINT(level, _port, fmt, arg...) \
710 do { \
711 const struct tb_port *__port = (_port); \
712 level(__port->sw->tb, "%llx:%u: " fmt, \
713 tb_route(__port->sw), __port->port, ## arg); \
714 } while (0)
715 #define tb_port_WARN(port, fmt, arg...) \
716 __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
717 #define tb_port_warn(port, fmt, arg...) \
718 __TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
719 #define tb_port_info(port, fmt, arg...) \
720 __TB_PORT_PRINT(tb_info, port, fmt, ##arg)
721 #define tb_port_dbg(port, fmt, arg...) \
722 __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
723
724 struct tb *icm_probe(struct tb_nhi *nhi);
725 struct tb *tb_probe(struct tb_nhi *nhi);
726
727 extern struct device_type tb_domain_type;
728 extern struct device_type tb_retimer_type;
729 extern struct device_type tb_switch_type;
730 extern struct device_type usb4_port_device_type;
731
732 int tb_domain_init(void);
733 void tb_domain_exit(void);
734 int tb_xdomain_init(void);
735 void tb_xdomain_exit(void);
736
737 struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
738 int tb_domain_add(struct tb *tb, bool reset);
739 void tb_domain_remove(struct tb *tb);
740 int tb_domain_suspend_noirq(struct tb *tb);
741 int tb_domain_resume_noirq(struct tb *tb);
742 int tb_domain_suspend(struct tb *tb);
743 int tb_domain_freeze_noirq(struct tb *tb);
744 int tb_domain_thaw_noirq(struct tb *tb);
745 void tb_domain_complete(struct tb *tb);
746 int tb_domain_runtime_suspend(struct tb *tb);
747 int tb_domain_runtime_resume(struct tb *tb);
748 int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw);
749 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
750 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
751 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
752 int tb_domain_disconnect_pcie_paths(struct tb *tb);
753 int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
754 int transmit_path, int transmit_ring,
755 int receive_path, int receive_ring);
756 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
757 int transmit_path, int transmit_ring,
758 int receive_path, int receive_ring);
759 int tb_domain_disconnect_all_paths(struct tb *tb);
760
761 static inline struct tb *tb_domain_get(struct tb *tb)
762 {
763 if (tb)
764 get_device(&tb->dev);
765 return tb;
766 }
767
768 static inline void tb_domain_put(struct tb *tb)
769 {
770 put_device(&tb->dev);
771 }
772
773 struct tb_nvm *tb_nvm_alloc(struct device *dev);
774 int tb_nvm_read_version(struct tb_nvm *nvm);
775 int tb_nvm_validate(struct tb_nvm *nvm);
776 int tb_nvm_write_headers(struct tb_nvm *nvm);
777 int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read);
778 int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
779 size_t bytes);
780 int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write);
781 void tb_nvm_free(struct tb_nvm *nvm);
782 void tb_nvm_exit(void);
783
784 typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
785 typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t);
786
787 int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
788 unsigned int retries, read_block_fn read_block,
789 void *read_block_data);
790 int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
791 unsigned int retries, write_block_fn write_next_block,
792 void *write_block_data);
793
794 int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
795 size_t size);
796 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
797 u64 route);
798 struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
799 struct device *parent, u64 route);
800 int tb_switch_configure(struct tb_switch *sw);
801 int tb_switch_configuration_valid(struct tb_switch *sw);
802 int tb_switch_add(struct tb_switch *sw);
803 void tb_switch_remove(struct tb_switch *sw);
804 void tb_switch_suspend(struct tb_switch *sw, bool runtime);
805 int tb_switch_resume(struct tb_switch *sw, bool runtime);
806 int tb_switch_reset(struct tb_switch *sw);
807 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
808 u32 value, int timeout_msec);
809 void tb_sw_set_unplugged(struct tb_switch *sw);
810 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
811 enum tb_port_type type);
812 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
813 u8 depth);
814 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
815 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
816
817 /**
818 * tb_switch_for_each_port() - Iterate over each switch port
819 * @sw: Switch whose ports to iterate
820 * @p: Port used as iterator
821 *
822 * Iterates over each switch port skipping the control port (port %0).
823 */
824 #define tb_switch_for_each_port(sw, p) \
825 for ((p) = &(sw)->ports[1]; \
826 (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
827
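/*
 * Example use of the iterator above: counting the lane adapters of a
 * router (example_count_lane_adapters() is a hypothetical helper):
 *
 *	static int example_count_lane_adapters(struct tb_switch *sw)
 *	{
 *		struct tb_port *port;
 *		int count = 0;
 *
 *		tb_switch_for_each_port(sw, port) {
 *			if (tb_port_is_null(port))
 *				count++;
 *		}
 *		return count;
 *	}
 */
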
828 static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
829 {
830 if (sw)
831 get_device(&sw->dev);
832 return sw;
833 }
834
835 static inline void tb_switch_put(struct tb_switch *sw)
836 {
837 put_device(&sw->dev);
838 }
839
840 static inline bool tb_is_switch(const struct device *dev)
841 {
842 return dev->type == &tb_switch_type;
843 }
844
845 static inline struct tb_switch *tb_to_switch(const struct device *dev)
846 {
847 if (tb_is_switch(dev))
848 return container_of(dev, struct tb_switch, dev);
849 return NULL;
850 }
851
852 static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
853 {
854 return tb_to_switch(sw->dev.parent);
855 }
856
857 /**
858 * tb_switch_downstream_port() - Return downstream facing port of parent router
859 * @sw: Device router pointer
860 *
861 * Only call for device routers. Returns the downstream facing port of
862 * the parent router.
863 */
864 static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
865 {
866 if (WARN_ON(!tb_route(sw)))
867 return NULL;
868 return tb_port_at(tb_route(sw), tb_switch_parent(sw));
869 }
870
871 static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
872 {
873 return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
874 sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
875 }
876
877 static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
878 {
879 return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
880 sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
881 }
882
883 static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
884 {
885 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
886 switch (sw->config.device_id) {
887 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
888 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
889 return true;
890 }
891 }
892 return false;
893 }
894
895 static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
896 {
897 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
898 switch (sw->config.device_id) {
899 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
900 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
901 return true;
902 }
903 }
904 return false;
905 }
906
907 static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
908 {
909 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
910 switch (sw->config.device_id) {
911 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
912 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
913 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
914 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
915 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
916 return true;
917 }
918 }
919 return false;
920 }
921
922 static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
923 {
924 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
925 switch (sw->config.device_id) {
926 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
927 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
928 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
929 return true;
930 }
931 }
932 return false;
933 }
934
935 static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
936 {
937 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
938 switch (sw->config.device_id) {
939 case PCI_DEVICE_ID_INTEL_TGL_NHI0:
940 case PCI_DEVICE_ID_INTEL_TGL_NHI1:
941 case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
942 case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
943 return true;
944 }
945 }
946 return false;
947 }
948
949 /**
950 * tb_switch_is_icm() - Is the switch handled by ICM firmware
951 * @sw: Switch to check
952 *
953 * In case there is a need to differentiate whether ICM firmware or SW CM
954 * is handling @sw this function can be called. It is valid to call this
955 * after tb_switch_alloc() and tb_switch_configure() have been called
956 * (the latter only in the SW CM case).
957 */
958 static inline bool tb_switch_is_icm(const struct tb_switch *sw)
959 {
960 return !sw->config.enabled;
961 }
962
963 int tb_switch_lane_bonding_enable(struct tb_switch *sw);
964 void tb_switch_lane_bonding_disable(struct tb_switch *sw);
965 int tb_switch_configure_link(struct tb_switch *sw);
966 void tb_switch_unconfigure_link(struct tb_switch *sw);
967
968 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
969 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
970 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
971
972 int tb_switch_tmu_init(struct tb_switch *sw);
973 int tb_switch_tmu_post_time(struct tb_switch *sw);
974 int tb_switch_tmu_disable(struct tb_switch *sw);
975 int tb_switch_tmu_enable(struct tb_switch *sw);
976 int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode);
977
978 /**
979 * tb_switch_tmu_is_configured() - Is given TMU mode configured
980 * @sw: Router whose mode to check
981 * @mode: Mode to check
982 *
983 * Checks if the given router TMU mode is configured to @mode. Note the
984 * router TMU might not be enabled in this mode.
985 */
986 static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
987 enum tb_switch_tmu_mode mode)
988 {
989 return sw->tmu.mode_request == mode;
990 }
991
992 /**
993 * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
994 * @sw: Router whose TMU mode to check
995 *
996 * Returns true if the hardware TMU configuration matches the requested
997 * configuration (and is not %TB_SWITCH_TMU_MODE_OFF).
998 */
999 static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
1000 {
1001 return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF &&
1002 sw->tmu.mode == sw->tmu.mode_request;
1003 }
1004
1005 bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx);
1006
1007 int tb_switch_clx_init(struct tb_switch *sw);
1008 bool tb_switch_clx_is_supported(const struct tb_switch *sw);
1009 int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx);
1010 int tb_switch_clx_disable(struct tb_switch *sw);
1011
1012 /**
1013 * tb_switch_clx_is_enabled() - Checks if the CLx is enabled
1014 * @sw: Router to check for the CLx
1015 * @clx: The CLx states to check for
1016 *
1017 * Checks if the specified CLx is enabled on the router upstream link.
1018 * Returns true if any of the given states is enabled.
1019 *
1020 * Not applicable for a host router.
1021 */
1022 static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw,
1023 unsigned int clx)
1024 {
1025 return sw->clx & clx;
1026 }
1027
1028 int tb_switch_pcie_l1_enable(struct tb_switch *sw);
1029
1030 int tb_switch_xhci_connect(struct tb_switch *sw);
1031 void tb_switch_xhci_disconnect(struct tb_switch *sw);
1032
1033 int tb_port_state(struct tb_port *port);
1034 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
1035 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
1036 int tb_port_clear_counter(struct tb_port *port, int counter);
1037 int tb_port_unlock(struct tb_port *port);
1038 int tb_port_enable(struct tb_port *port);
1039 int tb_port_disable(struct tb_port *port);
1040 int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
1041 void tb_port_release_in_hopid(struct tb_port *port, int hopid);
1042 int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
1043 void tb_port_release_out_hopid(struct tb_port *port, int hopid);
1044 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
1045 struct tb_port *prev);
1046
1047 static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
1048 {
1049 return tb_port_is_null(port) && port->sw->credit_allocation;
1050 }
1051
1052 /**
1053 * tb_for_each_port_on_path() - Iterate over each port on path
1054 * @src: Source port
1055 * @dst: Destination port
1056 * @p: Port used as iterator
1057 *
1058 * Walks over each port on path from @src to @dst.
1059 */
1060 #define tb_for_each_port_on_path(src, dst, p) \
1061 for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
1062 (p) = tb_next_port_on_path((src), (dst), (p)))
1063
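/*
 * Sketch of walking every adapter between two ports with the iterator
 * above, for example to check that the whole path is still usable
 * (illustrative fragment only):
 *
 *	struct tb_port *p;
 *
 *	tb_for_each_port_on_path(src, dst, p) {
 *		if (!tb_port_is_enabled(p))
 *			return false;
 *	}
 */
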
1064 int tb_port_get_link_speed(struct tb_port *port);
1065 int tb_port_get_link_width(struct tb_port *port);
1066 int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
1067 int tb_port_lane_bonding_enable(struct tb_port *port);
1068 void tb_port_lane_bonding_disable(struct tb_port *port);
1069 int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
1070 int timeout_msec);
1071 int tb_port_update_credits(struct tb_port *port);
1072
1073 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
1074 int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
1075 int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
1076 int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
1077 int tb_port_next_cap(struct tb_port *port, unsigned int offset);
1078 bool tb_port_is_enabled(struct tb_port *port);
1079
1080 bool tb_usb3_port_is_enabled(struct tb_port *port);
1081 int tb_usb3_port_enable(struct tb_port *port, bool enable);
1082
1083 bool tb_pci_port_is_enabled(struct tb_port *port);
1084 int tb_pci_port_enable(struct tb_port *port, bool enable);
1085
1086 int tb_dp_port_hpd_is_active(struct tb_port *port);
1087 int tb_dp_port_hpd_clear(struct tb_port *port);
1088 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1089 unsigned int aux_tx, unsigned int aux_rx);
1090 bool tb_dp_port_is_enabled(struct tb_port *port);
1091 int tb_dp_port_enable(struct tb_port *port, bool enable);
1092
1093 struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
1094 struct tb_port *dst, int dst_hopid,
1095 struct tb_port **last, const char *name,
1096 bool alloc_hopid);
1097 struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
1098 struct tb_port *dst, int dst_hopid, int link_nr,
1099 const char *name);
1100 void tb_path_free(struct tb_path *path);
1101 int tb_path_activate(struct tb_path *path);
1102 void tb_path_deactivate(struct tb_path *path);
1103 int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
1104 bool tb_path_is_invalid(struct tb_path *path);
1105 bool tb_path_port_on_path(const struct tb_path *path,
1106 const struct tb_port *port);
1107
1108 /**
1109 * tb_path_for_each_hop() - Iterate over each hop on path
1110 * @path: Path whose hops to iterate
1111 * @hop: Hop used as iterator
1112 *
1113 * Iterates over each hop on path.
1114 */
1115 #define tb_path_for_each_hop(path, hop) \
1116 for ((hop) = &(path)->hops[0]; \
1117 (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++)
1118
1119 int tb_drom_read(struct tb_switch *sw);
1120 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
1121
1122 int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
1123 int tb_lc_reset_port(struct tb_port *port);
1124 int tb_lc_configure_port(struct tb_port *port);
1125 void tb_lc_unconfigure_port(struct tb_port *port);
1126 int tb_lc_configure_xdomain(struct tb_port *port);
1127 void tb_lc_unconfigure_xdomain(struct tb_port *port);
1128 int tb_lc_start_lane_initialization(struct tb_port *port);
1129 bool tb_lc_is_clx_supported(struct tb_port *port);
1130 bool tb_lc_is_usb_plugged(struct tb_port *port);
1131 bool tb_lc_is_xhci_connected(struct tb_port *port);
1132 int tb_lc_xhci_connect(struct tb_port *port);
1133 void tb_lc_xhci_disconnect(struct tb_port *port);
1134 int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
1135 int tb_lc_set_sleep(struct tb_switch *sw);
1136 bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
1137 bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
1138 int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
1139 int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
1140 int tb_lc_force_power(struct tb_switch *sw);
1141
1142 static inline int tb_route_length(u64 route)
1143 {
1144 return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
1145 }
1146
1147 /**
1148 * tb_downstream_route() - get route to downstream switch
1149 *
1150 * Port must not be the upstream port (otherwise a loop is created).
1151 *
1152 * Return: Returns a route to the switch behind @port.
1153 */
1154 static inline u64 tb_downstream_route(struct tb_port *port)
1155 {
1156 return tb_route(port->sw)
1157 | ((u64) port->port << (port->sw->config.depth * 8));
1158 }
1159
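/*
 * Routes pack one downstream port number per byte, indexed by depth. As a
 * worked example: a router behind port 3 of the host router and port 1 of
 * the next router has route 0x0103. For that router tb_route() returns
 * 0x0103 and tb_route_length() returns 2, while tb_port_at(0x0103, host_sw)
 * resolves to port 3 of the host router and tb_downstream_route() of that
 * port returns 0x3.
 */
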
1160 bool tb_is_xdomain_enabled(void);
1161 bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
1162 const void *buf, size_t size);
1163 struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
1164 u64 route, const uuid_t *local_uuid,
1165 const uuid_t *remote_uuid);
1166 void tb_xdomain_add(struct tb_xdomain *xd);
1167 void tb_xdomain_remove(struct tb_xdomain *xd);
1168 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
1169 u8 depth);
1170
1171 static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
1172 {
1173 return tb_to_switch(xd->dev.parent);
1174 }
1175
1176 /**
1177 * tb_xdomain_downstream_port() - Return downstream facing port of parent router
1178 * @xd: Xdomain pointer
1179 *
1180 * Returns the downstream port the XDomain is connected to.
1181 */
1182 static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd)
1183 {
1184 return tb_port_at(xd->route, tb_xdomain_parent(xd));
1185 }
1186
1187 int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
1188 size_t size);
1189 int tb_retimer_scan(struct tb_port *port, bool add);
1190 void tb_retimer_remove_all(struct tb_port *port);
1191
1192 static inline bool tb_is_retimer(const struct device *dev)
1193 {
1194 return dev->type == &tb_retimer_type;
1195 }
1196
1197 static inline struct tb_retimer *tb_to_retimer(struct device *dev)
1198 {
1199 if (tb_is_retimer(dev))
1200 return container_of(dev, struct tb_retimer, dev);
1201 return NULL;
1202 }
1203
1204 /**
1205 * usb4_switch_version() - Returns USB4 version of the router
1206 * @sw: Router to check
1207 *
1208 * Returns the major version of the USB4 router (%1 for v1, %2 for v2 and so
1209 * on). Can be called for a pre-USB4 router too and in that case returns %0.
1210 */
1211 static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
1212 {
1213 return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version);
1214 }
1215
1216 /**
1217 * tb_switch_is_usb4() - Is the switch USB4 compliant
1218 * @sw: Switch to check
1219 *
1220 * Returns true if @sw is a USB4 compliant router, false otherwise.
1221 */
1222 static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
1223 {
1224 return usb4_switch_version(sw) > 0;
1225 }
1226
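/*
 * Sketch of gating behaviour on the USB4 spec version of a router; pre-USB4
 * (Thunderbolt 1-3) routers report version %0 so they are filtered out as
 * well (example_enable_v2_features() is hypothetical):
 *
 *	if (usb4_switch_version(sw) >= 2)
 *		example_enable_v2_features(sw);
 */
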
1227 void usb4_switch_check_wakes(struct tb_switch *sw);
1228 int usb4_switch_setup(struct tb_switch *sw);
1229 int usb4_switch_configuration_valid(struct tb_switch *sw);
1230 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
1231 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
1232 size_t size);
1233 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
1234 int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
1235 int usb4_switch_set_sleep(struct tb_switch *sw);
1236 int usb4_switch_nvm_sector_size(struct tb_switch *sw);
1237 int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
1238 size_t size);
1239 int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address);
1240 int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
1241 const void *buf, size_t size);
1242 int usb4_switch_nvm_authenticate(struct tb_switch *sw);
1243 int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status);
1244 int usb4_switch_credits_init(struct tb_switch *sw);
1245 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
1246 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
1247 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
1248 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
1249 const struct tb_port *port);
1250 struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
1251 const struct tb_port *port);
1252 int usb4_switch_add_ports(struct tb_switch *sw);
1253 void usb4_switch_remove_ports(struct tb_switch *sw);
1254
1255 int usb4_port_unlock(struct tb_port *port);
1256 int usb4_port_hotplug_enable(struct tb_port *port);
1257 int usb4_port_reset(struct tb_port *port);
1258 int usb4_port_configure(struct tb_port *port);
1259 void usb4_port_unconfigure(struct tb_port *port);
1260 int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
1261 void usb4_port_unconfigure_xdomain(struct tb_port *port);
1262 int usb4_port_router_offline(struct tb_port *port);
1263 int usb4_port_router_online(struct tb_port *port);
1264 int usb4_port_enumerate_retimers(struct tb_port *port);
1265 bool usb4_port_clx_supported(struct tb_port *port);
1266 int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
1267 int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
1268 unsigned int ber_level, bool timing, bool right_high,
1269 u32 *results);
1270 int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
1271 bool right_high, u32 counter);
1272 int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
1273
1274 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
1275 int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
1276 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
1277 u8 size);
1278 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
1279 const void *buf, u8 size);
1280 int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
1281 int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
1282 int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1283 unsigned int address);
1284 int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
1285 unsigned int address, const void *buf,
1286 size_t size);
1287 int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
1288 int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1289 u32 *status);
1290 int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1291 unsigned int address, void *buf, size_t size);
1292
1293 int usb4_usb3_port_max_link_rate(struct tb_port *port);
1294 int usb4_usb3_port_actual_link_rate(struct tb_port *port);
1295 int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
1296 int *downstream_bw);
1297 int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
1298 int *downstream_bw);
1299 int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
1300 int *downstream_bw);
1301
1302 int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
1303 bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port);
1304 bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port);
1305 int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
1306 bool supported);
1307 int usb4_dp_port_group_id(struct tb_port *port);
1308 int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
1309 int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
1310 int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
1311 int usb4_dp_port_granularity(struct tb_port *port);
1312 int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
1313 int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw);
1314 int usb4_dp_port_allocated_bandwidth(struct tb_port *port);
1315 int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw);
1316 int usb4_dp_port_requested_bandwidth(struct tb_port *port);
1317
1318 int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable);
1319
1320 static inline bool tb_is_usb4_port_device(const struct device *dev)
1321 {
1322 return dev->type == &usb4_port_device_type;
1323 }
1324
1325 static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
1326 {
1327 if (tb_is_usb4_port_device(dev))
1328 return container_of(dev, struct usb4_port, dev);
1329 return NULL;
1330 }
1331
1332 struct usb4_port *usb4_port_device_add(struct tb_port *port);
1333 void usb4_port_device_remove(struct usb4_port *usb4);
1334 int usb4_port_device_resume(struct usb4_port *usb4);
1335
1336 static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
1337 {
1338 return usb4->offline;
1339 }
1340
1341 void tb_check_quirks(struct tb_switch *sw);
1342
1343 #ifdef CONFIG_ACPI
1344 bool tb_acpi_add_links(struct tb_nhi *nhi);
1345
1346 bool tb_acpi_is_native(void);
1347 bool tb_acpi_may_tunnel_usb3(void);
1348 bool tb_acpi_may_tunnel_dp(void);
1349 bool tb_acpi_may_tunnel_pcie(void);
1350 bool tb_acpi_is_xdomain_allowed(void);
1351
1352 int tb_acpi_init(void);
1353 void tb_acpi_exit(void);
1354 int tb_acpi_power_on_retimers(struct tb_port *port);
1355 int tb_acpi_power_off_retimers(struct tb_port *port);
1356 #else
1357 static inline bool tb_acpi_add_links(struct tb_nhi *nhi) { return false; }
1358
1359 static inline bool tb_acpi_is_native(void) { return true; }
1360 static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
1361 static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
1362 static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
1363 static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }
1364
1365 static inline int tb_acpi_init(void) { return 0; }
1366 static inline void tb_acpi_exit(void) { }
1367 static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; }
1368 static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; }
1369 #endif
1370
1371 #ifdef CONFIG_DEBUG_FS
1372 void tb_debugfs_init(void);
1373 void tb_debugfs_exit(void);
1374 void tb_switch_debugfs_init(struct tb_switch *sw);
1375 void tb_switch_debugfs_remove(struct tb_switch *sw);
1376 void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
1377 void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
1378 void tb_service_debugfs_init(struct tb_service *svc);
1379 void tb_service_debugfs_remove(struct tb_service *svc);
1380 #else
1381 static inline void tb_debugfs_init(void) { }
1382 static inline void tb_debugfs_exit(void) { }
1383 static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
1384 static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
1385 static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
1386 static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
1387 static inline void tb_service_debugfs_init(struct tb_service *svc) { }
1388 static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
1389 #endif
1390
1391 #endif
1392