// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}

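/* Read the link controller descriptor (TB_LC_DESC) register of the switch */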
static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

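/*
 * Return the register offset of the port specific link controller
 * registers for @port, or negative errno in case of failure.
 */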
static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}

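/*
 * Set or clear the lane configured bit of @port (and the upstream bit
 * when @port is an upstream port) in the port LC registers.
 */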
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

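/*
 * Set or clear the XDomain configured bit of the lane of @port in the
 * port LC registers.
 */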
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

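/*
 * Program the wake bits of a single link controller at @offset
 * according to @flags.
 */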
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Enable wake on PCIe and USB4 (wake coming from another
	 * router).
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
		  TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * For each LC sets wake bits accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether the conditions for lane bonding from the parent to @sw
 * are met.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

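/*
 * Return the DP sink number (%0 or %1) corresponding to DP IN port @in,
 * or %-EINVAL if the switch has no DP IN ports.
 */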
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

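/*
 * Return %0 if the given DP sink is available for the connection
 * manager to use (not allocated or already allocated to CM/SW),
 * %-EBUSY otherwise.
 */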
static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}