1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KUnit tests for the Thunderbolt driver
4  *
5  * Copyright (C) 2020, Intel Corporation
6  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
7  */
8 
9 #include <kunit/test.h>
10 #include <linux/idr.h>
11 
12 #include "tb.h"
13 #include "tunnel.h"
14 
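/*
 * Helpers that register a struct ida as a KUnit-managed resource so that
 * ida_destroy() runs automatically when the test case finishes.
 */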
15 static int __ida_init(struct kunit_resource *res, void *context)
16 {
17 	struct ida *ida = context;
18 
19 	ida_init(ida);
20 	res->data = ida;
21 	return 0;
22 }
23 
24 static void __ida_destroy(struct kunit_resource *res)
25 {
26 	struct ida *ida = res->data;
27 
28 	ida_destroy(ida);
29 }
30 
31 static void kunit_ida_init(struct kunit *test, struct ida *ida)
32 {
33 	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
34 }
35 
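/*
 * Allocates a bare test switch with the given route and port count. All
 * memory is test-managed and the per-port HopID IDAs are registered as
 * KUnit resources, so the test cases do not need to free anything.
 */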
36 static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
37 				      u8 upstream_port, u8 max_port_number)
38 {
39 	struct tb_switch *sw;
40 	size_t size;
41 	int i;
42 
43 	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
44 	if (!sw)
45 		return NULL;
46 
47 	sw->config.upstream_port_number = upstream_port;
48 	sw->config.depth = tb_route_length(route);
49 	sw->config.route_hi = upper_32_bits(route);
50 	sw->config.route_lo = lower_32_bits(route);
51 	sw->config.enabled = 0;
52 	sw->config.max_port_number = max_port_number;
53 
54 	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
55 	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
56 	if (!sw->ports)
57 		return NULL;
58 
59 	for (i = 0; i <= sw->config.max_port_number; i++) {
60 		sw->ports[i].sw = sw;
61 		sw->ports[i].port = i;
62 		sw->ports[i].config.port_number = i;
63 		if (i) {
64 			kunit_ida_init(test, &sw->ports[i].in_hopids);
65 			kunit_ida_init(test, &sw->ports[i].out_hopids);
66 		}
67 	}
68 
69 	return sw;
70 }
71 
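/*
 * Models the host router used by the tests: four lane adapters (two
 * dual-link pairs), two DP IN adapters, an NHI, two PCIe downstream and
 * two USB3 downstream adapters, plus two disabled ports.
 */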
72 static struct tb_switch *alloc_host(struct kunit *test)
73 {
74 	struct tb_switch *sw;
75 
76 	sw = alloc_switch(test, 0, 7, 13);
77 	if (!sw)
78 		return NULL;
79 
80 	sw->config.vendor_id = 0x8086;
81 	sw->config.device_id = 0x9a1b;
82 
83 	sw->ports[0].config.type = TB_TYPE_PORT;
84 	sw->ports[0].config.max_in_hop_id = 7;
85 	sw->ports[0].config.max_out_hop_id = 7;
86 
87 	sw->ports[1].config.type = TB_TYPE_PORT;
88 	sw->ports[1].config.max_in_hop_id = 19;
89 	sw->ports[1].config.max_out_hop_id = 19;
90 	sw->ports[1].dual_link_port = &sw->ports[2];
91 
92 	sw->ports[2].config.type = TB_TYPE_PORT;
93 	sw->ports[2].config.max_in_hop_id = 19;
94 	sw->ports[2].config.max_out_hop_id = 19;
95 	sw->ports[2].dual_link_port = &sw->ports[1];
96 	sw->ports[2].link_nr = 1;
97 
98 	sw->ports[3].config.type = TB_TYPE_PORT;
99 	sw->ports[3].config.max_in_hop_id = 19;
100 	sw->ports[3].config.max_out_hop_id = 19;
101 	sw->ports[3].dual_link_port = &sw->ports[4];
102 
103 	sw->ports[4].config.type = TB_TYPE_PORT;
104 	sw->ports[4].config.max_in_hop_id = 19;
105 	sw->ports[4].config.max_out_hop_id = 19;
106 	sw->ports[4].dual_link_port = &sw->ports[3];
107 	sw->ports[4].link_nr = 1;
108 
109 	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
110 	sw->ports[5].config.max_in_hop_id = 9;
111 	sw->ports[5].config.max_out_hop_id = 9;
112 	sw->ports[5].cap_adap = -1;
113 
114 	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
115 	sw->ports[6].config.max_in_hop_id = 9;
116 	sw->ports[6].config.max_out_hop_id = 9;
117 	sw->ports[6].cap_adap = -1;
118 
119 	sw->ports[7].config.type = TB_TYPE_NHI;
120 	sw->ports[7].config.max_in_hop_id = 11;
121 	sw->ports[7].config.max_out_hop_id = 11;
122 
123 	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
124 	sw->ports[8].config.max_in_hop_id = 8;
125 	sw->ports[8].config.max_out_hop_id = 8;
126 
127 	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
128 	sw->ports[9].config.max_in_hop_id = 8;
129 	sw->ports[9].config.max_out_hop_id = 8;
130 
131 	sw->ports[10].disabled = true;
132 	sw->ports[11].disabled = true;
133 
134 	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
135 	sw->ports[12].config.max_in_hop_id = 8;
136 	sw->ports[12].config.max_out_hop_id = 8;
137 
138 	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
139 	sw->ports[13].config.max_in_hop_id = 8;
140 	sw->ports[13].config.max_out_hop_id = 8;
141 
142 	return sw;
143 }
144 
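/*
 * Models a device router with four dual-link lane adapter pairs, one PCIe
 * upstream and three PCIe downstream adapters, two DP OUT adapters, one
 * USB3 upstream and three USB3 downstream adapters. If @parent is given
 * the new router is linked to it at @route, optionally with the lanes
 * bonded.
 */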
145 static struct tb_switch *alloc_dev_default(struct kunit *test,
146 					   struct tb_switch *parent,
147 					   u64 route, bool bonded)
148 {
149 	struct tb_port *port, *upstream_port;
150 	struct tb_switch *sw;
151 
152 	sw = alloc_switch(test, route, 1, 19);
153 	if (!sw)
154 		return NULL;
155 
156 	sw->config.vendor_id = 0x8086;
157 	sw->config.device_id = 0x15ef;
158 
159 	sw->ports[0].config.type = TB_TYPE_PORT;
160 	sw->ports[0].config.max_in_hop_id = 8;
161 	sw->ports[0].config.max_out_hop_id = 8;
162 
163 	sw->ports[1].config.type = TB_TYPE_PORT;
164 	sw->ports[1].config.max_in_hop_id = 19;
165 	sw->ports[1].config.max_out_hop_id = 19;
166 	sw->ports[1].dual_link_port = &sw->ports[2];
167 
168 	sw->ports[2].config.type = TB_TYPE_PORT;
169 	sw->ports[2].config.max_in_hop_id = 19;
170 	sw->ports[2].config.max_out_hop_id = 19;
171 	sw->ports[2].dual_link_port = &sw->ports[1];
172 	sw->ports[2].link_nr = 1;
173 
174 	sw->ports[3].config.type = TB_TYPE_PORT;
175 	sw->ports[3].config.max_in_hop_id = 19;
176 	sw->ports[3].config.max_out_hop_id = 19;
177 	sw->ports[3].dual_link_port = &sw->ports[4];
178 
179 	sw->ports[4].config.type = TB_TYPE_PORT;
180 	sw->ports[4].config.max_in_hop_id = 19;
181 	sw->ports[4].config.max_out_hop_id = 19;
182 	sw->ports[4].dual_link_port = &sw->ports[3];
183 	sw->ports[4].link_nr = 1;
184 
185 	sw->ports[5].config.type = TB_TYPE_PORT;
186 	sw->ports[5].config.max_in_hop_id = 19;
187 	sw->ports[5].config.max_out_hop_id = 19;
188 	sw->ports[5].dual_link_port = &sw->ports[6];
189 
190 	sw->ports[6].config.type = TB_TYPE_PORT;
191 	sw->ports[6].config.max_in_hop_id = 19;
192 	sw->ports[6].config.max_out_hop_id = 19;
193 	sw->ports[6].dual_link_port = &sw->ports[5];
194 	sw->ports[6].link_nr = 1;
195 
196 	sw->ports[7].config.type = TB_TYPE_PORT;
197 	sw->ports[7].config.max_in_hop_id = 19;
198 	sw->ports[7].config.max_out_hop_id = 19;
199 	sw->ports[7].dual_link_port = &sw->ports[8];
200 
201 	sw->ports[8].config.type = TB_TYPE_PORT;
202 	sw->ports[8].config.max_in_hop_id = 19;
203 	sw->ports[8].config.max_out_hop_id = 19;
204 	sw->ports[8].dual_link_port = &sw->ports[7];
205 	sw->ports[8].link_nr = 1;
206 
207 	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
208 	sw->ports[9].config.max_in_hop_id = 8;
209 	sw->ports[9].config.max_out_hop_id = 8;
210 
211 	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
212 	sw->ports[10].config.max_in_hop_id = 8;
213 	sw->ports[10].config.max_out_hop_id = 8;
214 
215 	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
216 	sw->ports[11].config.max_in_hop_id = 8;
217 	sw->ports[11].config.max_out_hop_id = 8;
218 
219 	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
220 	sw->ports[12].config.max_in_hop_id = 8;
221 	sw->ports[12].config.max_out_hop_id = 8;
222 
223 	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
224 	sw->ports[13].config.max_in_hop_id = 9;
225 	sw->ports[13].config.max_out_hop_id = 9;
226 	sw->ports[13].cap_adap = -1;
227 
228 	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
229 	sw->ports[14].config.max_in_hop_id = 9;
230 	sw->ports[14].config.max_out_hop_id = 9;
231 	sw->ports[14].cap_adap = -1;
232 
233 	sw->ports[15].disabled = true;
234 
235 	sw->ports[16].config.type = TB_TYPE_USB3_UP;
236 	sw->ports[16].config.max_in_hop_id = 8;
237 	sw->ports[16].config.max_out_hop_id = 8;
238 
239 	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
240 	sw->ports[17].config.max_in_hop_id = 8;
241 	sw->ports[17].config.max_out_hop_id = 8;
242 
243 	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
244 	sw->ports[18].config.max_in_hop_id = 8;
245 	sw->ports[18].config.max_out_hop_id = 8;
246 
247 	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
248 	sw->ports[19].config.max_in_hop_id = 8;
249 	sw->ports[19].config.max_out_hop_id = 8;
250 
251 	if (!parent)
252 		return sw;
253 
254 	/* Link them */
255 	upstream_port = tb_upstream_port(sw);
256 	port = tb_port_at(route, parent);
257 	port->remote = upstream_port;
258 	upstream_port->remote = port;
259 	if (port->dual_link_port && upstream_port->dual_link_port) {
260 		port->dual_link_port->remote = upstream_port->dual_link_port;
261 		upstream_port->dual_link_port->remote = port->dual_link_port;
262 
263 		if (bonded) {
264 			/* Bonding is used */
265 			port->bonded = true;
266 			port->dual_link_port->bonded = true;
267 			upstream_port->bonded = true;
268 			upstream_port->dual_link_port->bonded = true;
269 		}
270 	}
271 
272 	return sw;
273 }
274 
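/* Like alloc_dev_default() but adapters 13 and 14 are DP IN instead of DP OUT */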
275 static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
276 					     struct tb_switch *parent,
277 					     u64 route, bool bonded)
278 {
279 	struct tb_switch *sw;
280 
281 	sw = alloc_dev_default(test, parent, route, bonded);
282 	if (!sw)
283 		return NULL;
284 
285 	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
286 	sw->ports[13].config.max_in_hop_id = 9;
287 	sw->ports[13].config.max_out_hop_id = 9;
288 
289 	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
290 	sw->ports[14].config.max_in_hop_id = 9;
291 	sw->ports[14].config.max_out_hop_id = 9;
292 
293 	return sw;
294 }
295 
296 static void tb_test_path_basic(struct kunit *test)
297 {
298 	struct tb_port *src_port, *dst_port, *p;
299 	struct tb_switch *host;
300 
301 	host = alloc_host(test);
302 
303 	src_port = &host->ports[5];
304 	dst_port = src_port;
305 
306 	p = tb_next_port_on_path(src_port, dst_port, NULL);
307 	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
308 
309 	p = tb_next_port_on_path(src_port, dst_port, p);
310 	KUNIT_EXPECT_TRUE(test, !p);
311 }
312 
313 static void tb_test_path_not_connected_walk(struct kunit *test)
314 {
315 	struct tb_port *src_port, *dst_port, *p;
316 	struct tb_switch *host, *dev;
317 
318 	host = alloc_host(test);
319 	/* No connection between host and dev */
320 	dev = alloc_dev_default(test, NULL, 3, true);
321 
322 	src_port = &host->ports[12];
323 	dst_port = &dev->ports[16];
324 
325 	p = tb_next_port_on_path(src_port, dst_port, NULL);
326 	KUNIT_EXPECT_PTR_EQ(test, p, src_port);
327 
328 	p = tb_next_port_on_path(src_port, dst_port, p);
329 	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
330 
331 	p = tb_next_port_on_path(src_port, dst_port, p);
332 	KUNIT_EXPECT_TRUE(test, !p);
333 
334 	/* Other direction */
335 
336 	p = tb_next_port_on_path(dst_port, src_port, NULL);
337 	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
338 
339 	p = tb_next_port_on_path(dst_port, src_port, p);
340 	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
341 
342 	p = tb_next_port_on_path(dst_port, src_port, p);
343 	KUNIT_EXPECT_TRUE(test, !p);
344 }
345 
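/* Expected router route, port number and adapter type at each step of a walk */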
346 struct port_expectation {
347 	u64 route;
348 	u8 port;
349 	enum tb_port_type type;
350 };
351 
352 static void tb_test_path_single_hop_walk(struct kunit *test)
353 {
354 	/*
355 	 * Walks from Host PCIe downstream port to Device #1 PCIe
356 	 * upstream port.
357 	 *
358 	 *   [Host]
359 	 *   1 |
360 	 *   1 |
361 	 *  [Device]
362 	 */
363 	static const struct port_expectation test_data[] = {
364 		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
365 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
366 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
367 		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
368 	};
369 	struct tb_port *src_port, *dst_port, *p;
370 	struct tb_switch *host, *dev;
371 	int i;
372 
373 	host = alloc_host(test);
374 	dev = alloc_dev_default(test, host, 1, true);
375 
376 	src_port = &host->ports[8];
377 	dst_port = &dev->ports[9];
378 
379 	/* Walk both directions */
380 
381 	i = 0;
382 	tb_for_each_port_on_path(src_port, dst_port, p) {
383 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
384 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
385 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
386 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
387 				test_data[i].type);
388 		i++;
389 	}
390 
391 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
392 
393 	i = ARRAY_SIZE(test_data) - 1;
394 	tb_for_each_port_on_path(dst_port, src_port, p) {
395 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
396 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
397 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
398 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
399 				test_data[i].type);
400 		i--;
401 	}
402 
403 	KUNIT_EXPECT_EQ(test, i, -1);
404 }
405 
406 static void tb_test_path_daisy_chain_walk(struct kunit *test)
407 {
408 	/*
409 	 * Walks from Host DP IN to Device #2 DP OUT.
410 	 *
411 	 *           [Host]
412 	 *            1 |
413 	 *            1 |
414 	 *         [Device #1]
415 	 *       3 /
416 	 *      1 /
417 	 * [Device #2]
418 	 */
419 	static const struct port_expectation test_data[] = {
420 		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
421 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
422 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
423 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
424 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
425 		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
426 	};
427 	struct tb_port *src_port, *dst_port, *p;
428 	struct tb_switch *host, *dev1, *dev2;
429 	int i;
430 
431 	host = alloc_host(test);
432 	dev1 = alloc_dev_default(test, host, 0x1, true);
433 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
434 
435 	src_port = &host->ports[5];
436 	dst_port = &dev2->ports[13];
437 
438 	/* Walk both directions */
439 
440 	i = 0;
441 	tb_for_each_port_on_path(src_port, dst_port, p) {
442 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
443 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
444 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
445 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
446 				test_data[i].type);
447 		i++;
448 	}
449 
450 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
451 
452 	i = ARRAY_SIZE(test_data) - 1;
453 	tb_for_each_port_on_path(dst_port, src_port, p) {
454 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
455 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
456 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
457 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
458 				test_data[i].type);
459 		i--;
460 	}
461 
462 	KUNIT_EXPECT_EQ(test, i, -1);
463 }
464 
465 static void tb_test_path_simple_tree_walk(struct kunit *test)
466 {
467 	/*
468 	 * Walks from Host DP IN to Device #3 DP OUT.
469 	 *
470 	 *           [Host]
471 	 *            1 |
472 	 *            1 |
473 	 *         [Device #1]
474 	 *       3 /   | 5  \ 7
475 	 *      1 /    |     \ 1
476 	 * [Device #2] |    [Device #4]
477 	 *             | 1
478 	 *         [Device #3]
479 	 */
480 	static const struct port_expectation test_data[] = {
481 		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
482 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
483 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
484 		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
485 		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
486 		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
487 	};
488 	struct tb_port *src_port, *dst_port, *p;
489 	struct tb_switch *host, *dev1, *dev3;
490 	int i;
491 
492 	host = alloc_host(test);
493 	dev1 = alloc_dev_default(test, host, 0x1, true);
494 	alloc_dev_default(test, dev1, 0x301, true);
495 	dev3 = alloc_dev_default(test, dev1, 0x501, true);
496 	alloc_dev_default(test, dev1, 0x701, true);
497 
498 	src_port = &host->ports[5];
499 	dst_port = &dev3->ports[13];
500 
501 	/* Walk both directions */
502 
503 	i = 0;
504 	tb_for_each_port_on_path(src_port, dst_port, p) {
505 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
506 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
507 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
508 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
509 				test_data[i].type);
510 		i++;
511 	}
512 
513 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
514 
515 	i = ARRAY_SIZE(test_data) - 1;
516 	tb_for_each_port_on_path(dst_port, src_port, p) {
517 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
518 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
519 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
520 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
521 				test_data[i].type);
522 		i--;
523 	}
524 
525 	KUNIT_EXPECT_EQ(test, i, -1);
526 }
527 
528 static void tb_test_path_complex_tree_walk(struct kunit *test)
529 {
530 	/*
531 	 * Walks from Device #3 DP IN to Device #9 DP OUT.
532 	 *
533 	 *           [Host]
534 	 *            1 |
535 	 *            1 |
536 	 *         [Device #1]
537 	 *       3 /   | 5  \ 7
538 	 *      1 /    |     \ 1
539 	 * [Device #2] |    [Device #5]
540 	 *    5 |      | 1         \ 7
541 	 *    1 |  [Device #4]      \ 1
542 	 * [Device #3]             [Device #6]
543 	 *                       3 /
544 	 *                      1 /
545 	 *                    [Device #7]
546 	 *                  3 /      | 5
547 	 *                 1 /       |
548 	 *               [Device #8] | 1
549 	 *                       [Device #9]
550 	 */
551 	static const struct port_expectation test_data[] = {
552 		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
553 		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
554 		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
555 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
556 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
557 		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
558 		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
559 		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
560 		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
561 		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
562 		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
563 		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
564 		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
565 		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
566 	};
567 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
568 	struct tb_port *src_port, *dst_port, *p;
569 	int i;
570 
571 	host = alloc_host(test);
572 	dev1 = alloc_dev_default(test, host, 0x1, true);
573 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
574 	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
575 	alloc_dev_default(test, dev1, 0x501, true);
576 	dev5 = alloc_dev_default(test, dev1, 0x701, true);
577 	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
578 	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
579 	alloc_dev_default(test, dev7, 0x303070701, true);
580 	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
581 
582 	src_port = &dev3->ports[13];
583 	dst_port = &dev9->ports[14];
584 
585 	/* Walk both directions */
586 
587 	i = 0;
588 	tb_for_each_port_on_path(src_port, dst_port, p) {
589 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
590 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
591 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
592 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
593 				test_data[i].type);
594 		i++;
595 	}
596 
597 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
598 
599 	i = ARRAY_SIZE(test_data) - 1;
600 	tb_for_each_port_on_path(dst_port, src_port, p) {
601 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
602 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
603 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
604 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
605 				test_data[i].type);
606 		i--;
607 	}
608 
609 	KUNIT_EXPECT_EQ(test, i, -1);
610 }
611 
612 static void tb_test_path_max_length_walk(struct kunit *test)
613 {
614 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
615 	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
616 	struct tb_port *src_port, *dst_port, *p;
617 	int i;
618 
619 	/*
620 	 * Walks from Device #6 DP IN to Device #12 DP OUT.
621 	 *
622 	 *          [Host]
623 	 *         1 /  \ 3
624 	 *        1 /    \ 1
625 	 * [Device #1]   [Device #7]
626 	 *     3 |           | 3
627 	 *     1 |           | 1
628 	 * [Device #2]   [Device #8]
629 	 *     3 |           | 3
630 	 *     1 |           | 1
631 	 * [Device #3]   [Device #9]
632 	 *     3 |           | 3
633 	 *     1 |           | 1
634 	 * [Device #4]   [Device #10]
635 	 *     3 |           | 3
636 	 *     1 |           | 1
637 	 * [Device #5]   [Device #11]
638 	 *     3 |           | 3
639 	 *     1 |           | 1
640 	 * [Device #6]   [Device #12]
641 	 */
642 	static const struct port_expectation test_data[] = {
643 		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
644 		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
645 		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
646 		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
647 		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
648 		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
649 		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
650 		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
651 		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
652 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
653 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
654 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
655 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
656 		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
657 		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
658 		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
659 		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
660 		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
661 		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
662 		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
663 		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
664 		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
665 		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
666 		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
667 		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
668 		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
669 	};
670 
671 	host = alloc_host(test);
672 	dev1 = alloc_dev_default(test, host, 0x1, true);
673 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
674 	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
675 	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
676 	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
677 	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
678 	dev7 = alloc_dev_default(test, host, 0x3, true);
679 	dev8 = alloc_dev_default(test, dev7, 0x303, true);
680 	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
681 	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
682 	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
683 	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
684 
685 	src_port = &dev6->ports[13];
686 	dst_port = &dev12->ports[13];
687 
688 	/* Walk both directions */
689 
690 	i = 0;
691 	tb_for_each_port_on_path(src_port, dst_port, p) {
692 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
693 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
694 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
695 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
696 				test_data[i].type);
697 		i++;
698 	}
699 
700 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
701 
702 	i = ARRAY_SIZE(test_data) - 1;
703 	tb_for_each_port_on_path(dst_port, src_port, p) {
704 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
705 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
706 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
707 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
708 				test_data[i].type);
709 		i--;
710 	}
711 
712 	KUNIT_EXPECT_EQ(test, i, -1);
713 }
714 
715 static void tb_test_path_not_connected(struct kunit *test)
716 {
717 	struct tb_switch *host, *dev1, *dev2;
718 	struct tb_port *down, *up;
719 	struct tb_path *path;
720 
721 	host = alloc_host(test);
722 	dev1 = alloc_dev_default(test, host, 0x3, false);
723 	/* Not connected to anything */
724 	dev2 = alloc_dev_default(test, NULL, 0x303, false);
725 
726 	down = &dev1->ports[10];
727 	up = &dev2->ports[9];
728 
729 	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
730 	KUNIT_ASSERT_TRUE(test, path == NULL);
731 	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
732 	KUNIT_ASSERT_TRUE(test, path == NULL);
733 }
734 
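/* Expected ingress/egress adapter for each hop of an allocated tb_path */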
735 struct hop_expectation {
736 	u64 route;
737 	u8 in_port;
738 	enum tb_port_type in_type;
739 	u8 out_port;
740 	enum tb_port_type out_type;
741 };
742 
743 static void tb_test_path_not_bonded_lane0(struct kunit *test)
744 {
745 	/*
746 	 * PCIe path from host to device using lane 0.
747 	 *
748 	 *   [Host]
749 	 *   3 |: 4
750 	 *   1 |: 2
751 	 *  [Device]
752 	 */
753 	static const struct hop_expectation test_data[] = {
754 		{
755 			.route = 0x0,
756 			.in_port = 9,
757 			.in_type = TB_TYPE_PCIE_DOWN,
758 			.out_port = 3,
759 			.out_type = TB_TYPE_PORT,
760 		},
761 		{
762 			.route = 0x3,
763 			.in_port = 1,
764 			.in_type = TB_TYPE_PORT,
765 			.out_port = 9,
766 			.out_type = TB_TYPE_PCIE_UP,
767 		},
768 	};
769 	struct tb_switch *host, *dev;
770 	struct tb_port *down, *up;
771 	struct tb_path *path;
772 	int i;
773 
774 	host = alloc_host(test);
775 	dev = alloc_dev_default(test, host, 0x3, false);
776 
777 	down = &host->ports[9];
778 	up = &dev->ports[9];
779 
780 	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
781 	KUNIT_ASSERT_TRUE(test, path != NULL);
782 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
783 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
784 		const struct tb_port *in_port, *out_port;
785 
786 		in_port = path->hops[i].in_port;
787 		out_port = path->hops[i].out_port;
788 
789 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
790 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
791 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
792 				test_data[i].in_type);
793 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
794 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
795 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
796 				test_data[i].out_type);
797 	}
798 	tb_path_free(path);
799 }
800 
801 static void tb_test_path_not_bonded_lane1(struct kunit *test)
802 {
803 	/*
804 	 * DP Video path from host to device using lane 1. Paths like
805 	 * these are only used with Thunderbolt 1 devices where lane
806 	 * bonding is not possible. USB4 specifically does not allow
807 	 * paths like this (you either use lane 0 where lane 1 is
808 	 * disabled or both lanes are bonded).
809 	 *
810 	 *   [Host]
811 	 *   1 :| 2
812 	 *   1 :| 2
813 	 *  [Device]
814 	 */
815 	static const struct hop_expectation test_data[] = {
816 		{
817 			.route = 0x0,
818 			.in_port = 5,
819 			.in_type = TB_TYPE_DP_HDMI_IN,
820 			.out_port = 2,
821 			.out_type = TB_TYPE_PORT,
822 		},
823 		{
824 			.route = 0x1,
825 			.in_port = 2,
826 			.in_type = TB_TYPE_PORT,
827 			.out_port = 13,
828 			.out_type = TB_TYPE_DP_HDMI_OUT,
829 		},
830 	};
831 	struct tb_switch *host, *dev;
832 	struct tb_port *in, *out;
833 	struct tb_path *path;
834 	int i;
835 
836 	host = alloc_host(test);
837 	dev = alloc_dev_default(test, host, 0x1, false);
838 
839 	in = &host->ports[5];
840 	out = &dev->ports[13];
841 
842 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
843 	KUNIT_ASSERT_TRUE(test, path != NULL);
844 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
845 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
846 		const struct tb_port *in_port, *out_port;
847 
848 		in_port = path->hops[i].in_port;
849 		out_port = path->hops[i].out_port;
850 
851 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
852 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
853 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
854 				test_data[i].in_type);
855 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
856 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
857 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
858 				test_data[i].out_type);
859 	}
860 	tb_path_free(path);
861 }
862 
863 static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
864 {
865 	/*
866 	 * DP Video path from host to device 3 using lane 1.
867 	 *
868 	 *    [Host]
869 	 *    1 :| 2
870 	 *    1 :| 2
871 	 *  [Device #1]
872 	 *    7 :| 8
873 	 *    1 :| 2
874 	 *  [Device #2]
875 	 *    5 :| 6
876 	 *    1 :| 2
877 	 *  [Device #3]
878 	 */
879 	static const struct hop_expectation test_data[] = {
880 		{
881 			.route = 0x0,
882 			.in_port = 5,
883 			.in_type = TB_TYPE_DP_HDMI_IN,
884 			.out_port = 2,
885 			.out_type = TB_TYPE_PORT,
886 		},
887 		{
888 			.route = 0x1,
889 			.in_port = 2,
890 			.in_type = TB_TYPE_PORT,
891 			.out_port = 8,
892 			.out_type = TB_TYPE_PORT,
893 		},
894 		{
895 			.route = 0x701,
896 			.in_port = 2,
897 			.in_type = TB_TYPE_PORT,
898 			.out_port = 6,
899 			.out_type = TB_TYPE_PORT,
900 		},
901 		{
902 			.route = 0x50701,
903 			.in_port = 2,
904 			.in_type = TB_TYPE_PORT,
905 			.out_port = 13,
906 			.out_type = TB_TYPE_DP_HDMI_OUT,
907 		},
908 	};
909 	struct tb_switch *host, *dev1, *dev2, *dev3;
910 	struct tb_port *in, *out;
911 	struct tb_path *path;
912 	int i;
913 
914 	host = alloc_host(test);
915 	dev1 = alloc_dev_default(test, host, 0x1, false);
916 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
917 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
918 
919 	in = &host->ports[5];
920 	out = &dev3->ports[13];
921 
922 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
923 	KUNIT_ASSERT_TRUE(test, path != NULL);
924 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
925 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
926 		const struct tb_port *in_port, *out_port;
927 
928 		in_port = path->hops[i].in_port;
929 		out_port = path->hops[i].out_port;
930 
931 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
932 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
933 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
934 				test_data[i].in_type);
935 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
936 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
937 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
938 				test_data[i].out_type);
939 	}
940 	tb_path_free(path);
941 }
942 
943 static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
944 {
945 	/*
946 	 * DP Video path from device 3 to host using lane 1.
947 	 *
948 	 *    [Host]
949 	 *    1 :| 2
950 	 *    1 :| 2
951 	 *  [Device #1]
952 	 *    7 :| 8
953 	 *    1 :| 2
954 	 *  [Device #2]
955 	 *    5 :| 6
956 	 *    1 :| 2
957 	 *  [Device #3]
958 	 */
959 	static const struct hop_expectation test_data[] = {
960 		{
961 			.route = 0x50701,
962 			.in_port = 13,
963 			.in_type = TB_TYPE_DP_HDMI_IN,
964 			.out_port = 2,
965 			.out_type = TB_TYPE_PORT,
966 		},
967 		{
968 			.route = 0x701,
969 			.in_port = 6,
970 			.in_type = TB_TYPE_PORT,
971 			.out_port = 2,
972 			.out_type = TB_TYPE_PORT,
973 		},
974 		{
975 			.route = 0x1,
976 			.in_port = 8,
977 			.in_type = TB_TYPE_PORT,
978 			.out_port = 2,
979 			.out_type = TB_TYPE_PORT,
980 		},
981 		{
982 			.route = 0x0,
983 			.in_port = 2,
984 			.in_type = TB_TYPE_PORT,
985 			.out_port = 5,
986 			.out_type = TB_TYPE_DP_HDMI_IN,
987 		},
988 	};
989 	struct tb_switch *host, *dev1, *dev2, *dev3;
990 	struct tb_port *in, *out;
991 	struct tb_path *path;
992 	int i;
993 
994 	host = alloc_host(test);
995 	dev1 = alloc_dev_default(test, host, 0x1, false);
996 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
997 	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
998 
999 	in = &dev3->ports[13];
1000 	out = &host->ports[5];
1001 
1002 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1003 	KUNIT_ASSERT_TRUE(test, path != NULL);
1004 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1005 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1006 		const struct tb_port *in_port, *out_port;
1007 
1008 		in_port = path->hops[i].in_port;
1009 		out_port = path->hops[i].out_port;
1010 
1011 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1012 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1013 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1014 				test_data[i].in_type);
1015 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1016 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1017 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1018 				test_data[i].out_type);
1019 	}
1020 	tb_path_free(path);
1021 }
1022 
1023 static void tb_test_path_mixed_chain(struct kunit *test)
1024 {
1025 	/*
1026 	 * DP Video path from host to device 4 where the first and last
1027 	 * links are bonded.
1028 	 *
1029 	 *    [Host]
1030 	 *    1 |
1031 	 *    1 |
1032 	 *  [Device #1]
1033 	 *    7 :| 8
1034 	 *    1 :| 2
1035 	 *  [Device #2]
1036 	 *    5 :| 6
1037 	 *    1 :| 2
1038 	 *  [Device #3]
1039 	 *    3 |
1040 	 *    1 |
1041 	 *  [Device #4]
1042 	 */
1043 	static const struct hop_expectation test_data[] = {
1044 		{
1045 			.route = 0x0,
1046 			.in_port = 5,
1047 			.in_type = TB_TYPE_DP_HDMI_IN,
1048 			.out_port = 1,
1049 			.out_type = TB_TYPE_PORT,
1050 		},
1051 		{
1052 			.route = 0x1,
1053 			.in_port = 1,
1054 			.in_type = TB_TYPE_PORT,
1055 			.out_port = 8,
1056 			.out_type = TB_TYPE_PORT,
1057 		},
1058 		{
1059 			.route = 0x701,
1060 			.in_port = 2,
1061 			.in_type = TB_TYPE_PORT,
1062 			.out_port = 6,
1063 			.out_type = TB_TYPE_PORT,
1064 		},
1065 		{
1066 			.route = 0x50701,
1067 			.in_port = 2,
1068 			.in_type = TB_TYPE_PORT,
1069 			.out_port = 3,
1070 			.out_type = TB_TYPE_PORT,
1071 		},
1072 		{
1073 			.route = 0x3050701,
1074 			.in_port = 1,
1075 			.in_type = TB_TYPE_PORT,
1076 			.out_port = 13,
1077 			.out_type = TB_TYPE_DP_HDMI_OUT,
1078 		},
1079 	};
1080 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1081 	struct tb_port *in, *out;
1082 	struct tb_path *path;
1083 	int i;
1084 
1085 	host = alloc_host(test);
1086 	dev1 = alloc_dev_default(test, host, 0x1, true);
1087 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1088 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1089 	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1090 
1091 	in = &host->ports[5];
1092 	out = &dev4->ports[13];
1093 
1094 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1095 	KUNIT_ASSERT_TRUE(test, path != NULL);
1096 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1097 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1098 		const struct tb_port *in_port, *out_port;
1099 
1100 		in_port = path->hops[i].in_port;
1101 		out_port = path->hops[i].out_port;
1102 
1103 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1104 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1105 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1106 				test_data[i].in_type);
1107 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1108 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1109 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1110 				test_data[i].out_type);
1111 	}
1112 	tb_path_free(path);
1113 }
1114 
1115 static void tb_test_path_mixed_chain_reverse(struct kunit *test)
1116 {
1117 	/*
1118 	 * DP Video path from device 4 to host where the first and last
1119 	 * links are bonded.
1120 	 *
1121 	 *    [Host]
1122 	 *    1 |
1123 	 *    1 |
1124 	 *  [Device #1]
1125 	 *    7 :| 8
1126 	 *    1 :| 2
1127 	 *  [Device #2]
1128 	 *    5 :| 6
1129 	 *    1 :| 2
1130 	 *  [Device #3]
1131 	 *    3 |
1132 	 *    1 |
1133 	 *  [Device #4]
1134 	 */
1135 	static const struct hop_expectation test_data[] = {
1136 		{
1137 			.route = 0x3050701,
1138 			.in_port = 13,
1139 			.in_type = TB_TYPE_DP_HDMI_OUT,
1140 			.out_port = 1,
1141 			.out_type = TB_TYPE_PORT,
1142 		},
1143 		{
1144 			.route = 0x50701,
1145 			.in_port = 3,
1146 			.in_type = TB_TYPE_PORT,
1147 			.out_port = 2,
1148 			.out_type = TB_TYPE_PORT,
1149 		},
1150 		{
1151 			.route = 0x701,
1152 			.in_port = 6,
1153 			.in_type = TB_TYPE_PORT,
1154 			.out_port = 2,
1155 			.out_type = TB_TYPE_PORT,
1156 		},
1157 		{
1158 			.route = 0x1,
1159 			.in_port = 8,
1160 			.in_type = TB_TYPE_PORT,
1161 			.out_port = 1,
1162 			.out_type = TB_TYPE_PORT,
1163 		},
1164 		{
1165 			.route = 0x0,
1166 			.in_port = 1,
1167 			.in_type = TB_TYPE_PORT,
1168 			.out_port = 5,
1169 			.out_type = TB_TYPE_DP_HDMI_IN,
1170 		},
1171 	};
1172 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1173 	struct tb_port *in, *out;
1174 	struct tb_path *path;
1175 	int i;
1176 
1177 	host = alloc_host(test);
1178 	dev1 = alloc_dev_default(test, host, 0x1, true);
1179 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1180 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1181 	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1182 
1183 	in = &dev4->ports[13];
1184 	out = &host->ports[5];
1185 
1186 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1187 	KUNIT_ASSERT_TRUE(test, path != NULL);
1188 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1189 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1190 		const struct tb_port *in_port, *out_port;
1191 
1192 		in_port = path->hops[i].in_port;
1193 		out_port = path->hops[i].out_port;
1194 
1195 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1196 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1197 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1198 				test_data[i].in_type);
1199 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1200 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1201 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1202 				test_data[i].out_type);
1203 	}
1204 	tb_path_free(path);
1205 }
1206 
1207 static void tb_test_tunnel_pcie(struct kunit *test)
1208 {
1209 	struct tb_switch *host, *dev1, *dev2;
1210 	struct tb_tunnel *tunnel1, *tunnel2;
1211 	struct tb_port *down, *up;
1212 
1213 	/*
1214 	 * Create PCIe tunnels between the host and two devices.
1215 	 *
1216 	 *   [Host]
1217 	 *    1 |
1218 	 *    1 |
1219 	 *  [Device #1]
1220 	 *    5 |
1221 	 *    1 |
1222 	 *  [Device #2]
1223 	 */
1224 	host = alloc_host(test);
1225 	dev1 = alloc_dev_default(test, host, 0x1, true);
1226 	dev2 = alloc_dev_default(test, dev1, 0x501, true);
1227 
1228 	down = &host->ports[8];
1229 	up = &dev1->ports[9];
1230 	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1231 	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1232 	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
1233 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1234 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1235 	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
1236 	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1237 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1238 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1239 	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1240 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1241 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1242 
1243 	down = &dev1->ports[10];
1244 	up = &dev2->ports[9];
1245 	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1246 	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1247 	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
1248 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1249 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1250 	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
1251 	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1252 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1253 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1254 	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1255 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1256 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1257 
1258 	tb_tunnel_free(tunnel2);
1259 	tb_tunnel_free(tunnel1);
1260 }
1261 
1262 static void tb_test_tunnel_dp(struct kunit *test)
1263 {
1264 	struct tb_switch *host, *dev;
1265 	struct tb_port *in, *out;
1266 	struct tb_tunnel *tunnel;
1267 
1268 	/*
1269 	 * Create DP tunnel between Host and Device
1270 	 *
1271 	 *   [Host]
1272 	 *   3 |
1273 	 *   1 |
1274 	 *  [Device]
1275 	 */
1276 	host = alloc_host(test);
1277 	dev = alloc_dev_default(test, host, 0x3, true);
1278 
1279 	in = &host->ports[5];
1280 	out = &dev->ports[13];
1281 
1282 	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1283 	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1284 	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1285 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1286 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1287 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1288 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1289 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1290 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1291 	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1292 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1293 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1294 	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1295 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1296 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1297 	tb_tunnel_free(tunnel);
1298 }
1299 
1300 static void tb_test_tunnel_dp_chain(struct kunit *test)
1301 {
1302 	struct tb_switch *host, *dev1, *dev4;
1303 	struct tb_port *in, *out;
1304 	struct tb_tunnel *tunnel;
1305 
1306 	/*
1307 	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1308 	 *
1309 	 *           [Host]
1310 	 *            1 |
1311 	 *            1 |
1312 	 *         [Device #1]
1313 	 *       3 /   | 5  \ 7
1314 	 *      1 /    |     \ 1
1315 	 * [Device #2] |    [Device #4]
1316 	 *             | 1
1317 	 *         [Device #3]
1318 	 */
1319 	host = alloc_host(test);
1320 	dev1 = alloc_dev_default(test, host, 0x1, true);
1321 	alloc_dev_default(test, dev1, 0x301, true);
1322 	alloc_dev_default(test, dev1, 0x501, true);
1323 	dev4 = alloc_dev_default(test, dev1, 0x701, true);
1324 
1325 	in = &host->ports[5];
1326 	out = &dev4->ports[14];
1327 
1328 	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1329 	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1330 	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1331 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1332 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1333 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1334 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1335 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1336 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1337 	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1338 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1339 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1340 	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1341 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1342 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1343 	tb_tunnel_free(tunnel);
1344 }
1345 
1346 static void tb_test_tunnel_dp_tree(struct kunit *test)
1347 {
1348 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1349 	struct tb_port *in, *out;
1350 	struct tb_tunnel *tunnel;
1351 
1352 	/*
1353 	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1354 	 *
1355 	 *          [Host]
1356 	 *           3 |
1357 	 *           1 |
1358 	 *         [Device #1]
1359 	 *       3 /   | 5  \ 7
1360 	 *      1 /    |     \ 1
1361 	 * [Device #2] |    [Device #4]
1362 	 *             | 1
1363 	 *         [Device #3]
1364 	 *             | 5
1365 	 *             | 1
1366 	 *         [Device #5]
1367 	 */
1368 	host = alloc_host(test);
1369 	dev1 = alloc_dev_default(test, host, 0x3, true);
1370 	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1371 	dev3 = alloc_dev_default(test, dev1, 0x503, true);
1372 	alloc_dev_default(test, dev1, 0x703, true);
1373 	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1374 
1375 	in = &dev2->ports[13];
1376 	out = &dev5->ports[13];
1377 
1378 	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1379 	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1380 	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1381 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1382 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1383 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1384 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1385 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1386 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1387 	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1388 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1389 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1390 	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1391 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1392 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1393 	tb_tunnel_free(tunnel);
1394 }
1395 
1396 static void tb_test_tunnel_dp_max_length(struct kunit *test)
1397 {
1398 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1399 	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1400 	struct tb_port *in, *out;
1401 	struct tb_tunnel *tunnel;
1402 
1403 	/*
1404 	 * Creates DP tunnel from Device #6 to Device #12.
1405 	 *
1406 	 *          [Host]
1407 	 *         1 /  \ 3
1408 	 *        1 /    \ 1
1409 	 * [Device #1]   [Device #7]
1410 	 *     3 |           | 3
1411 	 *     1 |           | 1
1412 	 * [Device #2]   [Device #8]
1413 	 *     3 |           | 3
1414 	 *     1 |           | 1
1415 	 * [Device #3]   [Device #9]
1416 	 *     3 |           | 3
1417 	 *     1 |           | 1
1418 	 * [Device #4]   [Device #10]
1419 	 *     3 |           | 3
1420 	 *     1 |           | 1
1421 	 * [Device #5]   [Device #11]
1422 	 *     3 |           | 3
1423 	 *     1 |           | 1
1424 	 * [Device #6]   [Device #12]
1425 	 */
1426 	host = alloc_host(test);
1427 	dev1 = alloc_dev_default(test, host, 0x1, true);
1428 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
1429 	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1430 	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1431 	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1432 	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
1433 	dev7 = alloc_dev_default(test, host, 0x3, true);
1434 	dev8 = alloc_dev_default(test, dev7, 0x303, true);
1435 	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1436 	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1437 	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1438 	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1439 
1440 	in = &dev6->ports[13];
1441 	out = &dev12->ports[13];
1442 
1443 	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1444 	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1445 	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1446 	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1447 	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1448 	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1449 	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1450 	/* First hop */
1451 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1452 	/* Middle */
1453 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1454 			    &host->ports[1]);
1455 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1456 			    &host->ports[3]);
1457 	/* Last */
1458 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1459 	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1460 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1461 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1462 			    &host->ports[1]);
1463 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1464 			    &host->ports[3]);
1465 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1466 	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1467 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1468 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1469 			    &host->ports[3]);
1470 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1471 			    &host->ports[1]);
1472 	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1473 	tb_tunnel_free(tunnel);
1474 }
1475 
1476 static void tb_test_tunnel_usb3(struct kunit *test)
1477 {
1478 	struct tb_switch *host, *dev1, *dev2;
1479 	struct tb_tunnel *tunnel1, *tunnel2;
1480 	struct tb_port *down, *up;
1481 
1482 	/*
1483 	 * Create USB3 tunnels between the host and two devices.
1484 	 *
1485 	 *   [Host]
1486 	 *    1 |
1487 	 *    1 |
1488 	 *  [Device #1]
1489 	 *          \ 7
1490 	 *           \ 1
1491 	 *         [Device #2]
1492 	 */
1493 	host = alloc_host(test);
1494 	dev1 = alloc_dev_default(test, host, 0x1, true);
1495 	dev2 = alloc_dev_default(test, dev1, 0x701, true);
1496 
1497 	down = &host->ports[12];
1498 	up = &dev1->ports[16];
1499 	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1500 	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1501 	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
1502 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1503 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1504 	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
1505 	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1506 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1507 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1508 	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1509 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1510 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1511 
1512 	down = &dev1->ports[17];
1513 	up = &dev2->ports[16];
1514 	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1515 	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1516 	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
1517 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1518 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1519 	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
1520 	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1521 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1522 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1523 	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1524 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1525 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1526 
1527 	tb_tunnel_free(tunnel2);
1528 	tb_tunnel_free(tunnel1);
1529 }
1530 
1531 static void tb_test_tunnel_port_on_path(struct kunit *test)
1532 {
1533 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1534 	struct tb_port *in, *out, *port;
1535 	struct tb_tunnel *dp_tunnel;
1536 
1537 	/*
1538 	 *          [Host]
1539 	 *           3 |
1540 	 *           1 |
1541 	 *         [Device #1]
1542 	 *       3 /   | 5  \ 7
1543 	 *      1 /    |     \ 1
1544 	 * [Device #2] |    [Device #4]
1545 	 *             | 1
1546 	 *         [Device #3]
1547 	 *             | 5
1548 	 *             | 1
1549 	 *         [Device #5]
1550 	 */
1551 	host = alloc_host(test);
1552 	dev1 = alloc_dev_default(test, host, 0x3, true);
1553 	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1554 	dev3 = alloc_dev_default(test, dev1, 0x503, true);
1555 	dev4 = alloc_dev_default(test, dev1, 0x703, true);
1556 	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1557 
1558 	in = &dev2->ports[13];
1559 	out = &dev5->ports[13];
1560 
1561 	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1562 	KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
1563 
1564 	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1565 	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1566 
1567 	port = &host->ports[8];
1568 	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1569 
1570 	port = &host->ports[3];
1571 	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1572 
1573 	port = &dev1->ports[1];
1574 	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1575 
1576 	port = &dev1->ports[3];
1577 	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1578 
1579 	port = &dev1->ports[5];
1580 	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1581 
1582 	port = &dev1->ports[7];
1583 	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1584 
1585 	port = &dev3->ports[1];
1586 	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1587 
1588 	port = &dev5->ports[1];
1589 	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1590 
1591 	port = &dev4->ports[1];
1592 	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1593 
1594 	tb_tunnel_free(dp_tunnel);
1595 }
1596 
1597 static struct kunit_case tb_test_cases[] = {
1598 	KUNIT_CASE(tb_test_path_basic),
1599 	KUNIT_CASE(tb_test_path_not_connected_walk),
1600 	KUNIT_CASE(tb_test_path_single_hop_walk),
1601 	KUNIT_CASE(tb_test_path_daisy_chain_walk),
1602 	KUNIT_CASE(tb_test_path_simple_tree_walk),
1603 	KUNIT_CASE(tb_test_path_complex_tree_walk),
1604 	KUNIT_CASE(tb_test_path_max_length_walk),
1605 	KUNIT_CASE(tb_test_path_not_connected),
1606 	KUNIT_CASE(tb_test_path_not_bonded_lane0),
1607 	KUNIT_CASE(tb_test_path_not_bonded_lane1),
1608 	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
1609 	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
1610 	KUNIT_CASE(tb_test_path_mixed_chain),
1611 	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
1612 	KUNIT_CASE(tb_test_tunnel_pcie),
1613 	KUNIT_CASE(tb_test_tunnel_dp),
1614 	KUNIT_CASE(tb_test_tunnel_dp_chain),
1615 	KUNIT_CASE(tb_test_tunnel_dp_tree),
1616 	KUNIT_CASE(tb_test_tunnel_dp_max_length),
1617 	KUNIT_CASE(tb_test_tunnel_port_on_path),
1618 	KUNIT_CASE(tb_test_tunnel_usb3),
1619 	{ }
1620 };
1621 
1622 static struct kunit_suite tb_test_suite = {
1623 	.name = "thunderbolt",
1624 	.test_cases = tb_test_cases,
1625 };
1626 
1627 static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
1628 
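/*
 * The suite is registered and unregistered through these entry points
 * rather than kunit_test_suites(), so the rest of the driver can run the
 * tests from its own init/exit paths when they are enabled.
 */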
1629 int tb_test_init(void)
1630 {
1631 	return __kunit_test_suites_init(tb_test_suites);
1632 }
1633 
1634 void tb_test_exit(void)
1635 {
1636 	return __kunit_test_suites_exit(tb_test_suites);
1637 }
1638