1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Thunderbolt/USB4 retimer support.
4 *
5 * Copyright (C) 2020, Intel Corporation
6 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 */
9
10 #include <linux/delay.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/sched/signal.h>
13
14 #include "sb_regs.h"
15 #include "tb.h"
16
17 #define TB_MAX_RETIMER_INDEX 6
18
static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	/* Make sure the domain is powered before touching the sideband */
	pm_runtime_get_sync(&rt->dev);

	if (mutex_trylock(&rt->tb->lock)) {
		ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset,
						 val, bytes);
		mutex_unlock(&rt->tb->lock);
	} else {
		/* Lock is contended; ask userspace to retry the syscall */
		ret = restart_syscall();
	}

	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}
42
static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
				size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	/*
	 * Writes are only buffered here; the image is flushed to the
	 * retimer when userspace triggers nvm_authenticate.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}
58
tb_retimer_nvm_add(struct tb_retimer * rt)59 static int tb_retimer_nvm_add(struct tb_retimer *rt)
60 {
61 struct tb_nvm *nvm;
62 u32 val, nvm_size;
63 int ret;
64
65 nvm = tb_nvm_alloc(&rt->dev);
66 if (IS_ERR(nvm))
67 return PTR_ERR(nvm);
68
69 ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
70 sizeof(val));
71 if (ret)
72 goto err_nvm;
73
74 nvm->major = val >> 16;
75 nvm->minor = val >> 8;
76
77 ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
78 &val, sizeof(val));
79 if (ret)
80 goto err_nvm;
81
82 nvm_size = (SZ_1M << (val & 7)) / 8;
83 nvm_size = (nvm_size - SZ_16K) / 2;
84
85 ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
86 if (ret)
87 goto err_nvm;
88
89 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
90 if (ret)
91 goto err_nvm;
92
93 rt->nvm = nvm;
94 return 0;
95
96 err_nvm:
97 tb_nvm_free(nvm);
98 return ret;
99 }
100
/*
 * Validate the NVM image userspace wrote into the non-active buffer
 * and, if it passes the checks, write its digital section to the
 * retimer. On success marks the buffer flushed so a later
 * write-and-authenticate cycle does not write it again.
 *
 * Returns %0 on success or a negative errno on validation/write
 * failure.
 */
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = rt->nvm->buf;
	u16 ds_size, device;
	int ret;

	image_size = rt->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 * Only the low 24 bits of the first dword carry the pointer.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	buf += hdr_size;
	image_size -= hdr_size;

	/* Only the digital section is written to the retimer NVM */
	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (!ret)
		rt->nvm->flushed = true;

	return ret;
}
151
/* Trigger NVM authentication; @auth_only skips writing a new image first */
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		/* Authenticate the image that is already in the NVM */
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Try to read the status back while the retimer may still be
	 * accessible. Normally this read fails (the retimer went away
	 * to authenticate), which we treat as success.
	 */
	if (usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
						      &status))
		return 0;

	rt->auth_status = status;
	return status ? -EINVAL : 0;
}
182
device_show(struct device * dev,struct device_attribute * attr,char * buf)183 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
184 char *buf)
185 {
186 struct tb_retimer *rt = tb_to_retimer(dev);
187
188 return sprintf(buf, "%#x\n", rt->device);
189 }
190 static DEVICE_ATTR_RO(device);
191
nvm_authenticate_show(struct device * dev,struct device_attribute * attr,char * buf)192 static ssize_t nvm_authenticate_show(struct device *dev,
193 struct device_attribute *attr, char *buf)
194 {
195 struct tb_retimer *rt = tb_to_retimer(dev);
196 int ret;
197
198 if (!mutex_trylock(&rt->tb->lock))
199 return restart_syscall();
200
201 if (!rt->nvm)
202 ret = -EAGAIN;
203 else
204 ret = sprintf(buf, "%#x\n", rt->auth_status);
205
206 mutex_unlock(&rt->tb->lock);
207
208 return ret;
209 }
210
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int index;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status
	 * of each possible retimer. If a retimer has it set, it is
	 * stored for the new retimer device instance. Index 0 is not a
	 * valid retimer index so the array is 1-based.
	 */
	for (index = 1; index <= TB_MAX_RETIMER_INDEX; index++)
		usb4_port_retimer_nvm_authenticate_status(port, index,
							  &status[index]);
}
225
tb_retimer_set_inbound_sbtx(struct tb_port * port)226 static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
227 {
228 int i;
229
230 for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
231 usb4_port_retimer_set_inbound_sbtx(port, i);
232 }
233
tb_retimer_unset_inbound_sbtx(struct tb_port * port)234 static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
235 {
236 int i;
237
238 for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
239 usb4_port_retimer_unset_inbound_sbtx(port, i);
240 }
241
nvm_authenticate_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)242 static ssize_t nvm_authenticate_store(struct device *dev,
243 struct device_attribute *attr, const char *buf, size_t count)
244 {
245 struct tb_retimer *rt = tb_to_retimer(dev);
246 int val, ret;
247
248 pm_runtime_get_sync(&rt->dev);
249
250 if (!mutex_trylock(&rt->tb->lock)) {
251 ret = restart_syscall();
252 goto exit_rpm;
253 }
254
255 if (!rt->nvm) {
256 ret = -EAGAIN;
257 goto exit_unlock;
258 }
259
260 ret = kstrtoint(buf, 10, &val);
261 if (ret)
262 goto exit_unlock;
263
264 /* Always clear status */
265 rt->auth_status = 0;
266
267 if (val) {
268 tb_retimer_set_inbound_sbtx(rt->port);
269 if (val == AUTHENTICATE_ONLY) {
270 ret = tb_retimer_nvm_authenticate(rt, true);
271 } else {
272 if (!rt->nvm->flushed) {
273 if (!rt->nvm->buf) {
274 ret = -EINVAL;
275 goto exit_unlock;
276 }
277
278 ret = tb_retimer_nvm_validate_and_write(rt);
279 if (ret || val == WRITE_ONLY)
280 goto exit_unlock;
281 }
282 if (val == WRITE_AND_AUTHENTICATE)
283 ret = tb_retimer_nvm_authenticate(rt, false);
284 }
285 }
286
287 exit_unlock:
288 tb_retimer_unset_inbound_sbtx(rt->port);
289 mutex_unlock(&rt->tb->lock);
290 exit_rpm:
291 pm_runtime_mark_last_busy(&rt->dev);
292 pm_runtime_put_autosuspend(&rt->dev);
293
294 if (ret)
295 return ret;
296 return count;
297 }
298 static DEVICE_ATTR_RW(nvm_authenticate);
299
nvm_version_show(struct device * dev,struct device_attribute * attr,char * buf)300 static ssize_t nvm_version_show(struct device *dev,
301 struct device_attribute *attr, char *buf)
302 {
303 struct tb_retimer *rt = tb_to_retimer(dev);
304 int ret;
305
306 if (!mutex_trylock(&rt->tb->lock))
307 return restart_syscall();
308
309 if (!rt->nvm)
310 ret = -EAGAIN;
311 else
312 ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
313
314 mutex_unlock(&rt->tb->lock);
315 return ret;
316 }
317 static DEVICE_ATTR_RO(nvm_version);
318
vendor_show(struct device * dev,struct device_attribute * attr,char * buf)319 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
320 char *buf)
321 {
322 struct tb_retimer *rt = tb_to_retimer(dev);
323
324 return sprintf(buf, "%#x\n", rt->vendor);
325 }
326 static DEVICE_ATTR_RO(vendor);
327
/* sysfs attributes published for each retimer device */
static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

/* Attached to the device type below so the core creates the files */
static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};
344
/* Device release callback: frees the retimer once the last ref is gone */
static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}
351
/* Device type used by tb_to_retimer() to identify retimer devices */
struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};
357
/*
 * Probe the retimer at @index behind @port and, if it is supported,
 * register it as a child device of the USB4 port with @auth_status
 * carried over from the pre-enumeration status read.
 *
 * Returns %0 on success, -ENODEV if no retimer answers at this index,
 * -EOPNOTSUPP for an unsupported NVM format, or another negative errno.
 */
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct usb4_port *usb4;
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	usb4 = port->usb4;
	if (!usb4)
		return -EINVAL;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		/* -ENODEV simply means nothing present at this index */
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
		return ret;
	}

	/* 0x8087 is another Intel vendor ID used by some retimers */
	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	/* e.g. "0-1:1.2" — switch name, port number, retimer index */
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		/* After device_register() failure only put_device() is valid */
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	/* No callbacks of its own; runtime PM state follows the parent */
	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}
442
/* Tear down one retimer: free its NVM devices and unregister the device */
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}
449
/* Key used with device_find_child() to locate a specific retimer */
struct tb_retimer_lookup {
	const struct tb_port *port;	/* USB4 port the retimer sits behind */
	u8 index;			/* 1-based retimer index on that port */
};
454
retimer_match(struct device * dev,void * data)455 static int retimer_match(struct device *dev, void *data)
456 {
457 const struct tb_retimer_lookup *lookup = data;
458 struct tb_retimer *rt = tb_to_retimer(dev);
459
460 return rt && rt->port == lookup->port && rt->index == lookup->index;
461 }
462
/*
 * Find an already registered retimer at @index behind @port. On success
 * returns the retimer with a device reference held (caller must
 * put_device()), otherwise %NULL.
 */
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	return dev ? tb_to_retimer(dev) : NULL;
}
474
/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 *
 * Return: %0 on success (including when no retimers are found),
 * negative errno on failure.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	/* Index 0 unused; retimer indices are 1..TB_MAX_RETIMER_INDEX */
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending enumerate retimers read the
	 * authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable sideband channel for each retimer. We can do this
	 * regardless whether there is device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	tb_retimer_unset_inbound_sbtx(port);

	/* No retimer answered at any index */
	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			/* Already registered; drop the lookup reference */
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			/* Unsupported NVM format is not fatal for the scan */
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	return 0;
}
544
remove_retimer(struct device * dev,void * data)545 static int remove_retimer(struct device *dev, void *data)
546 {
547 struct tb_retimer *rt = tb_to_retimer(dev);
548 struct tb_port *port = data;
549
550 if (rt && rt->port == port)
551 tb_retimer_remove(rt);
552 return 0;
553 }
554
555 /**
556 * tb_retimer_remove_all() - Remove all retimers under port
557 * @port: USB4 port whose retimers to remove
558 *
559 * This removes all previously added retimers under @port.
560 */
tb_retimer_remove_all(struct tb_port * port)561 void tb_retimer_remove_all(struct tb_port *port)
562 {
563 struct usb4_port *usb4;
564
565 usb4 = port->usb4;
566 if (usb4)
567 device_for_each_child_reverse(&usb4->dev, port,
568 remove_retimer);
569 }
570