// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT     10
#define TB_CTL_RETRIES          4

/**
 * struct tb_ctl - Thunderbolt control channel
 */
struct tb_ctl {
        struct tb_nhi *nhi;
        struct tb_ring *tx;
        struct tb_ring *rx;

        struct dma_pool *frame_pool;
        struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
        struct mutex request_queue_lock;
        struct list_head request_queue;
        bool running;

        event_cb callback;
        void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
        dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
        dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
        dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
        dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
        dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so call tb_cfg_request_put() when you
 * are done with it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
        struct tb_cfg_request *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        kref_init(&req->kref);

        return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_get(&req->kref);
        mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
        struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

        kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_put(&req->kref, tb_cfg_request_destroy);
        mutex_unlock(&tb_cfg_request_lock);
}
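
/*
 * Illustrative lifecycle of the refcount above (the caller and second
 * user are hypothetical; only the tb_cfg_request_*() calls are real):
 * the allocator owns one reference, every additional user takes its
 * own with tb_cfg_request_get() and releases it independently.
 *
 *      struct tb_cfg_request *req = tb_cfg_request_alloc(); // ref = 1
 *      tb_cfg_request_get(req);        // ref = 2, e.g. handed to RX path
 *      tb_cfg_request_put(req);        // ref = 1
 *      tb_cfg_request_put(req);        // ref = 0, object is kfree()d
 */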

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
                                  struct tb_cfg_request *req)
{
        WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
        WARN_ON(req->ctl);

        mutex_lock(&ctl->request_queue_lock);
        if (!ctl->running) {
                mutex_unlock(&ctl->request_queue_lock);
                return -ENOTCONN;
        }
        req->ctl = ctl;
        list_add_tail(&req->list, &ctl->request_queue);
        set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        mutex_unlock(&ctl->request_queue_lock);
        return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
        struct tb_ctl *ctl = req->ctl;

        mutex_lock(&ctl->request_queue_lock);
        list_del(&req->list);
        clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                wake_up(&tb_cfg_request_cancel_queue);
        mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
        return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
        struct tb_cfg_request *req;
        bool found = false;

        mutex_lock(&pkg->ctl->request_queue_lock);
        list_for_each_entry(req, &pkg->ctl->request_queue, list) {
                tb_cfg_request_get(req);
                if (req->match(req, pkg)) {
                        found = true;
                        break;
                }
                tb_cfg_request_put(req);
        }
        mutex_unlock(&pkg->ctl->request_queue_lock);

        return found ? req : NULL;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
                        enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;

        /* check frame, TODO: frame flags */
        if (WARN(len != pkg->frame.size,
                        "wrong framesize (expected %#x, got %#x)\n",
                        len, pkg->frame.size))
                return -EIO;
        if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
                        type, pkg->frame.eof))
                return -EIO;
        if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
                        pkg->frame.sof))
                return -EIO;

        /* check header */
        if (WARN(header->unknown != 1 << 9,
                        "header->unknown is %#x\n", header->unknown))
                return -EIO;
        if (WARN(route != tb_cfg_get_route(header),
                        "wrong route (expected %llx, got %llx)\n",
                        route, tb_cfg_get_route(header)))
                return -EIO;
        return 0;
}

static int check_config_address(struct tb_cfg_address addr,
                                enum tb_cfg_space space, u32 offset,
                                u32 length)
{
        if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
                return -EIO;
        if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
                        space, addr.space))
                return -EIO;
        if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
                        offset, addr.offset))
                return -EIO;
        if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
                        length, addr.length))
                return -EIO;
        /*
         * We cannot check addr->port as it is set to the upstream port of the
         * sender.
         */
        return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
        struct cfg_error_pkg *pkg = response->buffer;
        struct tb_cfg_result res = { 0 };

        res.response_route = tb_cfg_get_route(&pkg->header);
        res.response_port = 0;
        res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
                               tb_cfg_get_route(&pkg->header));
        if (res.err)
                return res;

        WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
        WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
        WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
        res.err = 1;
        res.tb_error = pkg->error;
        res.response_port = pkg->port;
        return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
                                         enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;
        struct tb_cfg_result res = { 0 };

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return decode_error(pkg);

        res.response_port = 0; /* will be updated later for cfg_read/write */
        res.response_route = tb_cfg_get_route(header);
        res.err = check_header(pkg, len, type, route);
        return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
                               const struct tb_cfg_result *res)
{
        WARN_ON(res->err != 1);
        switch (res->tb_error) {
        case TB_CFG_ERROR_PORT_NOT_CONNECTED:
                /*
                 * Port is not connected. This can happen during surprise
                 * removal. Do not warn.
                 */
                return;
        case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
                /*
                 * Invalid cfg_space/offset/length combination in
                 * cfg_read/cfg_write.
                 */
                tb_ctl_WARN(ctl,
                        "CFG_ERROR(%llx:%x): Invalid config space or offset\n",
                        res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_NO_SUCH_PORT:
                /*
                 * - The route contains a non-existent port.
                 * - The route contains a non-PHY port (e.g. PCIe).
                 * - The port in cfg_read/cfg_write does not exist.
                 */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
                        res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_LOOP:
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
                        res->response_route, res->response_port);
                return;
        default:
                /* 5,6,7,9 and 11 are also valid error codes */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
                        res->response_route, res->response_port);
                return;
        }
}

static __be32 tb_crc(const void *data, size_t len)
{
        return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
        if (pkg) {
                dma_pool_free(pkg->ctl->frame_pool,
                              pkg->buffer, pkg->frame.buffer_phy);
                kfree(pkg);
        }
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
        struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

        if (!pkg)
                return NULL;
        pkg->ctl = ctl;
        pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
                                     &pkg->frame.buffer_phy);
        if (!pkg->buffer) {
                kfree(pkg);
                return NULL;
        }
        return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

        tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
                     enum tb_cfg_pkg_type type)
{
        int res;
        struct ctl_pkg *pkg;

        if (len % 4 != 0) { /* required for le->be conversion */
                tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
                return -EINVAL;
        }
        if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
                tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
                            len, TB_FRAME_SIZE - 4);
                return -EINVAL;
        }
        pkg = tb_ctl_pkg_alloc(ctl);
        if (!pkg)
                return -ENOMEM;
        pkg->frame.callback = tb_ctl_tx_callback;
        pkg->frame.size = len + 4;
        pkg->frame.sof = type;
        pkg->frame.eof = type;
        cpu_to_be32_array(pkg->buffer, data, len / 4);
        *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

        res = tb_ring_tx(ctl->tx, &pkg->frame);
        if (res) /* ring is stopped */
                tb_ctl_pkg_free(pkg);
        return res;
}
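
/*
 * Sketch of the on-wire frame tb_ctl_tx() builds, inferred from the
 * code above (not from a spec): the payload is byte swapped to big
 * endian dword by dword and an inverted CRC32C over the swapped
 * payload is appended as the last dword, so frame.size is len + 4:
 *
 *      dwords 0..1     tb_cfg_header (route)
 *      dwords 2..n     packet type specific payload
 *      dword  n+1      ~crc32c(~0, buffer, len), big endian
 */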

/**
 * tb_ctl_handle_event() - invoke ctl->callback for an event packet
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
                                struct ctl_pkg *pkg, size_t size)
{
        return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
        tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
                                                * We ignore failures during stop.
                                                * All rx packets are referenced
                                                * from ctl->rx_packets, so we do
                                                * not lose them.
                                                */
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
        const struct cfg_error_pkg *error = pkg->buffer;

        if (pkg->frame.eof != TB_CFG_PKG_ERROR)
                return false;

        switch (error->error) {
        case TB_CFG_ERROR_LINK_ERROR:
        case TB_CFG_ERROR_HEC_ERROR_DETECTED:
        case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
                return true;

        default:
                return false;
        }
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
        struct tb_cfg_request *req;
        __be32 crc32;

        if (canceled)
                return; /*
                         * ring is stopped, packet is referenced from
                         * ctl->rx_packets.
                         */

        if (frame->size < 4 || frame->size % 4 != 0) {
                tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
                           frame->size);
                goto rx;
        }

        frame->size -= 4; /* remove checksum */
        crc32 = tb_crc(pkg->buffer, frame->size);
        be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

        switch (frame->eof) {
        case TB_CFG_PKG_READ:
        case TB_CFG_PKG_WRITE:
        case TB_CFG_PKG_ERROR:
        case TB_CFG_PKG_OVERRIDE:
        case TB_CFG_PKG_RESET:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                if (tb_async_error(pkg)) {
                        tb_ctl_handle_event(pkg->ctl, frame->eof,
                                            pkg, frame->size);
                        goto rx;
                }
                break;

        case TB_CFG_PKG_EVENT:
        case TB_CFG_PKG_XDOMAIN_RESP:
        case TB_CFG_PKG_XDOMAIN_REQ:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                /* Fall through */
        case TB_CFG_PKG_ICM_EVENT:
                if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
                        goto rx;
                break;

        default:
                break;
        }

        /*
         * The received packet will be processed only if there is an
         * active request and the packet is what is expected. This
         * prevents packets such as replies coming after timeout has
         * triggered from messing with the active requests.
         */
        req = tb_cfg_request_find(pkg->ctl, pkg);
        if (req) {
                if (req->copy(req, pkg))
                        schedule_work(&req->work);
                tb_cfg_request_put(req);
        }

rx:
        tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
        struct tb_cfg_request *req = container_of(work, typeof(*req), work);

        if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                req->callback(req->callback_data);

        tb_cfg_request_dequeue(req);
        tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
                   void (*callback)(void *), void *callback_data)
{
        int ret;

        req->flags = 0;
        req->callback = callback;
        req->callback_data = callback_data;
        INIT_WORK(&req->work, tb_cfg_request_work);
        INIT_LIST_HEAD(&req->list);

        tb_cfg_request_get(req);
        ret = tb_cfg_request_enqueue(ctl, req);
        if (ret)
                goto err_put;

        ret = tb_ctl_tx(ctl, req->request, req->request_size,
                        req->request_type);
        if (ret)
                goto err_dequeue;

        if (!req->response)
                schedule_work(&req->work);

        return 0;

err_dequeue:
        tb_cfg_request_dequeue(req);
err_put:
        tb_cfg_request_put(req);

        return ret;
}
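
/*
 * A hypothetical asynchronous caller, shown only to illustrate the
 * tb_cfg_request() contract; the completion handler and request packet
 * are made up, the tb_cfg_*() calls are the ones defined in this file:
 *
 *      static void my_done(void *data)
 *      {
 *              struct tb_cfg_request *req = data;
 *
 *              // req->result is valid here (filled in by req->copy)
 *              tb_cfg_request_put(req);        // drop the alloc reference
 *      }
 *
 *      req = tb_cfg_request_alloc();
 *      req->request = &request_pkg;
 *      req->request_size = sizeof(request_pkg);
 *      req->request_type = TB_CFG_PKG_READ;
 *      ...
 *      if (tb_cfg_request(ctl, req, my_done, req))
 *              tb_cfg_request_put(req);        // never queued
 */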

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
        set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
        schedule_work(&req->work);
        wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
        req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
        complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in milliseconds to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
                                         struct tb_cfg_request *req,
                                         int timeout_msec)
{
        unsigned long timeout = msecs_to_jiffies(timeout_msec);
        struct tb_cfg_result res = { 0 };
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
        if (ret) {
                res.err = ret;
                return res;
        }

        if (!wait_for_completion_timeout(&done, timeout))
                tb_cfg_request_cancel(req, -ETIMEDOUT);

        flush_work(&req->work);

        return req->result;
}
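
/*
 * The canonical synchronous usage pattern, as followed by
 * tb_cfg_reset() and tb_cfg_read_raw()/tb_cfg_write_raw() below
 * (request_pkg/reply stand in for a concrete packet type):
 *
 *      req = tb_cfg_request_alloc();
 *      req->match = tb_cfg_match;
 *      req->copy = tb_cfg_copy;
 *      req->request = &request_pkg;
 *      req->request_size = sizeof(request_pkg);
 *      req->request_type = TB_CFG_PKG_...;
 *      req->response = &reply;
 *      req->response_size = sizeof(reply);
 *      req->response_type = TB_CFG_PKG_...;
 *      res = tb_cfg_request_sync(ctl, req, timeout_msec);
 *      tb_cfg_request_put(req);
 */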

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
        int i;
        struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

        if (!ctl)
                return NULL;
        ctl->nhi = nhi;
        ctl->callback = cb;
        ctl->callback_data = cb_data;

        mutex_init(&ctl->request_queue_lock);
        INIT_LIST_HEAD(&ctl->request_queue);
        ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
                                          TB_FRAME_SIZE, 4, 0);
        if (!ctl->frame_pool)
                goto err;

        ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
        if (!ctl->tx)
                goto err;

        ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
                                   0xffff, NULL, NULL);
        if (!ctl->rx)
                goto err;

        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
                ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
                if (!ctl->rx_packets[i])
                        goto err;
                ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
        }

        tb_ctl_dbg(ctl, "control channel created\n");
        return ctl;
err:
        tb_ctl_free(ctl);
        return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
        int i;

        if (!ctl)
                return;

        if (ctl->rx)
                tb_ring_free(ctl->rx);
        if (ctl->tx)
                tb_ring_free(ctl->tx);

        /* free RX packets */
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_pkg_free(ctl->rx_packets[i]);

        dma_pool_destroy(ctl->frame_pool);
        kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
        int i;

        tb_ctl_dbg(ctl, "control channel starting...\n");
        tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
        tb_ring_start(ctl->rx);
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_rx_submit(ctl->rx_packets[i]);

        ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
        mutex_lock(&ctl->request_queue_lock);
        ctl->running = false;
        mutex_unlock(&ctl->request_queue_lock);

        tb_ring_stop(ctl->rx);
        tb_ring_stop(ctl->tx);

        if (!list_empty(&ctl->request_queue))
                tb_ctl_WARN(ctl, "dangling request in request_queue\n");
        INIT_LIST_HEAD(&ctl->request_queue);
        tb_ctl_dbg(ctl, "control channel stopped\n");
}
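
/*
 * Sketch of the intended lifecycle, assuming a caller that already owns
 * a struct tb_nhi (this mirrors how a domain driver would drive this
 * file; the callback and data pointer are hypothetical):
 *
 *      ctl = tb_ctl_alloc(nhi, my_event_cb, my_data);
 *      if (!ctl)
 *              return -ENOMEM;
 *      tb_ctl_start(ctl);
 *      ... issue tb_cfg_read()/tb_cfg_write() etc ...
 *      tb_ctl_stop(ctl);       // no callbacks run after this
 *      tb_ctl_free(ctl);       // only valid after tb_ctl_stop()
 */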

/* public interface, commands */

/**
 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
                 enum tb_cfg_error error)
{
        struct cfg_error_pkg pkg = {
                .header = tb_cfg_make_header(route),
                .port = port,
                .error = error,
        };

        tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
        return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
                         const struct ctl_pkg *pkg)
{
        u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return true;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (route != tb_cfg_get_route(req->request))
                return false;
        if (pkg->frame.size != req->response_size)
                return false;

        if (pkg->frame.eof == TB_CFG_PKG_READ ||
            pkg->frame.eof == TB_CFG_PKG_WRITE) {
                const struct cfg_read_pkg *req_hdr = req->request;
                const struct cfg_read_pkg *res_hdr = pkg->buffer;

                if (req_hdr->addr.seq != res_hdr->addr.seq)
                        return false;
        }

        return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        struct tb_cfg_result res;

        /* Now make sure it is in expected format */
        res = parse_header(pkg, req->response_size, req->response_type,
                           tb_cfg_get_route(req->request));
        if (!res.err)
                memcpy(req->response, pkg->buffer, req->response_size);

        req->result = res;

        /* Always complete when first response is received */
        return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
                                  int timeout_msec)
{
        struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
        struct tb_cfg_result res = { 0 };
        struct tb_cfg_header reply;
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req) {
                res.err = -ENOMEM;
                return res;
        }

        req->match = tb_cfg_match;
        req->copy = tb_cfg_copy;
        req->request = &request;
        req->request_size = sizeof(request);
        req->request_type = TB_CFG_PKG_RESET;
        req->response = &reply;
        req->response_size = sizeof(reply);
        req->response_type = TB_CFG_PKG_RESET;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_read_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_write_pkg reply;
        int retries = 0;

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = sizeof(request);
                req->request_type = TB_CFG_PKG_READ;
                req->response = &reply;
                req->response_size = 12 + 4 * length;
                req->response_type = TB_CFG_PKG_READ;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        if (!res.err)
                memcpy(buffer, &reply.data, 4 * length);
        return res;
}
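
/*
 * For illustration, reading the first dword of a switch config space
 * with the raw interface might look like this (route and timeout are
 * arbitrary; tb_cfg_get_upstream_port() at the end of this file does
 * almost exactly this):
 *
 *      u32 val;
 *      struct tb_cfg_result res = tb_cfg_read_raw(ctl, &val, route, 0,
 *                                                 TB_CFG_SWITCH, 0, 1,
 *                                                 TB_CFG_DEFAULT_TIMEOUT);
 *      if (res.err == 1)
 *              // Thunderbolt error, see res.tb_error
 *      else if (res.err)
 *              // negative errno (e.g. -ETIMEDOUT)
 */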

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_write_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_read_pkg reply;
        int retries = 0;

        memcpy(&request.data, buffer, length * 4);

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = 12 + 4 * length;
                req->request_type = TB_CFG_PKG_WRITE;
                req->response = &reply;
                req->response_size = sizeof(reply);
                req->response_type = TB_CFG_PKG_WRITE;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
                            const struct tb_cfg_result *res)
{
        /*
         * For unimplemented ports access to port config space may return
         * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
         * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
         * that the caller can mark the port as disabled.
         */
        if (space == TB_CFG_PORT &&
            res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
                return -ENODEV;

        tb_cfg_print_error(ctl, res);
        return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
                enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
                        space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                return tb_cfg_get_error(ctl, space, &res);

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
                            space, offset);
                break;

        default:
                WARN(1, "tb_cfg_read: %d\n", res.err);
                break;
        }
        return res.err;
}
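
/*
 * A hypothetical caller of the errno-based wrapper above; unlike the
 * raw variant it hides struct tb_cfg_result, so ordinary kernel error
 * handling applies (the port/offset values are made up):
 *
 *      u32 data[2];
 *      int ret = tb_cfg_read(ctl, data, route, 1, TB_CFG_PORT, 0, 2);
 *      if (ret)        // -ENODEV, -EIO, -ETIMEDOUT, ...
 *              return ret;
 */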

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
                 enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
                        space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                return tb_cfg_get_error(ctl, space, &res);

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
                            space, offset);
                break;

        default:
                WARN(1, "tb_cfg_write: %d\n", res.err);
                break;
        }
        return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
        u32 dummy;
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
                                                   TB_CFG_SWITCH, 0, 1,
                                                   TB_CFG_DEFAULT_TIMEOUT);

        if (res.err == 1)
                return -EIO;
        if (res.err)
                return res.err;
        return res.response_port;
}