1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * xhci-dbgcap.c - xHCI debug capability support
4  *
5  * Copyright (C) 2017 Intel Corporation
6  *
7  * Author: Lu Baolu <baolu.lu@linux.intel.com>
8  */
9 #include <linux/bug.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kstrtox.h>
14 #include <linux/list.h>
15 #include <linux/nls.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/string.h>
20 #include <linux/sysfs.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23 
24 #include <linux/io-64-nonatomic-lo-hi.h>
25 
26 #include <asm/byteorder.h>
27 
28 #include "xhci.h"
29 #include "xhci-trace.h"
30 #include "xhci-dbgcap.h"
31 
static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
33 {
34 	if (!ctx)
35 		return;
36 	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
37 	kfree(ctx);
38 }
39 
40 /* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
42 {
43 	if (!ring)
44 		return;
45 
46 	if (ring->first_seg) {
47 		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
48 				  ring->first_seg->trbs,
49 				  ring->first_seg->dma);
50 		kfree(ring->first_seg);
51 	}
52 	kfree(ring);
53 }
54 
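/*
 * Build the UTF-16LE serial, product and manufacturer string descriptors plus
 * String0 (language ID 0x0409), and return their lengths packed one per byte
 * (String0 in the lowest byte, serial in the highest) for the info context.
 */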
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
56 {
57 	struct usb_string_descriptor	*s_desc;
58 	u32				string_length;
59 
60 	/* Serial string: */
61 	s_desc = (struct usb_string_descriptor *)strings->serial;
62 	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
63 			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
64 			DBC_MAX_STRING_LENGTH);
65 
66 	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
67 	s_desc->bDescriptorType	= USB_DT_STRING;
68 	string_length		= s_desc->bLength;
69 	string_length		<<= 8;
70 
71 	/* Product string: */
72 	s_desc = (struct usb_string_descriptor *)strings->product;
73 	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
74 			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
75 			DBC_MAX_STRING_LENGTH);
76 
77 	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
78 	s_desc->bDescriptorType	= USB_DT_STRING;
79 	string_length		+= s_desc->bLength;
80 	string_length		<<= 8;
81 
82 	/* Manufacturer string: */
83 	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
84 	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
85 			strlen(DBC_STRING_MANUFACTURER),
86 			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
87 			DBC_MAX_STRING_LENGTH);
88 
89 	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
90 	s_desc->bDescriptorType	= USB_DT_STRING;
91 	string_length		+= s_desc->bLength;
92 	string_length		<<= 8;
93 
94 	/* String0: */
95 	strings->string0[0]	= 4;
96 	strings->string0[1]	= USB_DT_STRING;
97 	strings->string0[2]	= 0x09;
98 	strings->string0[3]	= 0x04;
99 	string_length		+= 4;
100 
101 	return string_length;
102 }
103 
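/*
 * Program the bulk OUT and bulk IN endpoint contexts: 1024-byte max packet
 * size, max burst taken from the DbC control register, and the dequeue
 * pointer set to each ring's current enqueue position and cycle state.
 */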
static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
105 {
106 	struct xhci_ep_ctx      *ep_ctx;
107 	unsigned int		max_burst;
108 	dma_addr_t		deq;
109 
110 	max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
111 
112 	/* Populate bulk out endpoint context: */
113 	ep_ctx                  = dbc_bulkout_ctx(dbc);
114 	deq                     = dbc_bulkout_enq(dbc);
115 	ep_ctx->ep_info         = 0;
116 	ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
117 	ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);
118 
119 	/* Populate bulk in endpoint context: */
120 	ep_ctx                  = dbc_bulkin_ctx(dbc);
121 	deq                     = dbc_bulkin_enq(dbc);
122 	ep_ctx->ep_info         = 0;
123 	ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
124 	ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);
125 }
126 
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
128 {
129 	struct dbc_info_context	*info;
130 	u32			dev_info;
131 	dma_addr_t		dma;
132 
133 	if (!dbc)
134 		return;
135 
136 	/* Populate info Context: */
137 	info			= (struct dbc_info_context *)dbc->ctx->bytes;
138 	dma			= dbc->string_dma;
139 	info->string0		= cpu_to_le64(dma);
140 	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
141 	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
142 	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
143 	info->length		= cpu_to_le32(string_length);
144 
145 	/* Populate bulk in and out endpoint contexts: */
146 	xhci_dbc_init_ep_contexts(dbc);
147 
148 	/* Set DbC context and info registers: */
149 	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
150 
151 	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
152 	writel(dev_info, &dbc->regs->devinfo1);
153 
154 	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
155 	writel(dev_info, &dbc->regs->devinfo2);
156 }
157 
static void xhci_dbc_giveback(struct dbc_request *req, int status)
159 	__releases(&dbc->lock)
160 	__acquires(&dbc->lock)
161 {
162 	struct xhci_dbc		*dbc = req->dbc;
163 	struct device		*dev = dbc->dev;
164 
165 	list_del_init(&req->list_pending);
166 	req->trb_dma = 0;
167 	req->trb = NULL;
168 
169 	if (req->status == -EINPROGRESS)
170 		req->status = status;
171 
172 	trace_xhci_dbc_giveback_request(req);
173 
174 	dma_unmap_single(dev,
175 			 req->dma,
176 			 req->length,
177 			 dbc_ep_dma_direction(req));
178 
179 	/* Give back the transfer request: */
180 	spin_unlock(&dbc->lock);
181 	req->complete(dbc, req);
182 	spin_lock(&dbc->lock);
183 }
184 
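/* Turn a TRB into a No-Op, keeping only its cycle bit so the ring stays valid */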
static void trb_to_noop(union xhci_trb *trb)
186 {
187 	trb->generic.field[0]	= 0;
188 	trb->generic.field[1]	= 0;
189 	trb->generic.field[2]	= 0;
190 	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
191 	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
192 }
193 
static void xhci_dbc_flush_single_request(struct dbc_request *req)
195 {
196 	trb_to_noop(req->trb);
197 	xhci_dbc_giveback(req, -ESHUTDOWN);
198 }
199 
static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
201 {
202 	struct dbc_request	*req, *tmp;
203 
204 	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
205 		xhci_dbc_flush_single_request(req);
206 }
207 
static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
209 {
210 	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
211 	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
212 }
213 
214 struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
216 {
217 	struct dbc_request	*req;
218 
219 	if (direction != BULK_IN &&
220 	    direction != BULK_OUT)
221 		return NULL;
222 
223 	if (!dbc)
224 		return NULL;
225 
226 	req = kzalloc(sizeof(*req), flags);
227 	if (!req)
228 		return NULL;
229 
230 	req->dbc = dbc;
231 	INIT_LIST_HEAD(&req->list_pending);
232 	INIT_LIST_HEAD(&req->list_pool);
233 	req->direction = direction;
234 
235 	trace_xhci_dbc_alloc_request(req);
236 
237 	return req;
238 }
239 
240 void
dbc_free_request(struct dbc_request *req)
242 {
243 	trace_xhci_dbc_free_request(req);
244 
245 	kfree(req);
246 }
247 
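/*
 * Write one TRB at the enqueue pointer and advance it; when the link TRB is
 * reached, toggle its cycle bit, wrap to the start of the segment and flip
 * the ring's cycle state.
 */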
248 static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
250 		   u32 field2, u32 field3, u32 field4)
251 {
252 	union xhci_trb		*trb, *next;
253 
254 	trb = ring->enqueue;
255 	trb->generic.field[0]	= cpu_to_le32(field1);
256 	trb->generic.field[1]	= cpu_to_le32(field2);
257 	trb->generic.field[2]	= cpu_to_le32(field3);
258 	trb->generic.field[3]	= cpu_to_le32(field4);
259 
260 	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
261 
262 	ring->num_trbs_free--;
263 	next = ++(ring->enqueue);
264 	if (TRB_TYPE_LINK_LE32(next->link.control)) {
265 		next->link.control ^= cpu_to_le32(TRB_CYCLE);
266 		ring->enqueue = ring->enq_seg->trbs;
267 		ring->cycle_state ^= 1;
268 	}
269 }
270 
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
272 				  struct dbc_request *req)
273 {
274 	u64			addr;
275 	union xhci_trb		*trb;
276 	unsigned int		num_trbs;
277 	struct xhci_dbc		*dbc = req->dbc;
278 	struct xhci_ring	*ring = dep->ring;
279 	u32			length, control, cycle;
280 
281 	num_trbs = count_trbs(req->dma, req->length);
282 	WARN_ON(num_trbs != 1);
283 	if (ring->num_trbs_free < num_trbs)
284 		return -EBUSY;
285 
286 	addr	= req->dma;
287 	trb	= ring->enqueue;
288 	cycle	= ring->cycle_state;
289 	length	= TRB_LEN(req->length);
290 	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;
291 
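	/*
	 * Write the TRB with an inverted cycle bit first so the controller
	 * ignores it until all fields are in place; the correct cycle bit is
	 * set further below, after the write barrier.
	 */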
292 	if (cycle)
293 		control &= cpu_to_le32(~TRB_CYCLE);
294 	else
295 		control |= cpu_to_le32(TRB_CYCLE);
296 
297 	req->trb = ring->enqueue;
298 	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
299 	xhci_dbc_queue_trb(ring,
300 			   lower_32_bits(addr),
301 			   upper_32_bits(addr),
302 			   length, control);
303 
304 	/*
305 	 * Add a barrier between writes of trb fields and flipping
306 	 * the cycle bit:
307 	 */
308 	wmb();
309 
310 	if (cycle)
311 		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
312 	else
313 		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
314 
315 	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
316 
317 	return 0;
318 }
319 
320 static int
dbc_ep_do_queue(struct dbc_request *req)
322 {
323 	int			ret;
324 	struct xhci_dbc		*dbc = req->dbc;
325 	struct device		*dev = dbc->dev;
326 	struct dbc_ep		*dep = &dbc->eps[req->direction];
327 
328 	if (!req->length || !req->buf)
329 		return -EINVAL;
330 
331 	req->actual		= 0;
332 	req->status		= -EINPROGRESS;
333 
334 	req->dma = dma_map_single(dev,
335 				  req->buf,
336 				  req->length,
337 				  dbc_ep_dma_direction(dep));
338 	if (dma_mapping_error(dev, req->dma)) {
339 		dev_err(dbc->dev, "failed to map buffer\n");
340 		return -EFAULT;
341 	}
342 
343 	ret = xhci_dbc_queue_bulk_tx(dep, req);
344 	if (ret) {
345 		dev_err(dbc->dev, "failed to queue trbs\n");
346 		dma_unmap_single(dev,
347 				 req->dma,
348 				 req->length,
349 				 dbc_ep_dma_direction(dep));
350 		return -EFAULT;
351 	}
352 
353 	list_add_tail(&req->list_pending, &dep->list_pending);
354 
355 	return 0;
356 }
357 
int dbc_ep_queue(struct dbc_request *req)
359 {
360 	unsigned long		flags;
361 	struct xhci_dbc		*dbc = req->dbc;
362 	int			ret = -ESHUTDOWN;
363 
364 	if (!dbc)
365 		return -ENODEV;
366 
367 	if (req->direction != BULK_IN &&
368 	    req->direction != BULK_OUT)
369 		return -EINVAL;
370 
371 	spin_lock_irqsave(&dbc->lock, flags);
372 	if (dbc->state == DS_CONFIGURED)
373 		ret = dbc_ep_do_queue(req);
374 	spin_unlock_irqrestore(&dbc->lock, flags);
375 
376 	mod_delayed_work(system_wq, &dbc->event_work, 0);
377 
378 	trace_xhci_dbc_queue_request(req);
379 
380 	return ret;
381 }
382 
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
384 {
385 	struct dbc_ep		*dep;
386 
387 	dep			= &dbc->eps[direction];
388 	dep->dbc		= dbc;
389 	dep->direction		= direction;
390 	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;
391 
392 	INIT_LIST_HEAD(&dep->list_pending);
393 }
394 
static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
396 {
397 	xhci_dbc_do_eps_init(dbc, BULK_OUT);
398 	xhci_dbc_do_eps_init(dbc, BULK_IN);
399 }
400 
static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
402 {
403 	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
404 }
405 
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
407 		    struct xhci_erst *erst, gfp_t flags)
408 {
409 	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
410 					   &erst->erst_dma_addr, flags);
411 	if (!erst->entries)
412 		return -ENOMEM;
413 
414 	erst->num_entries = 1;
415 	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
416 	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
417 	erst->entries[0].rsvd = 0;
418 	return 0;
419 }
420 
static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
422 {
423 	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
424 			  erst->erst_dma_addr);
425 	erst->entries = NULL;
426 }
427 
428 static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
430 {
431 	struct xhci_container_ctx *ctx;
432 
433 	ctx = kzalloc(sizeof(*ctx), flags);
434 	if (!ctx)
435 		return NULL;
436 
437 	/* xHCI 7.6.9: all three contexts (info, ep-out and ep-in), each 64 bytes */
438 	ctx->size = 3 * DBC_CONTEXT_SIZE;
439 	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
440 	if (!ctx->bytes) {
441 		kfree(ctx);
442 		return NULL;
443 	}
444 	return ctx;
445 }
446 
static void xhci_dbc_ring_init(struct xhci_ring *ring)
448 {
449 	struct xhci_segment *seg = ring->first_seg;
450 
451 	/* clear all trbs on ring in case of old ring */
452 	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
453 
454 	/* Only event ring does not use link TRB */
455 	if (ring->type != TYPE_EVENT) {
456 		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
457 
458 		trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
459 		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
460 	}
461 	xhci_initialize_ring_info(ring);
462 }
463 
static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
465 {
466 	struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
467 	struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
468 
469 	if (!in_ring || !out_ring || !dbc->ctx) {
470 		dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
471 		return -ENODEV;
472 	}
473 
474 	xhci_dbc_ring_init(in_ring);
475 	xhci_dbc_ring_init(out_ring);
476 
477 	/* set ep context enqueue, dequeue, and cycle to initial values */
478 	xhci_dbc_init_ep_contexts(dbc);
479 
480 	return 0;
481 }
482 
483 static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
485 {
486 	struct xhci_ring *ring;
487 	struct xhci_segment *seg;
488 	dma_addr_t dma;
489 
490 	ring = kzalloc(sizeof(*ring), flags);
491 	if (!ring)
492 		return NULL;
493 
494 	ring->num_segs = 1;
495 	ring->type = type;
496 
497 	seg = kzalloc(sizeof(*seg), flags);
498 	if (!seg)
499 		goto seg_fail;
500 
501 	ring->first_seg = seg;
502 	ring->last_seg = seg;
503 	seg->next = seg;
504 
505 	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
506 	if (!seg->trbs)
507 		goto dma_fail;
508 
509 	seg->dma = dma;
510 
511 	INIT_LIST_HEAD(&ring->td_list);
512 
513 	xhci_dbc_ring_init(ring);
514 
515 	return ring;
516 dma_fail:
517 	kfree(seg);
518 seg_fail:
519 	kfree(ring);
520 	return NULL;
521 }
522 
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
524 {
525 	int			ret;
526 	dma_addr_t		deq;
527 	u32			string_length;
528 	struct device		*dev = dbc->dev;
529 
530 	/* Allocate various rings for events and transfers: */
531 	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
532 	if (!dbc->ring_evt)
533 		goto evt_fail;
534 
535 	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
536 	if (!dbc->ring_in)
537 		goto in_fail;
538 
539 	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
540 	if (!dbc->ring_out)
541 		goto out_fail;
542 
543 	/* Allocate and populate ERST: */
544 	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
545 	if (ret)
546 		goto erst_fail;
547 
548 	/* Allocate context data structure: */
549 	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
550 	if (!dbc->ctx)
551 		goto ctx_fail;
552 
553 	/* Allocate the string table: */
554 	dbc->string_size = sizeof(*dbc->string);
555 	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
556 					 &dbc->string_dma, flags);
557 	if (!dbc->string)
558 		goto string_fail;
559 
560 	/* Setup ERST register: */
561 	writel(dbc->erst.num_entries, &dbc->regs->ersts);
562 
563 	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
564 	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
565 				   dbc->ring_evt->dequeue);
566 	lo_hi_writeq(deq, &dbc->regs->erdp);
567 
568 	/* Setup strings and contexts: */
569 	string_length = xhci_dbc_populate_strings(dbc->string);
570 	xhci_dbc_init_contexts(dbc, string_length);
571 
572 	xhci_dbc_eps_init(dbc);
573 	dbc->state = DS_INITIALIZED;
574 
575 	return 0;
576 
577 string_fail:
578 	dbc_free_ctx(dev, dbc->ctx);
579 	dbc->ctx = NULL;
580 ctx_fail:
581 	dbc_erst_free(dev, &dbc->erst);
582 erst_fail:
583 	dbc_ring_free(dev, dbc->ring_out);
584 	dbc->ring_out = NULL;
585 out_fail:
586 	dbc_ring_free(dev, dbc->ring_in);
587 	dbc->ring_in = NULL;
588 in_fail:
589 	dbc_ring_free(dev, dbc->ring_evt);
590 	dbc->ring_evt = NULL;
591 evt_fail:
592 	return -ENOMEM;
593 }
594 
static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
596 {
597 	if (!dbc)
598 		return;
599 
600 	xhci_dbc_eps_exit(dbc);
601 
602 	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
603 	dbc->string = NULL;
604 
605 	dbc_free_ctx(dbc->dev, dbc->ctx);
606 	dbc->ctx = NULL;
607 
608 	dbc_erst_free(dbc->dev, &dbc->erst);
609 	dbc_ring_free(dbc->dev, dbc->ring_out);
610 	dbc_ring_free(dbc->dev, dbc->ring_in);
611 	dbc_ring_free(dbc->dev, dbc->ring_evt);
612 	dbc->ring_in = NULL;
613 	dbc->ring_out = NULL;
614 	dbc->ring_evt = NULL;
615 }
616 
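/*
 * Bring the DbC out of the disabled state: wait for the enable bit to clear,
 * allocate rings, contexts and strings, then set the DbC and port enable bits
 * and wait for the controller to acknowledge.
 */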
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
618 {
619 	int			ret;
620 	u32			ctrl;
621 
622 	if (dbc->state != DS_DISABLED)
623 		return -EINVAL;
624 
625 	writel(0, &dbc->regs->control);
626 	ret = xhci_handshake(&dbc->regs->control,
627 			     DBC_CTRL_DBC_ENABLE,
628 			     0, 1000);
629 	if (ret)
630 		return ret;
631 
632 	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
633 	if (ret)
634 		return ret;
635 
636 	ctrl = readl(&dbc->regs->control);
637 	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
638 	       &dbc->regs->control);
639 	ret = xhci_handshake(&dbc->regs->control,
640 			     DBC_CTRL_DBC_ENABLE,
641 			     DBC_CTRL_DBC_ENABLE, 1000);
642 	if (ret)
643 		return ret;
644 
645 	dbc->state = DS_ENABLED;
646 
647 	return 0;
648 }
649 
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
651 {
652 	if (dbc->state == DS_DISABLED)
653 		return -EINVAL;
654 
655 	writel(0, &dbc->regs->control);
656 	dbc->state = DS_DISABLED;
657 
658 	return 0;
659 }
660 
static int xhci_dbc_start(struct xhci_dbc *dbc)
662 {
663 	int			ret;
664 	unsigned long		flags;
665 
666 	WARN_ON(!dbc);
667 
668 	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */
669 
670 	spin_lock_irqsave(&dbc->lock, flags);
671 	ret = xhci_do_dbc_start(dbc);
672 	spin_unlock_irqrestore(&dbc->lock, flags);
673 
674 	if (ret) {
675 		pm_runtime_put(dbc->dev); /* note this was self.controller */
676 		return ret;
677 	}
678 
679 	return mod_delayed_work(system_wq, &dbc->event_work,
680 				msecs_to_jiffies(dbc->poll_interval));
681 }
682 
static void xhci_dbc_stop(struct xhci_dbc *dbc)
684 {
685 	int ret;
686 	unsigned long		flags;
687 
688 	WARN_ON(!dbc);
689 
690 	switch (dbc->state) {
691 	case DS_DISABLED:
692 		return;
693 	case DS_CONFIGURED:
694 		spin_lock(&dbc->lock);
695 		xhci_dbc_flush_requests(dbc);
696 		spin_unlock(&dbc->lock);
697 
698 		if (dbc->driver->disconnect)
699 			dbc->driver->disconnect(dbc);
700 		break;
701 	default:
702 		break;
703 	}
704 
705 	cancel_delayed_work_sync(&dbc->event_work);
706 
707 	spin_lock_irqsave(&dbc->lock, flags);
708 	ret = xhci_do_dbc_stop(dbc);
709 	spin_unlock_irqrestore(&dbc->lock, flags);
710 	if (ret)
711 		return;
712 
713 	xhci_dbc_mem_cleanup(dbc);
714 	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
715 }
716 
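/*
 * Track endpoint halt state from the DbC control register; when a halt
 * clears, ring the doorbell if requests are still pending on the endpoint.
 */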
717 static void
handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
719 {
720 	if (halted) {
721 		dev_info(dbc->dev, "DbC Endpoint halted\n");
722 		dep->halted = 1;
723 
724 	} else if (dep->halted) {
725 		dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
726 		dep->halted = 0;
727 
728 		if (!list_empty(&dep->list_pending))
729 			writel(DBC_DOOR_BELL_TARGET(dep->direction),
730 			       &dbc->regs->doorbell);
731 	}
732 }
733 
734 static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
736 {
737 	u32			portsc;
738 
739 	portsc = readl(&dbc->regs->portsc);
740 	if (portsc & DBC_PORTSC_CONN_CHANGE)
741 		dev_info(dbc->dev, "DbC port connect change\n");
742 
743 	if (portsc & DBC_PORTSC_RESET_CHANGE)
744 		dev_info(dbc->dev, "DbC port reset change\n");
745 
746 	if (portsc & DBC_PORTSC_LINK_CHANGE)
747 		dev_info(dbc->dev, "DbC port link status change\n");
748 
749 	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
750 		dev_info(dbc->dev, "DbC config error change\n");
751 
752 	/* The port reset change bit is cleared elsewhere: */
753 	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
754 }
755 
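/*
 * Handle a transfer event: find the pending request whose TRB address matches
 * the event, work out the completion status and transferred length, and give
 * the request back. Stall events need extra care as they may be spurious.
 */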
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
757 {
758 	struct dbc_ep		*dep;
759 	struct xhci_ring	*ring;
760 	int			ep_id;
761 	int			status;
762 	struct xhci_ep_ctx	*ep_ctx;
763 	u32			comp_code;
764 	size_t			remain_length;
765 	struct dbc_request	*req = NULL, *r;
766 
767 	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
768 	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
769 	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
770 	dep		= (ep_id == EPID_OUT) ?
771 				get_out_ep(dbc) : get_in_ep(dbc);
772 	ep_ctx		= (ep_id == EPID_OUT) ?
773 				dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
774 	ring		= dep->ring;
775 
776 	/* Match the pending request: */
777 	list_for_each_entry(r, &dep->list_pending, list_pending) {
778 		if (r->trb_dma == event->trans_event.buffer) {
779 			req = r;
780 			break;
781 		}
782 		if (r->status == -COMP_STALL_ERROR) {
783 			dev_warn(dbc->dev, "Give back stale stalled req\n");
784 			ring->num_trbs_free++;
785 			xhci_dbc_giveback(r, 0);
786 		}
787 	}
788 
789 	if (!req) {
790 		dev_warn(dbc->dev, "no matched request\n");
791 		return;
792 	}
793 
794 	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
795 
796 	switch (comp_code) {
797 	case COMP_SUCCESS:
798 		remain_length = 0;
799 		fallthrough;
800 	case COMP_SHORT_PACKET:
801 		status = 0;
802 		break;
803 	case COMP_TRB_ERROR:
804 	case COMP_BABBLE_DETECTED_ERROR:
805 	case COMP_USB_TRANSACTION_ERROR:
806 		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
807 		status = -comp_code;
808 		break;
809 	case COMP_STALL_ERROR:
810 		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
811 			 event->trans_event.buffer, remain_length, ep_ctx->deq);
812 		status = 0;
813 		dep->halted = 1;
814 
815 		/*
816 		 * xHC DbC may trigger a STALL bulk xfer event when host sends a
817 		 * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
818 		 * active bulk transfer.
819 		 *
820 		 * Don't give back this transfer request as hardware will later
821 		 * start processing TRBs starting from this 'STALLED' TRB,
822 		 * causing TRBs and requests to be out of sync.
823 		 *
824 		 * If STALL event shows some bytes were transferred then assume
825 		 * it's an actual transfer issue and give back the request.
826 		 * In this case mark the TRB as No-Op to avoid hw from using the
827 		 * TRB again.
828 		 */
829 
830 		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
831 			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
832 			if (remain_length == req->length) {
833 				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
834 				req->status = -COMP_STALL_ERROR;
835 				req->actual = 0;
836 				return;
837 			}
838 			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
839 			trb_to_noop(req->trb);
840 		}
841 		break;
842 
843 	default:
844 		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
845 		status = -comp_code;
846 		break;
847 	}
848 
849 	ring->num_trbs_free++;
850 	req->actual = req->length - remain_length;
851 	xhci_dbc_giveback(req, status);
852 }
853 
static void inc_evt_deq(struct xhci_ring *ring)
855 {
856 	/* If on the last TRB of the segment go back to the beginning */
857 	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
858 		ring->cycle_state ^= 1;
859 		ring->dequeue = ring->deq_seg->trbs;
860 		return;
861 	}
862 	ring->dequeue++;
863 }
864 
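/*
 * Advance the DbC state machine based on the port and control registers, then
 * consume any pending TRBs on the event ring. The return value tells the
 * caller what follow-up (configure, disconnect, poll rate change) is needed.
 */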
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
866 {
867 	dma_addr_t		deq;
868 	union xhci_trb		*evt;
869 	enum evtreturn		ret = EVT_DONE;
870 	u32			ctrl, portsc;
871 	bool			update_erdp = false;
872 
873 	/* DbC state machine: */
874 	switch (dbc->state) {
875 	case DS_DISABLED:
876 	case DS_INITIALIZED:
878 		return EVT_ERR;
879 	case DS_ENABLED:
880 		portsc = readl(&dbc->regs->portsc);
881 		if (portsc & DBC_PORTSC_CONN_STATUS) {
882 			dbc->state = DS_CONNECTED;
883 			dev_info(dbc->dev, "DbC connected\n");
884 		}
885 
886 		return EVT_DONE;
887 	case DS_CONNECTED:
888 		ctrl = readl(&dbc->regs->control);
889 		if (ctrl & DBC_CTRL_DBC_RUN) {
890 			dbc->state = DS_CONFIGURED;
891 			dev_info(dbc->dev, "DbC configured\n");
892 			portsc = readl(&dbc->regs->portsc);
893 			writel(portsc, &dbc->regs->portsc);
894 			return EVT_GSER;
895 		}
896 
897 		return EVT_DONE;
898 	case DS_CONFIGURED:
899 		/* Handle cable unplug event: */
900 		portsc = readl(&dbc->regs->portsc);
901 		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
902 		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
903 			dev_info(dbc->dev, "DbC cable unplugged\n");
904 			dbc->state = DS_ENABLED;
905 			xhci_dbc_flush_requests(dbc);
906 			xhci_dbc_reinit_ep_rings(dbc);
907 			return EVT_DISC;
908 		}
909 
910 		/* Handle debug port reset event: */
911 		if (portsc & DBC_PORTSC_RESET_CHANGE) {
912 			dev_info(dbc->dev, "DbC port reset\n");
913 			writel(portsc, &dbc->regs->portsc);
914 			dbc->state = DS_ENABLED;
915 			xhci_dbc_flush_requests(dbc);
916 			xhci_dbc_reinit_ep_rings(dbc);
917 			return EVT_DISC;
918 		}
919 
920 		/* Check and handle changes in endpoint halt status */
921 		ctrl = readl(&dbc->regs->control);
922 		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
923 		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);
924 
925 		/* Clear DbC run change bit: */
926 		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
927 			writel(ctrl, &dbc->regs->control);
928 			ctrl = readl(&dbc->regs->control);
929 		}
930 		break;
931 	default:
932 		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
933 		break;
934 	}
935 
936 	/* Handle the events in the event ring: */
937 	evt = dbc->ring_evt->dequeue;
938 	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
939 			dbc->ring_evt->cycle_state) {
940 		/*
941 		 * Add a barrier between reading the cycle flag and any
942 		 * reads of the event's flags/data below:
943 		 */
944 		rmb();
945 
946 		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
947 
948 		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
949 		case TRB_TYPE(TRB_PORT_STATUS):
950 			dbc_handle_port_status(dbc, evt);
951 			break;
952 		case TRB_TYPE(TRB_TRANSFER):
953 			dbc_handle_xfer_event(dbc, evt);
954 			ret = EVT_XFER_DONE;
955 			break;
956 		default:
957 			break;
958 		}
959 
960 		inc_evt_deq(dbc->ring_evt);
961 
962 		evt = dbc->ring_evt->dequeue;
963 		update_erdp = true;
964 	}
965 
966 	/* Update event ring dequeue pointer: */
967 	if (update_erdp) {
968 		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
969 					   dbc->ring_evt->dequeue);
970 		lo_hi_writeq(deq, &dbc->regs->erdp);
971 	}
972 
973 	return ret;
974 }
975 
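/*
 * Delayed-work handler that polls the DbC: process events, notify the
 * function driver of configure/disconnect, and re-arm the work, polling
 * faster while transfers are active.
 */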
static void xhci_dbc_handle_events(struct work_struct *work)
977 {
978 	enum evtreturn		evtr;
979 	struct xhci_dbc		*dbc;
980 	unsigned long		flags;
981 	unsigned int		poll_interval;
982 	unsigned long		busypoll_timelimit;
983 
984 	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
985 	poll_interval = dbc->poll_interval;
986 
987 	spin_lock_irqsave(&dbc->lock, flags);
988 	evtr = xhci_dbc_do_handle_events(dbc);
989 	spin_unlock_irqrestore(&dbc->lock, flags);
990 
991 	switch (evtr) {
992 	case EVT_GSER:
993 		if (dbc->driver->configure)
994 			dbc->driver->configure(dbc);
995 		break;
996 	case EVT_DISC:
997 		if (dbc->driver->disconnect)
998 			dbc->driver->disconnect(dbc);
999 		break;
1000 	case EVT_DONE:
1001 		/*
1002 		 * Set fast poll rate if there are pending out transfers, or
1003 		 * a transfer was recently processed
1004 		 */
1005 		busypoll_timelimit = dbc->xfer_timestamp +
1006 			msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);
1007 
1008 		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
1009 		    time_is_after_jiffies(busypoll_timelimit))
1010 			poll_interval = 0;
1011 		break;
1012 	case EVT_XFER_DONE:
1013 		dbc->xfer_timestamp = jiffies;
1014 		poll_interval = 0;
1015 		break;
1016 	default:
1017 		dev_info(dbc->dev, "stop handling dbc events\n");
1018 		return;
1019 	}
1020 
1021 	mod_delayed_work(system_wq, &dbc->event_work,
1022 			 msecs_to_jiffies(poll_interval));
1023 }
1024 
1025 static const char * const dbc_state_strings[DS_MAX] = {
1026 	[DS_DISABLED] = "disabled",
1027 	[DS_INITIALIZED] = "initialized",
1028 	[DS_ENABLED] = "enabled",
1029 	[DS_CONNECTED] = "connected",
1030 	[DS_CONFIGURED] = "configured",
1031 };
1032 
static ssize_t dbc_show(struct device *dev,
1034 			struct device_attribute *attr,
1035 			char *buf)
1036 {
1037 	struct xhci_dbc		*dbc;
1038 	struct xhci_hcd		*xhci;
1039 
1040 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1041 	dbc = xhci->dbc;
1042 
1043 	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
1044 		return sysfs_emit(buf, "unknown\n");
1045 
1046 	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
1047 }
1048 
static ssize_t dbc_store(struct device *dev,
1050 			 struct device_attribute *attr,
1051 			 const char *buf, size_t count)
1052 {
1053 	struct xhci_hcd		*xhci;
1054 	struct xhci_dbc		*dbc;
1055 
1056 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1057 	dbc = xhci->dbc;
1058 
1059 	if (sysfs_streq(buf, "enable"))
1060 		xhci_dbc_start(dbc);
1061 	else if (sysfs_streq(buf, "disable"))
1062 		xhci_dbc_stop(dbc);
1063 	else
1064 		return -EINVAL;
1065 
1066 	return count;
1067 }
1068 
static ssize_t dbc_idVendor_show(struct device *dev,
1070 			    struct device_attribute *attr,
1071 			    char *buf)
1072 {
1073 	struct xhci_dbc		*dbc;
1074 	struct xhci_hcd		*xhci;
1075 
1076 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1077 	dbc = xhci->dbc;
1078 
1079 	return sysfs_emit(buf, "%04x\n", dbc->idVendor);
1080 }
1081 
static ssize_t dbc_idVendor_store(struct device *dev,
1083 			     struct device_attribute *attr,
1084 			     const char *buf, size_t size)
1085 {
1086 	struct xhci_dbc		*dbc;
1087 	struct xhci_hcd		*xhci;
1088 	void __iomem		*ptr;
1089 	u16			value;
1090 	u32			dev_info;
1091 	int ret;
1092 
1093 	ret = kstrtou16(buf, 0, &value);
1094 	if (ret)
1095 		return ret;
1096 
1097 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1098 	dbc = xhci->dbc;
1099 	if (dbc->state != DS_DISABLED)
1100 		return -EBUSY;
1101 
1102 	dbc->idVendor = value;
1103 	ptr = &dbc->regs->devinfo1;
1104 	dev_info = readl(ptr);
1105 	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1106 	writel(dev_info, ptr);
1107 
1108 	return size;
1109 }
1110 
static ssize_t dbc_idProduct_show(struct device *dev,
1112 			    struct device_attribute *attr,
1113 			    char *buf)
1114 {
1115 	struct xhci_dbc         *dbc;
1116 	struct xhci_hcd         *xhci;
1117 
1118 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1119 	dbc = xhci->dbc;
1120 
1121 	return sysfs_emit(buf, "%04x\n", dbc->idProduct);
1122 }
1123 
static ssize_t dbc_idProduct_store(struct device *dev,
1125 			     struct device_attribute *attr,
1126 			     const char *buf, size_t size)
1127 {
1128 	struct xhci_dbc         *dbc;
1129 	struct xhci_hcd         *xhci;
1130 	void __iomem		*ptr;
1131 	u32			dev_info;
1132 	u16			value;
1133 	int ret;
1134 
1135 	ret = kstrtou16(buf, 0, &value);
1136 	if (ret)
1137 		return ret;
1138 
1139 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1140 	dbc = xhci->dbc;
1141 	if (dbc->state != DS_DISABLED)
1142 		return -EBUSY;
1143 
1144 	dbc->idProduct = value;
1145 	ptr = &dbc->regs->devinfo2;
1146 	dev_info = readl(ptr);
1147 	dev_info = (dev_info & ~(0xffffu)) | value;
1148 	writel(dev_info, ptr);
1149 	return size;
1150 }
1151 
static ssize_t dbc_bcdDevice_show(struct device *dev,
1153 				   struct device_attribute *attr,
1154 				   char *buf)
1155 {
1156 	struct xhci_dbc	*dbc;
1157 	struct xhci_hcd	*xhci;
1158 
1159 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1160 	dbc = xhci->dbc;
1161 
1162 	return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
1163 }
1164 
static ssize_t dbc_bcdDevice_store(struct device *dev,
1166 				    struct device_attribute *attr,
1167 				    const char *buf, size_t size)
1168 {
1169 	struct xhci_dbc	*dbc;
1170 	struct xhci_hcd	*xhci;
1171 	void __iomem *ptr;
1172 	u32 dev_info;
1173 	u16 value;
1174 	int ret;
1175 
1176 	ret = kstrtou16(buf, 0, &value);
1177 	if (ret)
1178 		return ret;
1179 
1180 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1181 	dbc = xhci->dbc;
1182 	if (dbc->state != DS_DISABLED)
1183 		return -EBUSY;
1184 
1185 	dbc->bcdDevice = value;
1186 	ptr = &dbc->regs->devinfo2;
1187 	dev_info = readl(ptr);
1188 	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1189 	writel(dev_info, ptr);
1190 
1191 	return size;
1192 }
1193 
static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
1195 				 struct device_attribute *attr,
1196 				 char *buf)
1197 {
1198 	struct xhci_dbc	*dbc;
1199 	struct xhci_hcd	*xhci;
1200 
1201 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1202 	dbc = xhci->dbc;
1203 
1204 	return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
1205 }
1206 
static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
1208 				  struct device_attribute *attr,
1209 				  const char *buf, size_t size)
1210 {
1211 	struct xhci_dbc *dbc;
1212 	struct xhci_hcd *xhci;
1213 	void __iomem *ptr;
1214 	u32 dev_info;
1215 	u8 value;
1216 	int ret;
1217 
1218 	/* bInterfaceProtocol is 8 bit, but... */
1219 	ret = kstrtou8(buf, 0, &value);
1220 	if (ret)
1221 		return ret;
1222 
1223 	/* ...xhci only supports values 0 and 1 */
1224 	if (value > 1)
1225 		return -EINVAL;
1226 
1227 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1228 	dbc = xhci->dbc;
1229 	if (dbc->state != DS_DISABLED)
1230 		return -EBUSY;
1231 
1232 	dbc->bInterfaceProtocol = value;
1233 	ptr = &dbc->regs->devinfo1;
1234 	dev_info = readl(ptr);
1235 	dev_info = (dev_info & ~(0xffu)) | value;
1236 	writel(dev_info, ptr);
1237 
1238 	return size;
1239 }
1240 
static ssize_t dbc_poll_interval_ms_show(struct device *dev,
1242 					 struct device_attribute *attr,
1243 					 char *buf)
1244 {
1245 	struct xhci_dbc *dbc;
1246 	struct xhci_hcd *xhci;
1247 
1248 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1249 	dbc = xhci->dbc;
1250 
1251 	return sysfs_emit(buf, "%u\n", dbc->poll_interval);
1252 }
1253 
static ssize_t dbc_poll_interval_ms_store(struct device *dev,
1255 					  struct device_attribute *attr,
1256 					  const char *buf, size_t size)
1257 {
1258 	struct xhci_dbc *dbc;
1259 	struct xhci_hcd *xhci;
1260 	u32 value;
1261 	int ret;
1262 
1263 	ret = kstrtou32(buf, 0, &value);
1264 	if (ret || value > DBC_POLL_INTERVAL_MAX)
1265 		return -EINVAL;
1266 
1267 	xhci = hcd_to_xhci(dev_get_drvdata(dev));
1268 	dbc = xhci->dbc;
1269 
1270 	dbc->poll_interval = value;
1271 
1272 	mod_delayed_work(system_wq, &dbc->event_work, 0);
1273 
1274 	return size;
1275 }
1276 
1277 static DEVICE_ATTR_RW(dbc);
1278 static DEVICE_ATTR_RW(dbc_idVendor);
1279 static DEVICE_ATTR_RW(dbc_idProduct);
1280 static DEVICE_ATTR_RW(dbc_bcdDevice);
1281 static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
1282 static DEVICE_ATTR_RW(dbc_poll_interval_ms);
1283 
1284 static struct attribute *dbc_dev_attrs[] = {
1285 	&dev_attr_dbc.attr,
1286 	&dev_attr_dbc_idVendor.attr,
1287 	&dev_attr_dbc_idProduct.attr,
1288 	&dev_attr_dbc_bcdDevice.attr,
1289 	&dev_attr_dbc_bInterfaceProtocol.attr,
1290 	&dev_attr_dbc_poll_interval_ms.attr,
1291 	NULL
1292 };
1293 ATTRIBUTE_GROUPS(dbc_dev);
1294 
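/*
 * Allocate and initialize the DbC structure and its sysfs attributes; bails
 * out if the debug capability is already enabled (presumably already in use).
 */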
1295 struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
1297 {
1298 	struct xhci_dbc		*dbc;
1299 	int			ret;
1300 
1301 	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
1302 	if (!dbc)
1303 		return NULL;
1304 
1305 	dbc->regs = base;
1306 	dbc->dev = dev;
1307 	dbc->driver = driver;
1308 	dbc->idProduct = DBC_PRODUCT_ID;
1309 	dbc->idVendor = DBC_VENDOR_ID;
1310 	dbc->bcdDevice = DBC_DEVICE_REV;
1311 	dbc->bInterfaceProtocol = DBC_PROTOCOL;
1312 	dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
1313 
1314 	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
1315 		goto err;
1316 
1317 	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
1318 	spin_lock_init(&dbc->lock);
1319 
1320 	ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
1321 	if (ret)
1322 		goto err;
1323 
1324 	return dbc;
1325 err:
1326 	kfree(dbc);
1327 	return NULL;
1328 }
1329 
1330 /* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
1332 {
1333 	if (!dbc)
1334 		return;
1335 	/* stop hw, stop wq and call dbc->ops->stop() */
1336 	xhci_dbc_stop(dbc);
1337 
1338 	/* remove sysfs files */
1339 	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
1340 
1341 	kfree(dbc);
1342 }
1343 
int xhci_create_dbc_dev(struct xhci_hcd *xhci)
1346 {
1347 	struct device		*dev;
1348 	void __iomem		*base;
1349 	int			ret;
1350 	int			dbc_cap_offs;
1351 
1352 	/* Gather everything needed to set up the DbC device */
1353 	dev = xhci_to_hcd(xhci)->self.controller;
1354 	base = &xhci->cap_regs->hc_capbase;
1355 
1356 	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
1357 	if (!dbc_cap_offs)
1358 		return -ENODEV;
1359 
1360 	/* already allocated and in use */
1361 	if (xhci->dbc)
1362 		return -EBUSY;
1363 
1364 	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);
1365 
1366 	return ret;
1367 }
1368 
void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
1370 {
1371 	unsigned long		flags;
1372 
1373 	if (!xhci->dbc)
1374 		return;
1375 
1376 	xhci_dbc_tty_remove(xhci->dbc);
1377 	spin_lock_irqsave(&xhci->lock, flags);
1378 	xhci->dbc = NULL;
1379 	spin_unlock_irqrestore(&xhci->lock, flags);
1380 }
1381 
1382 #ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
1384 {
1385 	struct xhci_dbc		*dbc = xhci->dbc;
1386 
1387 	if (!dbc)
1388 		return 0;
1389 
1390 	if (dbc->state == DS_CONFIGURED)
1391 		dbc->resume_required = 1;
1392 
1393 	xhci_dbc_stop(dbc);
1394 
1395 	return 0;
1396 }
1397 
int xhci_dbc_resume(struct xhci_hcd *xhci)
1399 {
1400 	int			ret = 0;
1401 	struct xhci_dbc		*dbc = xhci->dbc;
1402 
1403 	if (!dbc)
1404 		return 0;
1405 
1406 	if (dbc->resume_required) {
1407 		dbc->resume_required = 0;
1408 		xhci_dbc_start(dbc);
1409 	}
1410 
1411 	return ret;
1412 }
1413 #endif /* CONFIG_PM */
1414 
int xhci_dbc_init(void)
1416 {
1417 	return dbc_tty_init();
1418 }
1419 
void xhci_dbc_exit(void)
1421 {
1422 	dbc_tty_exit();
1423 }
1424