// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found, or
 * NULL otherwise.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on a CPort.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
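
/*
 * Illustrative sketch (not part of this file): a host-controller driver
 * is expected to call greybus_data_rcvd() from its receive path once it
 * has determined the destination CPort. The function name and buffer
 * variables below are hypothetical.
 *
 *	static void gb_example_hd_rx(struct gb_host_device *hd, u16 cport_id,
 *				     u8 *buf, size_t len)
 *	{
 *		greybus_data_rcvd(hd, cport_id, buf, len);
 *	}
 */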

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
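
/*
 * Illustrative sketch (not part of this file): a bundle driver would
 * typically create its connection in probe, using a CPort id taken from
 * the bundle's CPort descriptors. The probe function and request-handler
 * names below are hypothetical.
 *
 *	static int gb_example_probe(struct gb_bundle *bundle,
 *				    const struct greybus_bundle_id *id)
 *	{
 *		struct gb_connection *connection;
 *
 *		connection = gb_connection_create(bundle,
 *				le16_to_cpu(bundle->cport_desc[0].id),
 *				gb_example_request_handler);
 *		if (IS_ERR(connection))
 *			return PTR_ERR(connection);
 *		...
 *	}
 */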

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request the SVC to create a connection from the AP's cport to the
 * interface's cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform the interface about an active CPort. */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
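
/*
 * The two shutdown phases bracket the host-cport quiesce during
 * teardown: phase 1 is sent, the host cport is quiesced, and only then
 * is phase 2 sent. See gb_connection_disable() below.
 */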

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
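
/*
 * Illustrative sketch (not part of this file): gb_connection_enable() is
 * typically called from a bundle driver's probe path, paired with
 * gb_connection_disable() on the error and remove paths. The connection
 * variable and error label below are hypothetical.
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret)
 *		goto err_connection_destroy;
 */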

int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection teardown is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
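
/*
 * Illustrative sketch (not part of this file): a bundle driver's remove
 * path is expected to disable the connection before destroying it, the
 * reverse of the probe-path ordering. The disconnect function name below
 * is hypothetical.
 *
 *	static void gb_example_disconnect(struct gb_bundle *bundle)
 *	{
 *		struct gb_connection *connection = gb_bundle_get_drvdata(bundle);
 *
 *		gb_connection_disable(connection);
 *		gb_connection_destroy(connection);
 *	}
 */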

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
943