/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
					int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message, the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not, the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR; attempts to do so trigger a warning, and
 * -EILSEQ is used instead.  Similarly, the only valid result value
 * to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
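
/*
 * Example: a completion callback typically fetches the final status with
 * gb_operation_result().  A minimal sketch (the callback name and error
 * handling are hypothetical, not part of this file; dropping the creator's
 * reference here is one common pattern, see gb_operation_request_send()
 * below):
 *
 *	static void example_callback(struct gb_operation *operation)
 *	{
 *		int ret = gb_operation_result(operation);
 *
 *		if (ret)
 *			dev_err(&operation->connection->hd->dev,
 *				"request failed: %d\n", ret);
 *
 *		gb_operation_put(operation);
 *	}
 */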

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
				!gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(unsigned long arg)
{
	struct gb_operation *operation = (void *)arg;

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
				size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
				message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
					size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
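
/*
 * Example: an incoming-request handler that needs to return payload data
 * allocates the response itself before filling it in.  A minimal sketch,
 * assuming a hypothetical protocol handler returning one status byte
 * (names are illustrative, not part of this file):
 *
 *	static int example_request_handler(struct gb_operation *operation)
 *	{
 *		u8 *payload;
 *
 *		if (!gb_operation_response_alloc(operation, sizeof(*payload),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		payload = operation->response->payload;
 *		*payload = 0x01;	// hypothetical status value
 *
 *		return 0;
 *	}
 */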

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size,
				unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		setup_timer(&operation->timer, gb_operation_timeout,
			    (unsigned long)operation);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}
/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
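
/*
 * Example: a protocol driver can use the maximum payload size to validate
 * or chunk a transfer before creating an operation.  Sketch (the length
 * variable is assumed to come from the caller):
 *
 *	size_t max_size = gb_operation_get_payload_size_max(connection);
 *
 *	if (len > max_size)
 *		len = max_size;	// or fail with -EMSGSIZE
 */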

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
				u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
						request_size,
						GB_REQUEST_TYPE_INVALID,
						flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation:	the operation to initiate
 * @callback:	the operation completion callback
 * @timeout:	operation timeout in milliseconds, or zero for no timeout
 * @gfp:	the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback,
				unsigned int timeout,
				gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
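
/*
 * Example: initiating an asynchronous request.  A minimal sketch, assuming
 * a hypothetical request type 0x42, hypothetical request/response
 * structures, and the example_callback() sketched above; one common
 * pattern is for the callback to drop the reference taken at creation
 * time, while the sender drops it itself if the send fails:
 *
 *	operation = gb_operation_create(connection, 0x42,
 *					sizeof(struct example_request),
 *					sizeof(struct example_response),
 *					GFP_KERNEL);
 *	if (!operation)
 *		return -ENOMEM;
 *
 *	ret = gb_operation_request_send(operation, example_callback,
 *					0, GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(operation);
 */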

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived (or when an
 * error is detected).  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
						unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
					int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
			!gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
					struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives. If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
					&operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);
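
/*
 * Example: a host-device driver is expected to call greybus_message_sent()
 * from its transmit-complete path once the hardware is done with the
 * message buffer.  A minimal sketch, assuming a hypothetical completion
 * handler that stashed the host device, message pointer, and status in
 * its transfer context:
 *
 *	static void example_tx_complete(struct example_xfer *xfer)
 *	{
 *		greybus_message_sent(xfer->hd, xfer->message, xfer->status);
 *	}
 */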

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: invalid response id 0 received\n",
				connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: unexpected response id 0x%04x received\n",
				connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: malformed response 0x%02x received (%zu > %zu)\n",
				connection->name, header->type,
				size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					"%s: short response 0x%02x received (%zu < %zu)\n",
					connection->name, header->type,
					size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
			gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				"%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				connection->name,
				le16_to_cpu(header.operation_id),
				header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
						msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
						msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size bytes will be copied into @response if the operation
 * is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
				void *request, int request_size,
				void *response, int response_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
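
/*
 * Example: most protocol drivers use this helper (or the gb_operation_sync()
 * wrapper from operation.h) for simple request/response exchanges.  A
 * minimal sketch, assuming hypothetical request/response structures and
 * request type 0x02:
 *
 *	struct example_request request = { .id = cpu_to_le32(1) };
 *	struct example_response response;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, 0x02,
 *					&request, sizeof(request),
 *					&response, sizeof(response),
 *					GB_OPERATION_TIMEOUT_DEFAULT);
 *	if (ret)
 *		return ret;
 */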

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection:		connection to use
 * @type:		type of operation to send
 * @request:		memory buffer to copy the request from
 * @request_size:	size of @request
 * @timeout:		send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that a successful send of a unidirectional operation does not imply
 * that the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
				int type, void *request, int request_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					request_size, 0,
					GB_OPERATION_FLAG_UNIDIRECTIONAL,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
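
/*
 * Example: a fire-and-forget notification, where no response is expected
 * from the remote end.  Sketch, assuming a hypothetical event structure
 * and request type 0x03:
 *
 *	struct example_event event = { .reason = cpu_to_le16(reason) };
 *
 *	ret = gb_operation_unidirectional_timeout(connection, 0x03,
 *					&event, sizeof(event),
 *					GB_OPERATION_TIMEOUT_DEFAULT);
 */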

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
				sizeof(struct gb_message), 0, 0, NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
				0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}
1262