/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
					int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}
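
/*
 * To illustrate the rules above, operation->errno of a successful
 * outgoing operation passes through three states (a sketch, not code
 * from this file):
 *
 *	op->errno = -EBADR;                          at creation
 *	gb_operation_result_set(op, -EINPROGRESS);   just before send
 *	gb_operation_result_set(op, 0);              first final result "sticks"
 *	gb_operation_result(op);                     read by the requester
 */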

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
				!gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}
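
/*
 * The message_send hook invoked above is supplied by the host-device
 * driver.  A minimal sketch of such a hook, assuming a driver with its
 * own transfer machinery (the example_* names are hypothetical):
 *
 *	static int example_message_send(struct gb_host_device *hd,
 *					u16 dest_cport_id,
 *					struct gb_message *message,
 *					gfp_t gfp_mask)
 *	{
 *		queue message->buffer (message->header->size bytes) for
 *		transfer to dest_cport_id, then call greybus_message_sent()
 *		from the completion path;
 *		return 0;
 *	}
 */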

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation))
		gb_operation_request_handle(operation);
	else
		operation->callback(operation);

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_message_init(struct gb_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
				size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
				message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
					size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
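
/*
 * A typical incoming-request handler uses the helper above to allocate
 * its response before filling in the payload.  A minimal sketch (the
 * gb_example_* names are hypothetical, not part of this file):
 *
 *	static int gb_example_request_handler(struct gb_operation *op)
 *	{
 *		struct gb_example_response *response;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*response),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		response = op->response->payload;
 *		response->value = cpu_to_le32(42);
 *
 *		return 0;	the core then sends the response
 *	}
 */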

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size,
				unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}
/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
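
/*
 * Callers building variable-length requests can use this to bound their
 * payloads before allocating an operation, e.g. (a sketch; "len" is a
 * hypothetical payload length):
 *
 *	if (len > gb_operation_get_payload_size_max(connection))
 *		return -E2BIG;
 */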

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
				u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
						request_size,
						GB_REQUEST_TYPE_INVALID,
						flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation:	the operation to initiate
 * @callback:	the operation completion callback
 * @gfp:	the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback,
				gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
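
/*
 * A minimal sketch of asynchronous use (the gb_example_* names are
 * hypothetical, and reference handling depends on the caller's design):
 *
 *	static void gb_example_callback(struct gb_operation *op)
 *	{
 *		if (!gb_operation_result(op))
 *			consume op->response->payload;
 *		gb_operation_put(op);	drop the creator's reference
 *	}
 *
 *	ret = gb_operation_request_send(op, gb_example_callback, GFP_KERNEL);
 */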

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived (or when an
 * error is detected).  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
						unsigned int timeout)
{
	int ret;
	unsigned long timeout_jiffies;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					GFP_KERNEL);
	if (ret)
		return ret;

	if (timeout)
		timeout_jiffies = msecs_to_jiffies(timeout);
	else
		timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	ret = wait_for_completion_interruptible_timeout(&operation->completion,
							timeout_jiffies);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	} else if (ret == 0) {
		/* Cancel the operation if op timed out */
		gb_operation_cancel(operation, -ETIMEDOUT);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise the
 * handler can simply supply the result errno; this function will
 * allocate the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
					int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
			!gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
void greybus_message_sent(struct gb_host_device *hd,
					struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives. If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
					&operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);
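
/*
 * Host-device drivers call greybus_message_sent() from their transfer
 * completion path, for example (a sketch; the example_* names are
 * hypothetical):
 *
 *	static void example_transfer_complete(struct example_xfer *xfer)
 *	{
 *		greybus_message_sent(xfer->hd, xfer->message, xfer->status);
 *	}
 */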

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: invalid response id 0 received\n",
				connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: unexpected response id 0x%04x received\n",
				connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: malformed response 0x%02x received (%zu > %zu)\n",
				connection->name, header->type,
				size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					"%s: short response 0x%02x received (%zu < %zu)\n",
					connection->name, header->type,
					size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}
/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
			gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				"%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				connection->name,
				le16_to_cpu(header.operation_id),
				header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
						msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
						msg_size);
	}
}
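
/*
 * Host-device drivers feed received cport data into the stack through
 * gb_connection_recv(), for example from a USB completion handler
 * (a sketch; how the connection and buffer are obtained is driver
 * specific):
 *
 *	gb_connection_recv(connection, urb->transfer_buffer,
 *			   urb->actual_length);
 */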

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the operation
 * is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
				void *request, int request_size,
				void *response, int response_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
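
/*
 * A minimal sketch of synchronous use (the gb_example_* names are
 * hypothetical, not part of this file):
 *
 *	struct gb_example_request request = { .id = cpu_to_le32(1) };
 *	struct gb_example_response response;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, GB_EXAMPLE_TYPE_GET,
 *					&request, sizeof(request),
 *					&response, sizeof(response),
 *					1000);	timeout in milliseconds
 */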

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection:		connection to use
 * @type:		type of operation to send
 * @request:		memory buffer to copy the request from
 * @request_size:	size of @request
 * @timeout:		send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
				int type, void *request, int request_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					request_size, 0,
					GB_OPERATION_FLAG_UNIDIRECTIONAL,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
				sizeof(struct gb_message), 0, 0, NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
				0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}