1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_arm.h"
17 #include "vchiq_core.h"
18 
19 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 
21 #define VCHIQ_MSG_PADDING            0  /* -                                 */
22 #define VCHIQ_MSG_CONNECT            1  /* -                                 */
23 #define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
24 #define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
25 #define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
26 #define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
27 #define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
28 #define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
29 #define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
30 #define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
31 #define VCHIQ_MSG_PAUSE             10  /* -                                 */
32 #define VCHIQ_MSG_RESUME            11  /* -                                 */
33 #define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
34 #define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
35 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
36 
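/*
 * A message id packs the message type into bits 31..24, the source port
 * into bits 23..12 and the destination port into bits 11..0; see
 * VCHIQ_MAKE_MSG and the extraction macros below.
 */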
37 #define TYPE_SHIFT 24
38 
39 #define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
40 #define VCHIQ_PORT_FREE                0x1000
41 #define VCHIQ_PORT_IS_VALID(port)      ((port) < VCHIQ_PORT_FREE)
42 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
43 	(((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
44 #define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)(msgid) >> TYPE_SHIFT)
45 #define VCHIQ_MSG_SRCPORT(msgid) \
46 	(unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
47 #define VCHIQ_MSG_DSTPORT(msgid) \
48 	((unsigned short)(msgid) & 0xfff)
49 
50 #define MAKE_CONNECT			(VCHIQ_MSG_CONNECT << TYPE_SHIFT)
51 #define MAKE_OPEN(srcport) \
52 	((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
53 #define MAKE_OPENACK(srcport, dstport) \
54 	((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
55 #define MAKE_CLOSE(srcport, dstport) \
56 	((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
57 #define MAKE_DATA(srcport, dstport) \
58 	((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
59 #define MAKE_PAUSE			(VCHIQ_MSG_PAUSE << TYPE_SHIFT)
60 #define MAKE_RESUME			(VCHIQ_MSG_RESUME << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE			(VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
62 #define MAKE_REMOTE_USE_ACTIVE		(VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
63 
64 /* Ensure the fields are wide enough */
65 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
66 	== 0);
67 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
68 static_assert((unsigned int)VCHIQ_PORT_MAX <
69 	(unsigned int)VCHIQ_PORT_FREE);
70 
71 #define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
72 #define VCHIQ_MSGID_CLAIMED            0x40000000
73 
74 #define VCHIQ_FOURCC_INVALID           0x00000000
75 #define VCHIQ_FOURCC_IS_LEGAL(fourcc)  ((fourcc) != VCHIQ_FOURCC_INVALID)
76 
77 #define VCHIQ_BULK_ACTUAL_ABORTED -1
78 
79 #if VCHIQ_ENABLE_STATS
80 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
81 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
82 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
83 	(service->stats. stat += addend)
84 #else
85 #define VCHIQ_STATS_INC(state, stat) ((void)0)
86 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
87 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
88 #endif
89 
90 #define HANDLE_STATE_SHIFT 12
91 
92 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
93 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
94 #define SLOT_INDEX_FROM_DATA(state, data) \
95 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
96 	VCHIQ_SLOT_SIZE)
97 #define SLOT_INDEX_FROM_INFO(state, info) \
98 	((unsigned int)(info - state->slot_info))
99 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
100 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
101 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
102 	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
103 
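/*
 * Bulk queue positions wrap onto a ring of VCHIQ_NUM_SERVICE_BULKS entries;
 * the mask below works because that count is a power of two (verified in
 * check_sizes()).
 */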
104 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
105 
106 #define SRVTRACE_LEVEL(srv) \
107 	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
108 #define SRVTRACE_ENABLED(srv, lev) \
109 	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
110 
111 #define NO_CLOSE_RECVD	0
112 #define CLOSE_RECVD	1
113 
114 #define NO_RETRY_POLL	0
115 #define RETRY_POLL	1
116 
117 struct vchiq_open_payload {
118 	int fourcc;
119 	int client_id;
120 	short version;
121 	short version_min;
122 };
123 
124 struct vchiq_openack_payload {
125 	short version;
126 };
127 
128 enum {
129 	QMFLAGS_IS_BLOCKING     = BIT(0),
130 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
131 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
132 };
133 
134 enum {
135 	VCHIQ_POLL_TERMINATE,
136 	VCHIQ_POLL_REMOVE,
137 	VCHIQ_POLL_TXNOTIFY,
138 	VCHIQ_POLL_RXNOTIFY,
139 	VCHIQ_POLL_COUNT
140 };
141 
142 /* we require this for consistency between endpoints */
143 static_assert(sizeof(struct vchiq_header) == 8);
144 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
145 
146 static inline void check_sizes(void)
147 {
148 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
149 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
150 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
151 	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
152 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
153 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
154 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
155 }
156 
157 /* Run time control of log level, based on KERN_XXX level. */
158 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
159 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
160 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
161 
162 DEFINE_SPINLOCK(bulk_waiter_spinlock);
163 static DEFINE_SPINLOCK(quota_spinlock);
164 
165 static unsigned int handle_seq;
166 
167 static const char *const srvstate_names[] = {
168 	"FREE",
169 	"HIDDEN",
170 	"LISTENING",
171 	"OPENING",
172 	"OPEN",
173 	"OPENSYNC",
174 	"CLOSESENT",
175 	"CLOSERECVD",
176 	"CLOSEWAIT",
177 	"CLOSED"
178 };
179 
180 static const char *const reason_names[] = {
181 	"SERVICE_OPENED",
182 	"SERVICE_CLOSED",
183 	"MESSAGE_AVAILABLE",
184 	"BULK_TRANSMIT_DONE",
185 	"BULK_RECEIVE_DONE",
186 	"BULK_TRANSMIT_ABORTED",
187 	"BULK_RECEIVE_ABORTED"
188 };
189 
190 static const char *const conn_state_names[] = {
191 	"DISCONNECTED",
192 	"CONNECTING",
193 	"CONNECTED",
194 	"PAUSING",
195 	"PAUSE_SENT",
196 	"PAUSED",
197 	"RESUMING",
198 	"PAUSE_TIMEOUT",
199 	"RESUME_TIMEOUT"
200 };
201 
202 static void
203 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
204 
205 static const char *msg_type_str(unsigned int msg_type)
206 {
207 	switch (msg_type) {
208 	case VCHIQ_MSG_PADDING:			return "PADDING";
209 	case VCHIQ_MSG_CONNECT:			return "CONNECT";
210 	case VCHIQ_MSG_OPEN:			return "OPEN";
211 	case VCHIQ_MSG_OPENACK:			return "OPENACK";
212 	case VCHIQ_MSG_CLOSE:			return "CLOSE";
213 	case VCHIQ_MSG_DATA:			return "DATA";
214 	case VCHIQ_MSG_BULK_RX:			return "BULK_RX";
215 	case VCHIQ_MSG_BULK_TX:			return "BULK_TX";
216 	case VCHIQ_MSG_BULK_RX_DONE:		return "BULK_RX_DONE";
217 	case VCHIQ_MSG_BULK_TX_DONE:		return "BULK_TX_DONE";
218 	case VCHIQ_MSG_PAUSE:			return "PAUSE";
219 	case VCHIQ_MSG_RESUME:			return "RESUME";
220 	case VCHIQ_MSG_REMOTE_USE:		return "REMOTE_USE";
221 	case VCHIQ_MSG_REMOTE_RELEASE:		return "REMOTE_RELEASE";
222 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:	return "REMOTE_USE_ACTIVE";
223 	}
224 	return "???";
225 }
226 
227 static inline void
228 set_service_state(struct vchiq_service *service, int newstate)
229 {
230 	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
231 		       service->state->id, service->localport,
232 		       srvstate_names[service->srvstate],
233 		       srvstate_names[newstate]);
234 	service->srvstate = newstate;
235 }
236 
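/*
 * Map a service handle to its service structure; the low bits of the handle
 * index the state's services[] array. Callers must hold the RCU read lock
 * and verify that the stored handle still matches.
 */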
237 struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
238 {
239 	int idx = handle & (VCHIQ_MAX_SERVICES - 1);
240 
241 	return rcu_dereference(instance->state->services[idx]);
242 }
243 struct vchiq_service *
244 find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
245 {
246 	struct vchiq_service *service;
247 
248 	rcu_read_lock();
249 	service = handle_to_service(instance, handle);
250 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
251 	    service->handle == handle &&
252 	    kref_get_unless_zero(&service->ref_count)) {
253 		service = rcu_pointer_handoff(service);
254 		rcu_read_unlock();
255 		return service;
256 	}
257 	rcu_read_unlock();
258 	vchiq_log_info(vchiq_core_log_level,
259 		       "Invalid service handle 0x%x", handle);
260 	return NULL;
261 }
262 
263 struct vchiq_service *
264 find_service_by_port(struct vchiq_state *state, unsigned int localport)
265 {
266 	if (localport <= VCHIQ_PORT_MAX) {
267 		struct vchiq_service *service;
268 
269 		rcu_read_lock();
270 		service = rcu_dereference(state->services[localport]);
271 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
272 		    kref_get_unless_zero(&service->ref_count)) {
273 			service = rcu_pointer_handoff(service);
274 			rcu_read_unlock();
275 			return service;
276 		}
277 		rcu_read_unlock();
278 	}
279 	vchiq_log_info(vchiq_core_log_level,
280 		       "Invalid port %u", localport);
281 	return NULL;
282 }
283 
284 struct vchiq_service *
285 find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
286 {
287 	struct vchiq_service *service;
288 
289 	rcu_read_lock();
290 	service = handle_to_service(instance, handle);
291 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
292 	    service->handle == handle &&
293 	    service->instance == instance &&
294 	    kref_get_unless_zero(&service->ref_count)) {
295 		service = rcu_pointer_handoff(service);
296 		rcu_read_unlock();
297 		return service;
298 	}
299 	rcu_read_unlock();
300 	vchiq_log_info(vchiq_core_log_level,
301 		       "Invalid service handle 0x%x", handle);
302 	return NULL;
303 }
304 
305 struct vchiq_service *
306 find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
307 {
308 	struct vchiq_service *service;
309 
310 	rcu_read_lock();
311 	service = handle_to_service(instance, handle);
312 	if (service &&
313 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
314 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
315 	    service->handle == handle &&
316 	    service->instance == instance &&
317 	    kref_get_unless_zero(&service->ref_count)) {
318 		service = rcu_pointer_handoff(service);
319 		rcu_read_unlock();
320 		return service;
321 	}
322 	rcu_read_unlock();
323 	vchiq_log_info(vchiq_core_log_level,
324 		       "Invalid service handle 0x%x", handle);
325 	return service;
326 }
327 
328 struct vchiq_service *
329 __next_service_by_instance(struct vchiq_state *state,
330 			   struct vchiq_instance *instance,
331 			   int *pidx)
332 {
333 	struct vchiq_service *service = NULL;
334 	int idx = *pidx;
335 
336 	while (idx < state->unused_service) {
337 		struct vchiq_service *srv;
338 
339 		srv = rcu_dereference(state->services[idx]);
340 		idx++;
341 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
342 		    srv->instance == instance) {
343 			service = srv;
344 			break;
345 		}
346 	}
347 
348 	*pidx = idx;
349 	return service;
350 }
351 
352 struct vchiq_service *
353 next_service_by_instance(struct vchiq_state *state,
354 			 struct vchiq_instance *instance,
355 			 int *pidx)
356 {
357 	struct vchiq_service *service;
358 
359 	rcu_read_lock();
360 	while (1) {
361 		service = __next_service_by_instance(state, instance, pidx);
362 		if (!service)
363 			break;
364 		if (kref_get_unless_zero(&service->ref_count)) {
365 			service = rcu_pointer_handoff(service);
366 			break;
367 		}
368 	}
369 	rcu_read_unlock();
370 	return service;
371 }
372 
373 void
374 vchiq_service_get(struct vchiq_service *service)
375 {
376 	if (!service) {
377 		WARN(1, "%s service is NULL\n", __func__);
378 		return;
379 	}
380 	kref_get(&service->ref_count);
381 }
382 
383 static void service_release(struct kref *kref)
384 {
385 	struct vchiq_service *service =
386 		container_of(kref, struct vchiq_service, ref_count);
387 	struct vchiq_state *state = service->state;
388 
389 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
390 	rcu_assign_pointer(state->services[service->localport], NULL);
391 	if (service->userdata_term)
392 		service->userdata_term(service->base.userdata);
393 	kfree_rcu(service, rcu);
394 }
395 
396 void
397 vchiq_service_put(struct vchiq_service *service)
398 {
399 	if (!service) {
400 		WARN(1, "%s: service is NULL\n", __func__);
401 		return;
402 	}
403 	kref_put(&service->ref_count, service_release);
404 }
405 
406 int
407 vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
408 {
409 	struct vchiq_service *service;
410 	int id;
411 
412 	rcu_read_lock();
413 	service = handle_to_service(instance, handle);
414 	id = service ? service->client_id : 0;
415 	rcu_read_unlock();
416 	return id;
417 }
418 
419 void *
420 vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
421 {
422 	void *userdata;
423 	struct vchiq_service *service;
424 
425 	rcu_read_lock();
426 	service = handle_to_service(instance, handle);
427 	userdata = service ? service->base.userdata : NULL;
428 	rcu_read_unlock();
429 	return userdata;
430 }
431 EXPORT_SYMBOL(vchiq_get_service_userdata);
432 
433 static void
434 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
435 {
436 	struct vchiq_state *state = service->state;
437 	struct vchiq_service_quota *quota;
438 
439 	service->closing = 1;
440 
441 	/* Synchronise with other threads. */
442 	mutex_lock(&state->recycle_mutex);
443 	mutex_unlock(&state->recycle_mutex);
444 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
445 		/*
446 		 * If we're pausing then the slot_mutex is held until resume
447 		 * by the slot handler.  Therefore don't try to acquire this
448 		 * mutex if we're the slot handler and in the pause sent state.
449 		 * We don't need to in this case anyway.
450 		 */
451 		mutex_lock(&state->slot_mutex);
452 		mutex_unlock(&state->slot_mutex);
453 	}
454 
455 	/* Unblock any sending thread. */
456 	quota = &state->service_quotas[service->localport];
457 	complete(&quota->quota_event);
458 }
459 
460 static void
461 mark_service_closing(struct vchiq_service *service)
462 {
463 	mark_service_closing_internal(service, 0);
464 }
465 
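/*
 * Deliver a callback to the service owner. An error returned by the callback
 * is logged and ignored, and unless the reason is MESSAGE_AVAILABLE any
 * message header passed in is released on the caller's behalf.
 */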
466 static inline enum vchiq_status
467 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
468 		      struct vchiq_header *header, void *bulk_userdata)
469 {
470 	enum vchiq_status status;
471 
472 	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
473 			service->state->id, service->localport, reason_names[reason],
474 			header, bulk_userdata);
475 	status = service->base.callback(service->instance, reason, header, service->handle,
476 					bulk_userdata);
477 	if (status == VCHIQ_ERROR) {
478 		vchiq_log_warning(vchiq_core_log_level,
479 				  "%d: ignoring ERROR from callback to service %x",
480 				  service->state->id, service->handle);
481 		status = VCHIQ_SUCCESS;
482 	}
483 
484 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
485 		vchiq_release_message(service->instance, service->handle, header);
486 
487 	return status;
488 }
489 
490 inline void
491 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
492 {
493 	enum vchiq_connstate oldstate = state->conn_state;
494 
495 	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
496 		       conn_state_names[newstate]);
497 	state->conn_state = newstate;
498 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
499 }
500 
501 static inline void
502 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
503 {
504 	event->armed = 0;
505 	/*
506 	 * Don't clear the 'fired' flag because it may already have been set
507 	 * by the other side.
508 	 */
509 	init_waitqueue_head(wq);
510 }
511 
512 /*
513  * All the event waiting routines in VCHIQ used a custom semaphore
514  * implementation that filtered most signals. This achieved a behaviour similar
515  * to the "killable" family of functions. While cleaning up this code all the
516  * routines were switched to the "interruptible" family of functions, as the
517  * former was deemed unjustified, and the use of "killable" put all VCHIQ's
518  * threads in D state.
519  */
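/* Returns 1 once the event has fired, or 0 if the wait was interrupted. */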
520 static inline int
521 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
522 {
523 	if (!event->fired) {
524 		event->armed = 1;
525 		dsb(sy);
526 		if (wait_event_interruptible(*wq, event->fired)) {
527 			event->armed = 0;
528 			return 0;
529 		}
530 		event->armed = 0;
531 		/* Ensure that the peer sees that we are not waiting (armed == 0). */
532 		wmb();
533 	}
534 
535 	event->fired = 0;
536 	return 1;
537 }
538 
539 static inline void
540 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
541 {
542 	event->fired = 1;
543 	event->armed = 0;
544 	wake_up_all(wq);
545 }
546 
547 static inline void
548 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
549 {
550 	if (event->fired && event->armed)
551 		remote_event_signal_local(wq, event);
552 }
553 
554 void
555 remote_event_pollall(struct vchiq_state *state)
556 {
557 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
558 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
559 	remote_event_poll(&state->trigger_event, &state->local->trigger);
560 	remote_event_poll(&state->recycle_event, &state->local->recycle);
561 }
562 
563 /*
564  * Round up message sizes so that any space at the end of a slot is always big
565  * enough for a header. This relies on header size being a power of two, which
566  * has been verified earlier by a static assertion.
567  */
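/* For example, with the 8-byte header a 5-byte payload occupies a 16-byte stride. */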
568 
569 static inline size_t
570 calc_stride(size_t size)
571 {
572 	/* Allow room for the header */
573 	size += sizeof(struct vchiq_header);
574 
575 	/* Round up */
576 	return (size + sizeof(struct vchiq_header) - 1) &
577 		~(sizeof(struct vchiq_header) - 1);
578 }
579 
580 /* Called by the slot handler thread */
581 static struct vchiq_service *
582 get_listening_service(struct vchiq_state *state, int fourcc)
583 {
584 	int i;
585 
586 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
587 
588 	rcu_read_lock();
589 	for (i = 0; i < state->unused_service; i++) {
590 		struct vchiq_service *service;
591 
592 		service = rcu_dereference(state->services[i]);
593 		if (service &&
594 		    service->public_fourcc == fourcc &&
595 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
596 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
597 		      service->remoteport == VCHIQ_PORT_FREE)) &&
598 		    kref_get_unless_zero(&service->ref_count)) {
599 			service = rcu_pointer_handoff(service);
600 			rcu_read_unlock();
601 			return service;
602 		}
603 	}
604 	rcu_read_unlock();
605 	return NULL;
606 }
607 
608 /* Called by the slot handler thread */
609 static struct vchiq_service *
610 get_connected_service(struct vchiq_state *state, unsigned int port)
611 {
612 	int i;
613 
614 	rcu_read_lock();
615 	for (i = 0; i < state->unused_service; i++) {
616 		struct vchiq_service *service =
617 			rcu_dereference(state->services[i]);
618 
619 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
620 		    service->remoteport == port &&
621 		    kref_get_unless_zero(&service->ref_count)) {
622 			service = rcu_pointer_handoff(service);
623 			rcu_read_unlock();
624 			return service;
625 		}
626 	}
627 	rcu_read_unlock();
628 	return NULL;
629 }
630 
631 inline void
632 request_poll(struct vchiq_state *state, struct vchiq_service *service,
633 	     int poll_type)
634 {
635 	u32 value;
636 	int index;
637 
638 	if (!service)
639 		goto skip_service;
640 
641 	do {
642 		value = atomic_read(&service->poll_flags);
643 	} while (atomic_cmpxchg(&service->poll_flags, value,
644 		 value | BIT(poll_type)) != value);
645 
646 	index = BITSET_WORD(service->localport);
647 	do {
648 		value = atomic_read(&state->poll_services[index]);
649 	} while (atomic_cmpxchg(&state->poll_services[index],
650 		 value, value | BIT(service->localport & 0x1f)) != value);
651 
652 skip_service:
653 	state->poll_needed = 1;
654 	/* Ensure the slot handler thread sees the poll_needed flag. */
655 	wmb();
656 
657 	/* ... and ensure the slot handler runs. */
658 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
659 }
660 
661 /*
662  * Called from queue_message, by the slot handler and application threads,
663  * with slot_mutex held
664  */
665 static struct vchiq_header *
666 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
667 {
668 	struct vchiq_shared_state *local = state->local;
669 	int tx_pos = state->local_tx_pos;
670 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
671 
672 	if (space > slot_space) {
673 		struct vchiq_header *header;
674 		/* Fill the remaining space with padding */
675 		WARN_ON(!state->tx_data);
676 		header = (struct vchiq_header *)
677 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
678 		header->msgid = VCHIQ_MSGID_PADDING;
679 		header->size = slot_space - sizeof(struct vchiq_header);
680 
681 		tx_pos += slot_space;
682 	}
683 
684 	/* If necessary, get the next slot. */
685 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
686 		int slot_index;
687 
688 		/* If there is no free slot... */
689 
690 		if (!try_wait_for_completion(&state->slot_available_event)) {
691 			/* ...wait for one. */
692 
693 			VCHIQ_STATS_INC(state, slot_stalls);
694 
695 			/* But first, flush through the last slot. */
696 			state->local_tx_pos = tx_pos;
697 			local->tx_pos = tx_pos;
698 			remote_event_signal(&state->remote->trigger);
699 
700 			if (!is_blocking ||
701 			    (wait_for_completion_interruptible(&state->slot_available_event)))
702 				return NULL; /* No space available */
703 		}
704 
705 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
706 			complete(&state->slot_available_event);
707 			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
708 			return NULL;
709 		}
710 
711 		slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
712 		state->tx_data =
713 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
714 	}
715 
716 	state->local_tx_pos = tx_pos + space;
717 
718 	return (struct vchiq_header *)(state->tx_data +
719 						(tx_pos & VCHIQ_SLOT_MASK));
720 }
721 
722 static void
723 process_free_data_message(struct vchiq_state *state, u32 *service_found,
724 			  struct vchiq_header *header)
725 {
726 	int msgid = header->msgid;
727 	int port = VCHIQ_MSG_SRCPORT(msgid);
728 	struct vchiq_service_quota *quota = &state->service_quotas[port];
729 	int count;
730 
731 	spin_lock(&quota_spinlock);
732 	count = quota->message_use_count;
733 	if (count > 0)
734 		quota->message_use_count = count - 1;
735 	spin_unlock(&quota_spinlock);
736 
737 	if (count == quota->message_quota) {
738 		/*
739 		 * Signal the service that it
740 		 * has dropped below its quota
741 		 */
742 		complete(&quota->quota_event);
743 	} else if (count == 0) {
744 		vchiq_log_error(vchiq_core_log_level,
745 				"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
746 				port, quota->message_use_count, header, msgid, header->msgid,
747 				header->size);
748 		WARN(1, "invalid message use count\n");
749 	}
750 	if (!BITSET_IS_SET(service_found, port)) {
751 		/* Set the found bit for this service */
752 		BITSET_SET(service_found, port);
753 
754 		spin_lock(&quota_spinlock);
755 		count = quota->slot_use_count;
756 		if (count > 0)
757 			quota->slot_use_count = count - 1;
758 		spin_unlock(&quota_spinlock);
759 
760 		if (count > 0) {
761 			/*
762 			 * Signal the service in case
763 			 * it has dropped below its quota
764 			 */
765 			complete(&quota->quota_event);
766 			vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
767 					state->id, port, header->size, header, count - 1);
768 		} else {
769 			vchiq_log_error(vchiq_core_log_level,
770 					"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
771 					port, count, header, msgid, header->msgid, header->size);
772 			WARN(1, "bad slot use count\n");
773 		}
774 	}
775 }
776 
777 /* Called by the recycle thread. */
778 static void
779 process_free_queue(struct vchiq_state *state, u32 *service_found,
780 		   size_t length)
781 {
782 	struct vchiq_shared_state *local = state->local;
783 	int slot_queue_available;
784 
785 	/*
786 	 * Find slots which have been freed by the other side, and return them
787 	 * to the available queue.
788 	 */
789 	slot_queue_available = state->slot_queue_available;
790 
791 	/*
792 	 * Use a memory barrier to ensure that any state that may have been
793 	 * modified by another thread is not masked by stale prefetched
794 	 * values.
795 	 */
796 	mb();
797 
798 	while (slot_queue_available != local->slot_queue_recycle) {
799 		unsigned int pos;
800 		int slot_index = local->slot_queue[slot_queue_available &
801 			VCHIQ_SLOT_QUEUE_MASK];
802 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
803 		int data_found = 0;
804 
805 		slot_queue_available++;
806 		/*
807 		 * Beware of the address dependency - data is calculated
808 		 * using an index written by the other side.
809 		 */
810 		rmb();
811 
812 		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
813 				state->id, slot_index, data, local->slot_queue_recycle,
814 				slot_queue_available);
815 
816 		/* Initialise the bitmask for services which have used this slot */
817 		memset(service_found, 0, length);
818 
819 		pos = 0;
820 
821 		while (pos < VCHIQ_SLOT_SIZE) {
822 			struct vchiq_header *header =
823 				(struct vchiq_header *)(data + pos);
824 			int msgid = header->msgid;
825 
826 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
827 				process_free_data_message(state, service_found,
828 							  header);
829 				data_found = 1;
830 			}
831 
832 			pos += calc_stride(header->size);
833 			if (pos > VCHIQ_SLOT_SIZE) {
834 				vchiq_log_error(vchiq_core_log_level,
835 						"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
836 						pos, header, msgid, header->msgid, header->size);
837 				WARN(1, "invalid slot position\n");
838 			}
839 		}
840 
841 		if (data_found) {
842 			int count;
843 
844 			spin_lock(&quota_spinlock);
845 			count = state->data_use_count;
846 			if (count > 0)
847 				state->data_use_count = count - 1;
848 			spin_unlock(&quota_spinlock);
849 			if (count == state->data_quota)
850 				complete(&state->data_quota_event);
851 		}
852 
853 		/*
854 		 * Don't allow the slot to be reused until we are no
855 		 * longer interested in it.
856 		 */
857 		mb();
858 
859 		state->slot_queue_available = slot_queue_available;
860 		complete(&state->slot_available_event);
861 	}
862 }
863 
864 static ssize_t
865 memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
866 {
867 	memcpy(dest + offset, context + offset, maxsize);
868 	return maxsize;
869 }
870 
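/*
 * Copy a message payload into the slot in chunks supplied by the caller's
 * copy_callback. Returns the total size on success, or a negative errno if
 * the callback fails or returns an out-of-range length.
 */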
871 static ssize_t
872 copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
873 					   size_t maxsize),
874 	void *context,
875 	void *dest,
876 	size_t size)
877 {
878 	size_t pos = 0;
879 
880 	while (pos < size) {
881 		ssize_t callback_result;
882 		size_t max_bytes = size - pos;
883 
884 		callback_result = copy_callback(context, dest + pos, pos,
885 						max_bytes);
886 
887 		if (callback_result < 0)
888 			return callback_result;
889 
890 		if (!callback_result)
891 			return -EIO;
892 
893 		if (callback_result > max_bytes)
894 			return -EIO;
895 
896 		pos += callback_result;
897 	}
898 
899 	return size;
900 }
901 
902 /* Called by the slot handler and application threads */
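/*
 * Returns VCHIQ_SUCCESS once the message has been queued and the peer
 * signalled, VCHIQ_RETRY if the caller was interrupted (or no slot space is
 * available in the non-blocking case), or VCHIQ_ERROR if the service is
 * closing/closed or the payload copy fails.
 */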
903 static enum vchiq_status
904 queue_message(struct vchiq_state *state, struct vchiq_service *service,
905 	      int msgid,
906 	      ssize_t (*copy_callback)(void *context, void *dest,
907 				       size_t offset, size_t maxsize),
908 	      void *context, size_t size, int flags)
909 {
910 	struct vchiq_shared_state *local;
911 	struct vchiq_service_quota *quota = NULL;
912 	struct vchiq_header *header;
913 	int type = VCHIQ_MSG_TYPE(msgid);
914 
915 	size_t stride;
916 
917 	local = state->local;
918 
919 	stride = calc_stride(size);
920 
921 	WARN_ON(stride > VCHIQ_SLOT_SIZE);
922 
923 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
924 	    mutex_lock_killable(&state->slot_mutex))
925 		return VCHIQ_RETRY;
926 
927 	if (type == VCHIQ_MSG_DATA) {
928 		int tx_end_index;
929 
930 		if (!service) {
931 			WARN(1, "%s: service is NULL\n", __func__);
932 			mutex_unlock(&state->slot_mutex);
933 			return VCHIQ_ERROR;
934 		}
935 
936 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
937 				 QMFLAGS_NO_MUTEX_UNLOCK));
938 
939 		if (service->closing) {
940 			/* The service has been closed */
941 			mutex_unlock(&state->slot_mutex);
942 			return VCHIQ_ERROR;
943 		}
944 
945 		quota = &state->service_quotas[service->localport];
946 
947 		spin_lock(&quota_spinlock);
948 
949 		/*
950 		 * Ensure this service doesn't use more than its quota of
951 		 * messages or slots
952 		 */
953 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
954 
955 		/*
956 		 * Ensure data messages don't use more than their quota of
957 		 * slots
958 		 */
959 		while ((tx_end_index != state->previous_data_index) &&
960 		       (state->data_use_count == state->data_quota)) {
961 			VCHIQ_STATS_INC(state, data_stalls);
962 			spin_unlock(&quota_spinlock);
963 			mutex_unlock(&state->slot_mutex);
964 
965 			if (wait_for_completion_interruptible(&state->data_quota_event))
966 				return VCHIQ_RETRY;
967 
968 			mutex_lock(&state->slot_mutex);
969 			spin_lock(&quota_spinlock);
970 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
971 			if ((tx_end_index == state->previous_data_index) ||
972 			    (state->data_use_count < state->data_quota)) {
973 				/* Pass the signal on to other waiters */
974 				complete(&state->data_quota_event);
975 				break;
976 			}
977 		}
978 
979 		while ((quota->message_use_count == quota->message_quota) ||
980 		       ((tx_end_index != quota->previous_tx_index) &&
981 			(quota->slot_use_count == quota->slot_quota))) {
982 			spin_unlock(&quota_spinlock);
983 			vchiq_log_trace(vchiq_core_log_level,
984 					"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
985 					state->id, service->localport, msg_type_str(type), size,
986 					quota->message_use_count, quota->slot_use_count);
987 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
988 			mutex_unlock(&state->slot_mutex);
989 			if (wait_for_completion_interruptible(&quota->quota_event))
990 				return VCHIQ_RETRY;
991 			if (service->closing)
992 				return VCHIQ_ERROR;
993 			if (mutex_lock_killable(&state->slot_mutex))
994 				return VCHIQ_RETRY;
995 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
996 				/* The service has been closed */
997 				mutex_unlock(&state->slot_mutex);
998 				return VCHIQ_ERROR;
999 			}
1000 			spin_lock(&quota_spinlock);
1001 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
1002 		}
1003 
1004 		spin_unlock(&quota_spinlock);
1005 	}
1006 
1007 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1008 
1009 	if (!header) {
1010 		if (service)
1011 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1012 		/*
1013 		 * In the event of a failure, return the mutex to the
1014 		 * state it was in
1015 		 */
1016 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1017 			mutex_unlock(&state->slot_mutex);
1018 		return VCHIQ_RETRY;
1019 	}
1020 
1021 	if (type == VCHIQ_MSG_DATA) {
1022 		ssize_t callback_result;
1023 		int tx_end_index;
1024 		int slot_use_count;
1025 
1026 		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1027 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1028 			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1029 
1030 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1031 				 QMFLAGS_NO_MUTEX_UNLOCK));
1032 
1033 		callback_result =
1034 			copy_message_data(copy_callback, context,
1035 					  header->data, size);
1036 
1037 		if (callback_result < 0) {
1038 			mutex_unlock(&state->slot_mutex);
1039 			VCHIQ_SERVICE_STATS_INC(service, error_count);
1040 			return VCHIQ_ERROR;
1041 		}
1042 
1043 		if (SRVTRACE_ENABLED(service,
1044 				     VCHIQ_LOG_INFO))
1045 			vchiq_log_dump_mem("Sent", 0,
1046 					   header->data,
1047 					   min_t(size_t, 16, callback_result));
1048 
1049 		spin_lock(&quota_spinlock);
1050 		quota->message_use_count++;
1051 
1052 		tx_end_index =
1053 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1054 
1055 		/*
1056 		 * If this transmission can't fit in the last slot used by any
1057 		 * service, the data_use_count must be increased.
1058 		 */
1059 		if (tx_end_index != state->previous_data_index) {
1060 			state->previous_data_index = tx_end_index;
1061 			state->data_use_count++;
1062 		}
1063 
1064 		/*
1065 		 * If this isn't the same slot last used by this service,
1066 		 * the service's slot_use_count must be increased.
1067 		 */
1068 		if (tx_end_index != quota->previous_tx_index) {
1069 			quota->previous_tx_index = tx_end_index;
1070 			slot_use_count = ++quota->slot_use_count;
1071 		} else {
1072 			slot_use_count = 0;
1073 		}
1074 
1075 		spin_unlock(&quota_spinlock);
1076 
1077 		if (slot_use_count)
1078 			vchiq_log_trace(vchiq_core_log_level,
1079 					"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
1080 					service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1081 					size, slot_use_count, header);
1082 
1083 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1084 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1085 	} else {
1086 		vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1087 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1088 			       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1089 		if (size != 0) {
1090 			/*
1091 			 * It is assumed for now that this code path
1092 			 * only happens from calls inside this file.
1093 			 *
1094 			 * External callers are through the vchiq_queue_message
1095 			 * path which always sets the type to be VCHIQ_MSG_DATA
1096 			 *
1097 			 * At first glance this appears to be correct but
1098 			 * more review is needed.
1099 			 */
1100 			copy_message_data(copy_callback, context,
1101 					  header->data, size);
1102 		}
1103 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1104 	}
1105 
1106 	header->msgid = msgid;
1107 	header->size = size;
1108 
1109 	{
1110 		int svc_fourcc;
1111 
1112 		svc_fourcc = service
1113 			? service->base.fourcc
1114 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1115 
1116 		vchiq_log_info(SRVTRACE_LEVEL(service),
1117 			       "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1118 			       msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1119 			       VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1120 			       VCHIQ_MSG_DSTPORT(msgid), size);
1121 	}
1122 
1123 	/* Make sure the new header is visible to the peer. */
1124 	wmb();
1125 
1126 	/* Make the new tx_pos visible to the peer. */
1127 	local->tx_pos = state->local_tx_pos;
1128 	wmb();
1129 
1130 	if (service && (type == VCHIQ_MSG_CLOSE))
1131 		set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1132 
1133 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1134 		mutex_unlock(&state->slot_mutex);
1135 
1136 	remote_event_signal(&state->remote->trigger);
1137 
1138 	return VCHIQ_SUCCESS;
1139 }
1140 
1141 /* Called by the slot handler and application threads */
1142 static enum vchiq_status
1143 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1144 		   int msgid,
1145 		   ssize_t (*copy_callback)(void *context, void *dest,
1146 					    size_t offset, size_t maxsize),
1147 		   void *context, int size, int is_blocking)
1148 {
1149 	struct vchiq_shared_state *local;
1150 	struct vchiq_header *header;
1151 	ssize_t callback_result;
1152 
1153 	local = state->local;
1154 
1155 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1156 	    mutex_lock_killable(&state->sync_mutex))
1157 		return VCHIQ_RETRY;
1158 
1159 	remote_event_wait(&state->sync_release_event, &local->sync_release);
1160 
1161 	/* Ensure that reads don't overtake the remote_event_wait. */
1162 	rmb();
1163 
1164 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1165 		local->slot_sync);
1166 
1167 	{
1168 		int oldmsgid = header->msgid;
1169 
1170 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1171 			vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
1172 					state->id, oldmsgid);
1173 	}
1174 
1175 	vchiq_log_info(vchiq_sync_log_level,
1176 		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
1177 		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1178 		       header, size, VCHIQ_MSG_SRCPORT(msgid),
1179 		       VCHIQ_MSG_DSTPORT(msgid));
1180 
1181 	callback_result =
1182 		copy_message_data(copy_callback, context,
1183 				  header->data, size);
1184 
1185 	if (callback_result < 0) {
1186 		mutex_unlock(&state->slot_mutex);
1187 		VCHIQ_SERVICE_STATS_INC(service, error_count);
1188 		return VCHIQ_ERROR;
1189 	}
1190 
1191 	if (service) {
1192 		if (SRVTRACE_ENABLED(service,
1193 				     VCHIQ_LOG_INFO))
1194 			vchiq_log_dump_mem("Sent", 0,
1195 					   header->data,
1196 					   min_t(size_t, 16, callback_result));
1197 
1198 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1199 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1200 	} else {
1201 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1202 	}
1203 
1204 	header->size = size;
1205 	header->msgid = msgid;
1206 
1207 	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1208 		int svc_fourcc;
1209 
1210 		svc_fourcc = service
1211 			? service->base.fourcc
1212 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1213 
1214 		vchiq_log_trace(vchiq_sync_log_level,
1215 				"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1216 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1217 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
1218 				VCHIQ_MSG_DSTPORT(msgid), size);
1219 	}
1220 
1221 	remote_event_signal(&state->remote->sync_trigger);
1222 
1223 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1224 		mutex_unlock(&state->sync_mutex);
1225 
1226 	return VCHIQ_SUCCESS;
1227 }
1228 
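/*
 * Each rx slot counts the messages claimed from it; release_slot() hands the
 * slot back to the remote side once every claim has been released.
 */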
1229 static inline void
1230 claim_slot(struct vchiq_slot_info *slot)
1231 {
1232 	slot->use_count++;
1233 }
1234 
1235 static void
1236 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1237 	     struct vchiq_header *header, struct vchiq_service *service)
1238 {
1239 	mutex_lock(&state->recycle_mutex);
1240 
1241 	if (header) {
1242 		int msgid = header->msgid;
1243 
1244 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
1245 			mutex_unlock(&state->recycle_mutex);
1246 			return;
1247 		}
1248 
1249 		/* Rewrite the message header to prevent a double release */
1250 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1251 	}
1252 
1253 	slot_info->release_count++;
1254 
1255 	if (slot_info->release_count == slot_info->use_count) {
1256 		int slot_queue_recycle;
1257 		/* Add to the freed queue */
1258 
1259 		/*
1260 		 * A read barrier is necessary here to prevent speculative
1261 		 * fetches of remote->slot_queue_recycle from overtaking the
1262 		 * mutex.
1263 		 */
1264 		rmb();
1265 
1266 		slot_queue_recycle = state->remote->slot_queue_recycle;
1267 		state->remote->slot_queue[slot_queue_recycle &
1268 			VCHIQ_SLOT_QUEUE_MASK] =
1269 			SLOT_INDEX_FROM_INFO(state, slot_info);
1270 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1271 		vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
1272 			       SLOT_INDEX_FROM_INFO(state, slot_info),
1273 			       state->remote->slot_queue_recycle);
1274 
1275 		/*
1276 		 * A write barrier is necessary, but remote_event_signal
1277 		 * contains one.
1278 		 */
1279 		remote_event_signal(&state->remote->recycle);
1280 	}
1281 
1282 	mutex_unlock(&state->recycle_mutex);
1283 }
1284 
1285 static inline enum vchiq_reason
1286 get_bulk_reason(struct vchiq_bulk *bulk)
1287 {
1288 	if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1289 		if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1290 			return VCHIQ_BULK_TRANSMIT_ABORTED;
1291 
1292 		return VCHIQ_BULK_TRANSMIT_DONE;
1293 	}
1294 
1295 	if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1296 		return VCHIQ_BULK_RECEIVE_ABORTED;
1297 
1298 	return VCHIQ_BULK_RECEIVE_DONE;
1299 }
1300 
1301 /* Called by the slot handler - don't hold the bulk mutex */
1302 static enum vchiq_status
1303 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1304 	     int retry_poll)
1305 {
1306 	enum vchiq_status status = VCHIQ_SUCCESS;
1307 
1308 	vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
1309 			service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
1310 			queue->process, queue->remote_notify, queue->remove);
1311 
1312 	queue->remote_notify = queue->process;
1313 
1314 	while (queue->remove != queue->remote_notify) {
1315 		struct vchiq_bulk *bulk =
1316 			&queue->bulks[BULK_INDEX(queue->remove)];
1317 
1318 		/*
1319 		 * Only generate callbacks for non-dummy bulk
1320 		 * requests, and non-terminated services
1321 		 */
1322 		if (bulk->data && service->instance) {
1323 			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1324 				if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1325 					VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
1326 					VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
1327 								bulk->actual);
1328 				} else {
1329 					VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
1330 					VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
1331 								bulk->actual);
1332 				}
1333 			} else {
1334 				VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
1335 			}
1336 			if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1337 				struct bulk_waiter *waiter;
1338 
1339 				spin_lock(&bulk_waiter_spinlock);
1340 				waiter = bulk->userdata;
1341 				if (waiter) {
1342 					waiter->actual = bulk->actual;
1343 					complete(&waiter->event);
1344 				}
1345 				spin_unlock(&bulk_waiter_spinlock);
1346 			} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1347 				enum vchiq_reason reason =
1348 						get_bulk_reason(bulk);
1349 				status = make_service_callback(service, reason, NULL,
1350 							       bulk->userdata);
1351 				if (status == VCHIQ_RETRY)
1352 					break;
1353 			}
1354 		}
1355 
1356 		queue->remove++;
1357 		complete(&service->bulk_remove_event);
1358 	}
1359 	if (!retry_poll)
1360 		status = VCHIQ_SUCCESS;
1361 
1362 	if (status == VCHIQ_RETRY)
1363 		request_poll(service->state, service, (queue == &service->bulk_tx) ?
1364 			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1365 
1366 	return status;
1367 }
1368 
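/*
 * Service the deferred poll requests for one 32-service group of the
 * poll_services bitmap, clearing the flags as they are handled.
 */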
1369 static void
1370 poll_services_of_group(struct vchiq_state *state, int group)
1371 {
1372 	u32 flags = atomic_xchg(&state->poll_services[group], 0);
1373 	int i;
1374 
1375 	for (i = 0; flags; i++) {
1376 		struct vchiq_service *service;
1377 		u32 service_flags;
1378 
1379 		if ((flags & BIT(i)) == 0)
1380 			continue;
1381 
1382 		service = find_service_by_port(state, (group << 5) + i);
1383 		flags &= ~BIT(i);
1384 
1385 		if (!service)
1386 			continue;
1387 
1388 		service_flags = atomic_xchg(&service->poll_flags, 0);
1389 		if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1390 			vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1391 				       state->id, service->localport,
1392 				       service->remoteport);
1393 
1394 			/*
1395 			 * Make it look like a client, because
1396 			 * it must be removed and not left in
1397 			 * the LISTENING state.
1398 			 */
1399 			service->public_fourcc = VCHIQ_FOURCC_INVALID;
1400 
1401 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
1402 							 VCHIQ_SUCCESS)
1403 				request_poll(state, service, VCHIQ_POLL_REMOVE);
1404 		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1405 			vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
1406 				       state->id, service->localport, service->remoteport);
1407 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) != VCHIQ_SUCCESS)
1408 				request_poll(state, service, VCHIQ_POLL_TERMINATE);
1409 		}
1410 		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1411 			notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1412 		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1413 			notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1414 		vchiq_service_put(service);
1415 	}
1416 }
1417 
1418 /* Called by the slot handler thread */
1419 static void
1420 poll_services(struct vchiq_state *state)
1421 {
1422 	int group;
1423 
1424 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1425 		poll_services_of_group(state, group);
1426 }
1427 
1428 /* Called with the bulk_mutex held */
1429 static void
1430 abort_outstanding_bulks(struct vchiq_service *service,
1431 			struct vchiq_bulk_queue *queue)
1432 {
1433 	int is_tx = (queue == &service->bulk_tx);
1434 
1435 	vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
1436 			service->state->id, service->localport, is_tx ? 't' : 'r',
1437 			queue->local_insert, queue->remote_insert, queue->process);
1438 
1439 	WARN_ON((int)(queue->local_insert - queue->process) < 0);
1440 	WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1441 
1442 	while ((queue->process != queue->local_insert) ||
1443 	       (queue->process != queue->remote_insert)) {
1444 		struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1445 
1446 		if (queue->process == queue->remote_insert) {
1447 			/* fabricate a matching dummy bulk */
1448 			bulk->remote_data = NULL;
1449 			bulk->remote_size = 0;
1450 			queue->remote_insert++;
1451 		}
1452 
1453 		if (queue->process != queue->local_insert) {
1454 			vchiq_complete_bulk(service->instance, bulk);
1455 
1456 			vchiq_log_info(SRVTRACE_LEVEL(service),
1457 				       "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1458 				       is_tx ? "Send Bulk to" : "Recv Bulk from",
1459 				       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1460 				       service->remoteport, bulk->size, bulk->remote_size);
1461 		} else {
1462 			/* fabricate a matching dummy bulk */
1463 			bulk->data = 0;
1464 			bulk->size = 0;
1465 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1466 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1467 				VCHIQ_BULK_RECEIVE;
1468 			queue->local_insert++;
1469 		}
1470 
1471 		queue->process++;
1472 	}
1473 }
1474 
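/*
 * Handle an incoming OPEN request: find a listening service for the fourcc,
 * check that the version ranges overlap and acknowledge with an OPENACK (or
 * reply with CLOSE if no service can accept the open). Returns 0 if the
 * reply could not be queued and the message must be reprocessed.
 */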
1475 static int
1476 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1477 {
1478 	const struct vchiq_open_payload *payload;
1479 	struct vchiq_service *service = NULL;
1480 	int msgid, size;
1481 	unsigned int localport, remoteport, fourcc;
1482 	short version, version_min;
1483 
1484 	msgid = header->msgid;
1485 	size = header->size;
1486 	localport = VCHIQ_MSG_DSTPORT(msgid);
1487 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1488 	if (size < sizeof(struct vchiq_open_payload))
1489 		goto fail_open;
1490 
1491 	payload = (struct vchiq_open_payload *)header->data;
1492 	fourcc = payload->fourcc;
1493 	vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1494 		       state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
1495 
1496 	service = get_listening_service(state, fourcc);
1497 	if (!service)
1498 		goto fail_open;
1499 
1500 	/* A matching service exists */
1501 	version = payload->version;
1502 	version_min = payload->version_min;
1503 
1504 	if ((service->version < version_min) || (version < service->version_min)) {
1505 		/* Version mismatch */
1506 		vchiq_loud_error_header();
1507 		vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1508 				 state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
1509 				 service->version, service->version_min, version, version_min);
1510 		vchiq_loud_error_footer();
1511 		vchiq_service_put(service);
1512 		service = NULL;
1513 		goto fail_open;
1514 	}
1515 	service->peer_version = version;
1516 
1517 	if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1518 		struct vchiq_openack_payload ack_payload = {
1519 			service->version
1520 		};
1521 		int openack_id = MAKE_OPENACK(service->localport, remoteport);
1522 
1523 		if (state->version_common <
1524 		    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1525 			service->sync = 0;
1526 
1527 		/* Acknowledge the OPEN */
1528 		if (service->sync) {
1529 			if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
1530 					       &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
1531 				goto bail_not_ready;
1532 
1533 			/* The service is now open */
1534 			set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
1535 		} else {
1536 			if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
1537 					  &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
1538 				goto bail_not_ready;
1539 
1540 			/* The service is now open */
1541 			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1542 		}
1543 	}
1544 
1545 	/* Success - the message has been dealt with */
1546 	vchiq_service_put(service);
1547 	return 1;
1548 
1549 fail_open:
1550 	/* No available service, or an invalid request - send a CLOSE */
1551 	if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1552 			  NULL, NULL, 0, 0) == VCHIQ_RETRY)
1553 		goto bail_not_ready;
1554 
1555 	return 1;
1556 
1557 bail_not_ready:
1558 	if (service)
1559 		vchiq_service_put(service);
1560 
1561 	return 0;
1562 }
1563 
1564 /**
1565  * parse_message() - parses a single message from the rx slot
1566  * @state:  vchiq state struct
1567  * @header: message header
1568  *
1569  * Context: Process context
1570  *
1571  * Return:
1572  * * >= 0     - size of the parsed message payload (without header)
1573  * * -EINVAL  - fatal error occurred, bail out is required
1574  */
1575 static int
1576 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1577 {
1578 	struct vchiq_service *service = NULL;
1579 	unsigned int localport, remoteport;
1580 	int msgid, size, type, ret = -EINVAL;
1581 
1582 	DEBUG_INITIALISE(state->local);
1583 
1584 	DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1585 	msgid = header->msgid;
1586 	DEBUG_VALUE(PARSE_MSGID, msgid);
1587 	size = header->size;
1588 	type = VCHIQ_MSG_TYPE(msgid);
1589 	localport = VCHIQ_MSG_DSTPORT(msgid);
1590 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1591 
1592 	if (type != VCHIQ_MSG_DATA)
1593 		VCHIQ_STATS_INC(state, ctrl_rx_count);
1594 
1595 	switch (type) {
1596 	case VCHIQ_MSG_OPENACK:
1597 	case VCHIQ_MSG_CLOSE:
1598 	case VCHIQ_MSG_DATA:
1599 	case VCHIQ_MSG_BULK_RX:
1600 	case VCHIQ_MSG_BULK_TX:
1601 	case VCHIQ_MSG_BULK_RX_DONE:
1602 	case VCHIQ_MSG_BULK_TX_DONE:
1603 		service = find_service_by_port(state, localport);
1604 		if ((!service ||
1605 		     ((service->remoteport != remoteport) &&
1606 		      (service->remoteport != VCHIQ_PORT_FREE))) &&
1607 		    (localport == 0) &&
1608 		    (type == VCHIQ_MSG_CLOSE)) {
1609 			/*
1610 			 * This could be a CLOSE from a client which
1611 			 * hadn't yet received the OPENACK - look for
1612 			 * the connected service
1613 			 */
1614 			if (service)
1615 				vchiq_service_put(service);
1616 			service = get_connected_service(state, remoteport);
1617 			if (service)
1618 				vchiq_log_warning(vchiq_core_log_level,
1619 						  "%d: prs %s@%pK (%d->%d) - found connected service %d",
1620 						  state->id, msg_type_str(type), header,
1621 						  remoteport, localport, service->localport);
1622 		}
1623 
1624 		if (!service) {
1625 			vchiq_log_error(vchiq_core_log_level,
1626 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1627 					state->id, msg_type_str(type), header, remoteport,
1628 					localport, localport);
1629 			goto skip_message;
1630 		}
1631 		break;
1632 	default:
1633 		break;
1634 	}
1635 
1636 	if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1637 		int svc_fourcc;
1638 
1639 		svc_fourcc = service
1640 			? service->base.fourcc
1641 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1642 		vchiq_log_info(SRVTRACE_LEVEL(service),
1643 			       "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1644 			       msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1645 			       remoteport, localport, size);
1646 		if (size > 0)
1647 			vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
1648 	}
1649 
1650 	if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1651 	    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1652 		vchiq_log_error(vchiq_core_log_level,
1653 				"header %pK (msgid %x) - size %x too big for slot",
1654 				header, (unsigned int)msgid, (unsigned int)size);
1655 		WARN(1, "oversized for slot\n");
1656 	}
1657 
1658 	switch (type) {
1659 	case VCHIQ_MSG_OPEN:
1660 		WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1661 		if (!parse_open(state, header))
1662 			goto bail_not_ready;
1663 		break;
1664 	case VCHIQ_MSG_OPENACK:
1665 		if (size >= sizeof(struct vchiq_openack_payload)) {
1666 			const struct vchiq_openack_payload *payload =
1667 				(struct vchiq_openack_payload *)
1668 				header->data;
1669 			service->peer_version = payload->version;
1670 		}
1671 		vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1672 			       state->id, header, size, remoteport, localport,
1673 			       service->peer_version);
1674 		if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1675 			service->remoteport = remoteport;
1676 			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1677 			complete(&service->remove_event);
1678 		} else {
1679 			vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
1680 					srvstate_names[service->srvstate]);
1681 		}
1682 		break;
1683 	case VCHIQ_MSG_CLOSE:
1684 		WARN_ON(size); /* There should be no data */
1685 
1686 		vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
1687 			       state->id, header, remoteport, localport);
1688 
1689 		mark_service_closing_internal(service, 1);
1690 
1691 		if (vchiq_close_service_internal(service, CLOSE_RECVD) == VCHIQ_RETRY)
1692 			goto bail_not_ready;
1693 
1694 		vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
1695 			       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1696 			       service->localport, service->remoteport);
1697 		break;
1698 	case VCHIQ_MSG_DATA:
1699 		vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
1700 			       state->id, header, size, remoteport, localport);
1701 
1702 		if ((service->remoteport == remoteport) &&
1703 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1704 			header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1705 			claim_slot(state->rx_info);
1706 			DEBUG_TRACE(PARSE_LINE);
1707 			if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
1708 						  NULL) == VCHIQ_RETRY) {
1709 				DEBUG_TRACE(PARSE_LINE);
1710 				goto bail_not_ready;
1711 			}
1712 			VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1713 			VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
1714 		} else {
1715 			VCHIQ_STATS_INC(state, error_count);
1716 		}
1717 		break;
1718 	case VCHIQ_MSG_CONNECT:
1719 		vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
1720 		state->version_common =	((struct vchiq_slot_zero *)
1721 					 state->slot_data)->version;
1722 		complete(&state->connect);
1723 		break;
1724 	case VCHIQ_MSG_BULK_RX:
1725 	case VCHIQ_MSG_BULK_TX:
1726 		/*
1727 		 * We should never receive a bulk request from the
1728 		 * other side since we're not set up to perform as the
1729 		 * master.
1730 		 */
1731 		WARN_ON(1);
1732 		break;
1733 	case VCHIQ_MSG_BULK_RX_DONE:
1734 	case VCHIQ_MSG_BULK_TX_DONE:
1735 		if ((service->remoteport == remoteport) &&
1736 		    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1737 			struct vchiq_bulk_queue *queue;
1738 			struct vchiq_bulk *bulk;
1739 
1740 			queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1741 				&service->bulk_rx : &service->bulk_tx;
1742 
1743 			DEBUG_TRACE(PARSE_LINE);
1744 			if (mutex_lock_killable(&service->bulk_mutex)) {
1745 				DEBUG_TRACE(PARSE_LINE);
1746 				goto bail_not_ready;
1747 			}
1748 			if ((int)(queue->remote_insert -
1749 				queue->local_insert) >= 0) {
1750 				vchiq_log_error(vchiq_core_log_level,
1751 						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1752 						state->id, msg_type_str(type), header, remoteport,
1753 						localport, queue->remote_insert,
1754 						queue->local_insert);
1755 				mutex_unlock(&service->bulk_mutex);
1756 				break;
1757 			}
1758 			if (queue->process != queue->remote_insert) {
1759 				pr_err("%s: p %x != ri %x\n",
1760 				       __func__,
1761 				       queue->process,
1762 				       queue->remote_insert);
1763 				mutex_unlock(&service->bulk_mutex);
1764 				goto bail_not_ready;
1765 			}
1766 
1767 			bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
1768 			bulk->actual = *(int *)header->data;
1769 			queue->remote_insert++;
1770 
1771 			vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
1772 				       state->id, msg_type_str(type), header, remoteport, localport,
1773 				       bulk->actual, &bulk->data);
1774 
1775 			vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
1776 					state->id, localport,
1777 					(type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
1778 					queue->local_insert, queue->remote_insert, queue->process);
1779 
1780 			DEBUG_TRACE(PARSE_LINE);
1781 			WARN_ON(queue->process == queue->local_insert);
1782 			vchiq_complete_bulk(service->instance, bulk);
1783 			queue->process++;
1784 			mutex_unlock(&service->bulk_mutex);
1785 			DEBUG_TRACE(PARSE_LINE);
1786 			notify_bulks(service, queue, RETRY_POLL);
1787 			DEBUG_TRACE(PARSE_LINE);
1788 		}
1789 		break;
1790 	case VCHIQ_MSG_PADDING:
1791 		vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
1792 				state->id, header, size);
1793 		break;
1794 	case VCHIQ_MSG_PAUSE:
1795 		/* If initiated, signal the application thread */
1796 		vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
1797 				state->id, header, size);
1798 		if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1799 			vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
1800 					state->id);
1801 			break;
1802 		}
1803 		if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1804 			/* Send a PAUSE in response */
1805 			if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1806 					  QMFLAGS_NO_MUTEX_UNLOCK) == VCHIQ_RETRY)
1807 				goto bail_not_ready;
1808 		}
1809 		/* At this point slot_mutex is held */
1810 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1811 		break;
1812 	case VCHIQ_MSG_RESUME:
1813 		vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
1814 				state->id, header, size);
1815 		/* Release the slot mutex */
1816 		mutex_unlock(&state->slot_mutex);
1817 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1818 		break;
1819 
1820 	case VCHIQ_MSG_REMOTE_USE:
1821 		vchiq_on_remote_use(state);
1822 		break;
1823 	case VCHIQ_MSG_REMOTE_RELEASE:
1824 		vchiq_on_remote_release(state);
1825 		break;
1826 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1827 		break;
1828 
1829 	default:
1830 		vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
1831 				state->id, msgid, header, size);
1832 		WARN(1, "invalid message\n");
1833 		break;
1834 	}
1835 
1836 skip_message:
1837 	ret = size;
1838 
1839 bail_not_ready:
1840 	if (service)
1841 		vchiq_service_put(service);
1842 
1843 	return ret;
1844 }
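
/*
 * Illustrative sketch of the consumer side of the DATA path above (the
 * surrounding callback shape and variable names are assumptions, not taken
 * from this file): a header handed to the service via
 * VCHIQ_MESSAGE_AVAILABLE remains claimed (VCHIQ_MSGID_CLAIMED) until the
 * client returns it, e.g.
 *
 *	if (reason == VCHIQ_MESSAGE_AVAILABLE) {
 *		// ... consume header->data / header->size ...
 *		vchiq_release_message(instance, handle, header);
 *	}
 *
 * vchiq_release_message() then drops the claim taken by claim_slot() in
 * parse_message().
 */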
1845 
1846 /* Called by the slot handler thread */
1847 static void
1848 parse_rx_slots(struct vchiq_state *state)
1849 {
1850 	struct vchiq_shared_state *remote = state->remote;
1851 	int tx_pos;
1852 
1853 	DEBUG_INITIALISE(state->local);
1854 
1855 	tx_pos = remote->tx_pos;
1856 
1857 	while (state->rx_pos != tx_pos) {
1858 		struct vchiq_header *header;
1859 		int size;
1860 
1861 		DEBUG_TRACE(PARSE_LINE);
1862 		if (!state->rx_data) {
1863 			int rx_index;
1864 
1865 			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1866 			rx_index = remote->slot_queue[
1867 				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1868 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1869 				rx_index);
1870 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1871 
1872 			/*
1873 			 * Initialise use_count to one, and increment
1874 			 * release_count at the end of the slot to avoid
1875 			 * releasing the slot prematurely.
1876 			 */
1877 			state->rx_info->use_count = 1;
1878 			state->rx_info->release_count = 0;
1879 		}
1880 
1881 		header = (struct vchiq_header *)(state->rx_data +
1882 			(state->rx_pos & VCHIQ_SLOT_MASK));
1883 		size = parse_message(state, header);
1884 		if (size < 0)
1885 			return;
1886 
1887 		state->rx_pos += calc_stride(size);
1888 
1889 		DEBUG_TRACE(PARSE_LINE);
1890 		/*
1891 		 * Perform some housekeeping when the end of the slot is
1892 		 * reached.
1893 		 */
1894 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1895 			/* Remove the extra reference count. */
1896 			release_slot(state, state->rx_info, NULL, NULL);
1897 			state->rx_data = NULL;
1898 		}
1899 	}
1900 }
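
/*
 * Worked example of the rx_pos arithmetic above (VCHIQ_SLOT_SIZE == 4096 is
 * assumed purely for illustration; the real value is defined elsewhere):
 * with rx_pos at offset 4080 inside the current slot, a message whose
 * calc_stride(size) is 16 advances rx_pos onto a slot boundary, so
 * (rx_pos & VCHIQ_SLOT_MASK) == 0, the extra use_count taken when the slot
 * was entered is dropped via release_slot(), and the next iteration maps in
 * a fresh slot from remote->slot_queue[].
 */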
1901 
1902 /**
1903  * handle_poll() - handle service polling and other rare conditions
1904  * @state:  vchiq state struct
1905  *
1906  * Context: Process context
1907  *
1908  * Return:
1909  * * 0        - poll handled successfully
1910  * * -EAGAIN  - retry later
1911  */
1912 static int
1913 handle_poll(struct vchiq_state *state)
1914 {
1915 	switch (state->conn_state) {
1916 	case VCHIQ_CONNSTATE_CONNECTED:
1917 		/* Poll the services as requested */
1918 		poll_services(state);
1919 		break;
1920 
1921 	case VCHIQ_CONNSTATE_PAUSING:
1922 		if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1923 				  QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
1924 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
1925 		} else {
1926 			/* Retry later */
1927 			return -EAGAIN;
1928 		}
1929 		break;
1930 
1931 	case VCHIQ_CONNSTATE_RESUMING:
1932 		if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
1933 				  QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
1934 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1935 		} else {
1936 			/*
1937 			 * This should really be impossible,
1938 			 * since the PAUSE should have flushed
1939 			 * through outstanding messages.
1940 			 */
1941 			vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
1942 		}
1943 		break;
1944 	default:
1945 		break;
1946 	}
1947 
1948 	return 0;
1949 }
1950 
1951 /* Called by the slot handler thread */
1952 static int
1953 slot_handler_func(void *v)
1954 {
1955 	struct vchiq_state *state = v;
1956 	struct vchiq_shared_state *local = state->local;
1957 
1958 	DEBUG_INITIALISE(local);
1959 
1960 	while (1) {
1961 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1962 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1963 		remote_event_wait(&state->trigger_event, &local->trigger);
1964 
1965 		/* Ensure that reads don't overtake the remote_event_wait. */
1966 		rmb();
1967 
1968 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1969 		if (state->poll_needed) {
1970 			state->poll_needed = 0;
1971 
1972 			/*
1973 			 * Handle service polling and other rare conditions here
1974 			 * out of the mainline code
1975 			 */
1976 			if (handle_poll(state) == -EAGAIN)
1977 				state->poll_needed = 1;
1978 		}
1979 
1980 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1981 		parse_rx_slots(state);
1982 	}
1983 	return 0;
1984 }
1985 
1986 /* Called by the recycle thread */
1987 static int
1988 recycle_func(void *v)
1989 {
1990 	struct vchiq_state *state = v;
1991 	struct vchiq_shared_state *local = state->local;
1992 	u32 *found;
1993 	size_t length;
1994 
1995 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1996 
1997 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1998 			      GFP_KERNEL);
1999 	if (!found)
2000 		return -ENOMEM;
2001 
2002 	while (1) {
2003 		remote_event_wait(&state->recycle_event, &local->recycle);
2004 
2005 		process_free_queue(state, found, length);
2006 	}
2007 	return 0;
2008 }
2009 
2010 /* Called by the sync thread */
2011 static int
2012 sync_func(void *v)
2013 {
2014 	struct vchiq_state *state = v;
2015 	struct vchiq_shared_state *local = state->local;
2016 	struct vchiq_header *header =
2017 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2018 			state->remote->slot_sync);
2019 
2020 	while (1) {
2021 		struct vchiq_service *service;
2022 		int msgid, size;
2023 		int type;
2024 		unsigned int localport, remoteport;
2025 
2026 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2027 
2028 		/* Ensure that reads don't overtake the remote_event_wait. */
2029 		rmb();
2030 
2031 		msgid = header->msgid;
2032 		size = header->size;
2033 		type = VCHIQ_MSG_TYPE(msgid);
2034 		localport = VCHIQ_MSG_DSTPORT(msgid);
2035 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
2036 
2037 		service = find_service_by_port(state, localport);
2038 
2039 		if (!service) {
2040 			vchiq_log_error(vchiq_sync_log_level,
2041 					"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2042 					state->id, msg_type_str(type), header,
2043 					remoteport, localport, localport);
2044 			release_message_sync(state, header);
2045 			continue;
2046 		}
2047 
2048 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2049 			int svc_fourcc;
2050 
2051 			svc_fourcc = service
2052 				? service->base.fourcc
2053 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2054 			vchiq_log_trace(vchiq_sync_log_level,
2055 					"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2056 					msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2057 					remoteport, localport, size);
2058 			if (size > 0)
2059 				vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
2060 		}
2061 
2062 		switch (type) {
2063 		case VCHIQ_MSG_OPENACK:
2064 			if (size >= sizeof(struct vchiq_openack_payload)) {
2065 				const struct vchiq_openack_payload *payload =
2066 					(struct vchiq_openack_payload *)
2067 					header->data;
2068 				service->peer_version = payload->version;
2069 			}
2070 			vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2071 				       state->id, header, size, remoteport, localport,
2072 				       service->peer_version);
2073 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2074 				service->remoteport = remoteport;
2075 				set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2076 				service->sync = 1;
2077 				complete(&service->remove_event);
2078 			}
2079 			release_message_sync(state, header);
2080 			break;
2081 
2082 		case VCHIQ_MSG_DATA:
2083 			vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
2084 					state->id, header, size, remoteport, localport);
2085 
2086 			if ((service->remoteport == remoteport) &&
2087 			    (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2088 				if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2089 							  NULL) == VCHIQ_RETRY)
2090 					vchiq_log_error(vchiq_sync_log_level,
2091 							"synchronous callback to service %d returns VCHIQ_RETRY",
2092 							localport);
2093 			}
2094 			break;
2095 
2096 		default:
2097 			vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
2098 					state->id, msgid, header, size);
2099 			release_message_sync(state, header);
2100 			break;
2101 		}
2102 
2103 		vchiq_service_put(service);
2104 	}
2105 
2106 	return 0;
2107 }
2108 
2109 inline const char *
2110 get_conn_state_name(enum vchiq_connstate conn_state)
2111 {
2112 	return conn_state_names[conn_state];
2113 }
2114 
2115 struct vchiq_slot_zero *
2116 vchiq_init_slots(void *mem_base, int mem_size)
2117 {
2118 	int mem_align =
2119 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2120 	struct vchiq_slot_zero *slot_zero =
2121 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2122 	int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2123 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2124 
2125 	check_sizes();
2126 
2127 	/* Ensure there is enough memory to run an absolutely minimum system */
2128 	num_slots -= first_data_slot;
2129 
2130 	if (num_slots < 4) {
2131 		vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
2132 				__func__, mem_size);
2133 		return NULL;
2134 	}
2135 
2136 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2137 
2138 	slot_zero->magic = VCHIQ_MAGIC;
2139 	slot_zero->version = VCHIQ_VERSION;
2140 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2141 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2142 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2143 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2144 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2145 
2146 	slot_zero->master.slot_sync = first_data_slot;
2147 	slot_zero->master.slot_first = first_data_slot + 1;
2148 	slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2149 	slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2150 	slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2151 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2152 
2153 	return slot_zero;
2154 }
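
/*
 * Worked example of the split above (numbers are hypothetical): if
 * first_data_slot == 4 and num_slots == 32 after the subtraction, then
 *
 *	master.slot_sync  = 4	slave.slot_sync  = 20
 *	master.slot_first = 5	slave.slot_first = 21
 *	master.slot_last  = 19	slave.slot_last  = 35
 *
 * i.e. each side gets one synchronous slot plus half of the remaining data
 * slots.
 */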
2155 
2156 int
2157 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
2158 {
2159 	struct vchiq_shared_state *local;
2160 	struct vchiq_shared_state *remote;
2161 	char threadname[16];
2162 	int i, ret;
2163 
2164 	local = &slot_zero->slave;
2165 	remote = &slot_zero->master;
2166 
2167 	if (local->initialised) {
2168 		vchiq_loud_error_header();
2169 		if (remote->initialised)
2170 			vchiq_loud_error("local state has already been initialised");
2171 		else
2172 			vchiq_loud_error("master/slave mismatch - two slaves");
2173 		vchiq_loud_error_footer();
2174 		return -EINVAL;
2175 	}
2176 
2177 	memset(state, 0, sizeof(struct vchiq_state));
2178 
2179 	state->dev = dev;
2180 
2181 	/*
2182 	 * initialize shared state pointers
2183 	 */
2184 
2185 	state->local = local;
2186 	state->remote = remote;
2187 	state->slot_data = (struct vchiq_slot *)slot_zero;
2188 
2189 	/*
2190 	 * initialize events and mutexes
2191 	 */
2192 
2193 	init_completion(&state->connect);
2194 	mutex_init(&state->mutex);
2195 	mutex_init(&state->slot_mutex);
2196 	mutex_init(&state->recycle_mutex);
2197 	mutex_init(&state->sync_mutex);
2198 	mutex_init(&state->bulk_transfer_mutex);
2199 
2200 	init_completion(&state->slot_available_event);
2201 	init_completion(&state->slot_remove_event);
2202 	init_completion(&state->data_quota_event);
2203 
2204 	state->slot_queue_available = 0;
2205 
2206 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2207 		struct vchiq_service_quota *quota = &state->service_quotas[i];
2208 		init_completion(&quota->quota_event);
2209 	}
2210 
2211 	for (i = local->slot_first; i <= local->slot_last; i++) {
2212 		local->slot_queue[state->slot_queue_available] = i;
2213 		state->slot_queue_available++;
2214 		complete(&state->slot_available_event);
2215 	}
2216 
2217 	state->default_slot_quota = state->slot_queue_available / 2;
2218 	state->default_message_quota =
2219 		min_t(unsigned short, state->default_slot_quota * 256, ~0);
2220 
2221 	state->previous_data_index = -1;
2222 	state->data_use_count = 0;
2223 	state->data_quota = state->slot_queue_available - 1;
2224 
2225 	remote_event_create(&state->trigger_event, &local->trigger);
2226 	local->tx_pos = 0;
2227 	remote_event_create(&state->recycle_event, &local->recycle);
2228 	local->slot_queue_recycle = state->slot_queue_available;
2229 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2230 	remote_event_create(&state->sync_release_event, &local->sync_release);
2231 
2232 	/* At start-of-day, the slot is empty and available */
2233 	((struct vchiq_header *)
2234 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2235 							VCHIQ_MSGID_PADDING;
2236 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2237 
2238 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2239 
2240 	ret = vchiq_platform_init_state(state);
2241 	if (ret)
2242 		return ret;
2243 
2244 	/*
2245 	 * bring up slot handler thread
2246 	 */
2247 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2248 	state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2249 
2250 	if (IS_ERR(state->slot_handler_thread)) {
2251 		vchiq_loud_error_header();
2252 		vchiq_loud_error("couldn't create thread %s", threadname);
2253 		vchiq_loud_error_footer();
2254 		return PTR_ERR(state->slot_handler_thread);
2255 	}
2256 	set_user_nice(state->slot_handler_thread, -19);
2257 
2258 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2259 	state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2260 	if (IS_ERR(state->recycle_thread)) {
2261 		vchiq_loud_error_header();
2262 		vchiq_loud_error("couldn't create thread %s", threadname);
2263 		vchiq_loud_error_footer();
2264 		ret = PTR_ERR(state->recycle_thread);
2265 		goto fail_free_handler_thread;
2266 	}
2267 	set_user_nice(state->recycle_thread, -19);
2268 
2269 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2270 	state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2271 	if (IS_ERR(state->sync_thread)) {
2272 		vchiq_loud_error_header();
2273 		vchiq_loud_error("couldn't create thread %s", threadname);
2274 		vchiq_loud_error_footer();
2275 		ret = PTR_ERR(state->sync_thread);
2276 		goto fail_free_recycle_thread;
2277 	}
2278 	set_user_nice(state->sync_thread, -20);
2279 
2280 	wake_up_process(state->slot_handler_thread);
2281 	wake_up_process(state->recycle_thread);
2282 	wake_up_process(state->sync_thread);
2283 
2284 	/* Indicate readiness to the other side */
2285 	local->initialised = 1;
2286 
2287 	return 0;
2288 
2289 fail_free_recycle_thread:
2290 	kthread_stop(state->recycle_thread);
2291 fail_free_handler_thread:
2292 	kthread_stop(state->slot_handler_thread);
2293 
2294 	return ret;
2295 }
2296 
2297 void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
2298 			  struct vchiq_header *header)
2299 {
2300 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2301 	int pos;
2302 
2303 	if (!service)
2304 		return;
2305 
2306 	while (service->msg_queue_write == service->msg_queue_read +
2307 		VCHIQ_MAX_SLOTS) {
2308 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2309 			flush_signals(current);
2310 	}
2311 
2312 	pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2313 	service->msg_queue_write++;
2314 	service->msg_queue[pos] = header;
2315 
2316 	complete(&service->msg_queue_push);
2317 }
2318 EXPORT_SYMBOL(vchiq_msg_queue_push);
2319 
2320 struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
2321 {
2322 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2323 	struct vchiq_header *header;
2324 	int pos;
2325 
2326 	if (!service)
2327 		return NULL;
2328 
2329 	if (service->msg_queue_write == service->msg_queue_read)
2330 		return NULL;
2331 
2332 	while (service->msg_queue_write == service->msg_queue_read) {
2333 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2334 			flush_signals(current);
2335 	}
2336 
2337 	pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2338 	service->msg_queue_read++;
2339 	header = service->msg_queue[pos];
2340 
2341 	complete(&service->msg_queue_pop);
2342 
2343 	return header;
2344 }
2345 EXPORT_SYMBOL(vchiq_msg_hold);
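
/*
 * Usage sketch for the pair above (instance/handle/my_header are
 * placeholders): a client can park a header from its callback and drain it
 * later from process context:
 *
 *	// callback context
 *	vchiq_msg_queue_push(instance, handle, my_header);
 *
 *	// worker/process context
 *	struct vchiq_header *h = vchiq_msg_hold(instance, handle);
 *	if (h) {
 *		// ... consume h->data / h->size ...
 *		vchiq_release_message(instance, handle, h);
 *	}
 *
 * Releasing via vchiq_release_message() afterwards is an assumption about
 * the caller's contract, not something enforced here.
 */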
2346 
2347 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2348 {
2349 	if (!params->callback || !params->fourcc) {
2350 		vchiq_loud_error("Can't add service, invalid params\n");
2351 		return -EINVAL;
2352 	}
2353 
2354 	return 0;
2355 }
2356 
2357 /* Called from application thread when a client or server service is created. */
2358 struct vchiq_service *
2359 vchiq_add_service_internal(struct vchiq_state *state,
2360 			   const struct vchiq_service_params_kernel *params,
2361 			   int srvstate, struct vchiq_instance *instance,
2362 			   void (*userdata_term)(void *userdata))
2363 {
2364 	struct vchiq_service *service;
2365 	struct vchiq_service __rcu **pservice = NULL;
2366 	struct vchiq_service_quota *quota;
2367 	int ret;
2368 	int i;
2369 
2370 	ret = vchiq_validate_params(params);
2371 	if (ret)
2372 		return NULL;
2373 
2374 	service = kzalloc(sizeof(*service), GFP_KERNEL);
2375 	if (!service)
2376 		return service;
2377 
2378 	service->base.fourcc   = params->fourcc;
2379 	service->base.callback = params->callback;
2380 	service->base.userdata = params->userdata;
2381 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2382 	kref_init(&service->ref_count);
2383 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2384 	service->userdata_term = userdata_term;
2385 	service->localport     = VCHIQ_PORT_FREE;
2386 	service->remoteport    = VCHIQ_PORT_FREE;
2387 
2388 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2389 		VCHIQ_FOURCC_INVALID : params->fourcc;
2390 	service->auto_close    = 1;
2391 	atomic_set(&service->poll_flags, 0);
2392 	service->version       = params->version;
2393 	service->version_min   = params->version_min;
2394 	service->state         = state;
2395 	service->instance      = instance;
2396 	init_completion(&service->remove_event);
2397 	init_completion(&service->bulk_remove_event);
2398 	init_completion(&service->msg_queue_pop);
2399 	init_completion(&service->msg_queue_push);
2400 	mutex_init(&service->bulk_mutex);
2401 
2402 	/*
2403 	 * Although it is perfectly possible to use a spinlock
2404 	 * to protect the creation of services, it is overkill as it
2405 	 * disables interrupts while the array is searched.
2406 	 * The only danger is of another thread trying to create a
2407 	 * service - service deletion is safe.
2408 	 * Therefore it is preferable to use state->mutex which,
2409 	 * although slower to claim, doesn't block interrupts while
2410 	 * it is held.
2411 	 */
2412 
2413 	mutex_lock(&state->mutex);
2414 
2415 	/* Prepare to use a previously unused service */
2416 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2417 		pservice = &state->services[state->unused_service];
2418 
2419 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2420 		for (i = 0; i < state->unused_service; i++) {
2421 			if (!rcu_access_pointer(state->services[i])) {
2422 				pservice = &state->services[i];
2423 				break;
2424 			}
2425 		}
2426 	} else {
2427 		rcu_read_lock();
2428 		for (i = (state->unused_service - 1); i >= 0; i--) {
2429 			struct vchiq_service *srv;
2430 
2431 			srv = rcu_dereference(state->services[i]);
2432 			if (!srv) {
2433 				pservice = &state->services[i];
2434 			} else if ((srv->public_fourcc == params->fourcc) &&
2435 				   ((srv->instance != instance) ||
2436 				   (srv->base.callback != params->callback))) {
2437 				/*
2438 				 * There is another server using this
2439 				 * fourcc which doesn't match.
2440 				 */
2441 				pservice = NULL;
2442 				break;
2443 			}
2444 		}
2445 		rcu_read_unlock();
2446 	}
2447 
2448 	if (pservice) {
2449 		service->localport = (pservice - state->services);
2450 		if (!handle_seq)
2451 			handle_seq = VCHIQ_MAX_STATES *
2452 				 VCHIQ_MAX_SERVICES;
2453 		service->handle = handle_seq |
2454 			(state->id * VCHIQ_MAX_SERVICES) |
2455 			service->localport;
2456 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2457 		rcu_assign_pointer(*pservice, service);
2458 		if (pservice == &state->services[state->unused_service])
2459 			state->unused_service++;
2460 	}
2461 
2462 	mutex_unlock(&state->mutex);
2463 
2464 	if (!pservice) {
2465 		kfree(service);
2466 		return NULL;
2467 	}
2468 
2469 	quota = &state->service_quotas[service->localport];
2470 	quota->slot_quota = state->default_slot_quota;
2471 	quota->message_quota = state->default_message_quota;
2472 	if (quota->slot_use_count == 0)
2473 		quota->previous_tx_index =
2474 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2475 			- 1;
2476 
2477 	/* Bring this service online */
2478 	set_service_state(service, srvstate);
2479 
2480 	vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
2481 		       (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2482 		       VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);
2483 
2484 	/* Don't unlock the service - leave it with a ref_count of 1. */
2485 
2486 	return service;
2487 }
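
/*
 * Note on the handle encoding above, with hypothetical constants chosen
 * only to make the arithmetic concrete: if VCHIQ_MAX_STATES == 1 and
 * VCHIQ_MAX_SERVICES == 4096, handle_seq advances in steps of 4096, so the
 * third service allocated overall, landing at localport 7 on state 0, gets
 *
 *	handle = (3 * 4096) | (0 * 4096) | 7 = 0x3007
 *
 * The sequence component effectively acts as a generation counter, so a
 * stale handle to a freed localport will not match its new occupant.
 */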
2488 
2489 enum vchiq_status
2490 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2491 {
2492 	struct vchiq_open_payload payload = {
2493 		service->base.fourcc,
2494 		client_id,
2495 		service->version,
2496 		service->version_min
2497 	};
2498 	enum vchiq_status status = VCHIQ_SUCCESS;
2499 
2500 	service->client_id = client_id;
2501 	vchiq_use_service_internal(service);
2502 	status = queue_message(service->state,
2503 			       NULL, MAKE_OPEN(service->localport),
2504 			       memcpy_copy_callback,
2505 			       &payload,
2506 			       sizeof(payload),
2507 			       QMFLAGS_IS_BLOCKING);
2508 
2509 	if (status != VCHIQ_SUCCESS)
2510 		return status;
2511 
2512 	/* Wait for the ACK/NAK */
2513 	if (wait_for_completion_interruptible(&service->remove_event)) {
2514 		status = VCHIQ_RETRY;
2515 		vchiq_release_service_internal(service);
2516 	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2517 		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2518 		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2519 			vchiq_log_error(vchiq_core_log_level,
2520 					"%d: osi - srvstate = %s (ref %u)",
2521 					service->state->id,
2522 					srvstate_names[service->srvstate],
2523 					kref_read(&service->ref_count));
2524 		status = VCHIQ_ERROR;
2525 		VCHIQ_SERVICE_STATS_INC(service, error_count);
2526 		vchiq_release_service_internal(service);
2527 	}
2528 
2529 	return status;
2530 }
2531 
2532 static void
2533 release_service_messages(struct vchiq_service *service)
2534 {
2535 	struct vchiq_state *state = service->state;
2536 	int slot_last = state->remote->slot_last;
2537 	int i;
2538 
2539 	/* Release any claimed messages aimed at this service */
2540 
2541 	if (service->sync) {
2542 		struct vchiq_header *header =
2543 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2544 						state->remote->slot_sync);
2545 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2546 			release_message_sync(state, header);
2547 
2548 		return;
2549 	}
2550 
2551 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2552 		struct vchiq_slot_info *slot_info =
2553 			SLOT_INFO_FROM_INDEX(state, i);
2554 		unsigned int pos, end;
2555 		char *data;
2556 
2557 		if (slot_info->release_count == slot_info->use_count)
2558 			continue;
2559 
2560 		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2561 		end = VCHIQ_SLOT_SIZE;
2562 		if (data == state->rx_data)
2563 			/*
2564 			 * This buffer is still being read from - stop
2565 			 * at the current read position
2566 			 */
2567 			end = state->rx_pos & VCHIQ_SLOT_MASK;
2568 
2569 		pos = 0;
2570 
2571 		while (pos < end) {
2572 			struct vchiq_header *header =
2573 				(struct vchiq_header *)(data + pos);
2574 			int msgid = header->msgid;
2575 			int port = VCHIQ_MSG_DSTPORT(msgid);
2576 
2577 			if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2578 				vchiq_log_info(vchiq_core_log_level, "  fsi - hdr %pK", header);
2579 				release_slot(state, slot_info, header, NULL);
2580 			}
2581 			pos += calc_stride(header->size);
2582 			if (pos > VCHIQ_SLOT_SIZE) {
2583 				vchiq_log_error(vchiq_core_log_level,
2584 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2585 						pos, header, msgid, header->msgid, header->size);
2586 				WARN(1, "invalid slot position\n");
2587 			}
2588 		}
2589 	}
2590 }
2591 
2592 static int
2593 do_abort_bulks(struct vchiq_service *service)
2594 {
2595 	enum vchiq_status status;
2596 
2597 	/* Abort any outstanding bulk transfers */
2598 	if (mutex_lock_killable(&service->bulk_mutex))
2599 		return 0;
2600 	abort_outstanding_bulks(service, &service->bulk_tx);
2601 	abort_outstanding_bulks(service, &service->bulk_rx);
2602 	mutex_unlock(&service->bulk_mutex);
2603 
2604 	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2605 	if (status != VCHIQ_SUCCESS)
2606 		return 0;
2607 
2608 	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2609 	return (status == VCHIQ_SUCCESS);
2610 }
2611 
2612 static enum vchiq_status
2613 close_service_complete(struct vchiq_service *service, int failstate)
2614 {
2615 	enum vchiq_status status;
2616 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2617 	int newstate;
2618 
2619 	switch (service->srvstate) {
2620 	case VCHIQ_SRVSTATE_OPEN:
2621 	case VCHIQ_SRVSTATE_CLOSESENT:
2622 	case VCHIQ_SRVSTATE_CLOSERECVD:
2623 		if (is_server) {
2624 			if (service->auto_close) {
2625 				service->client_id = 0;
2626 				service->remoteport = VCHIQ_PORT_FREE;
2627 				newstate = VCHIQ_SRVSTATE_LISTENING;
2628 			} else {
2629 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2630 			}
2631 		} else {
2632 			newstate = VCHIQ_SRVSTATE_CLOSED;
2633 		}
2634 		set_service_state(service, newstate);
2635 		break;
2636 	case VCHIQ_SRVSTATE_LISTENING:
2637 		break;
2638 	default:
2639 		vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
2640 				service->handle, srvstate_names[service->srvstate]);
2641 		WARN(1, "%s in unexpected state\n", __func__);
2642 		return VCHIQ_ERROR;
2643 	}
2644 
2645 	status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2646 
2647 	if (status != VCHIQ_RETRY) {
2648 		int uc = service->service_use_count;
2649 		int i;
2650 		/* Complete the close process */
2651 		for (i = 0; i < uc; i++)
2652 			/*
2653 			 * cater for cases where close is forced and the
2654 			 * client may not close all its handles
2655 			 */
2656 			vchiq_release_service_internal(service);
2657 
2658 		service->client_id = 0;
2659 		service->remoteport = VCHIQ_PORT_FREE;
2660 
2661 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2662 			vchiq_free_service_internal(service);
2663 		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2664 			if (is_server)
2665 				service->closing = 0;
2666 
2667 			complete(&service->remove_event);
2668 		}
2669 	} else {
2670 		set_service_state(service, failstate);
2671 	}
2672 
2673 	return status;
2674 }
2675 
2676 /* Called by the slot handler */
2677 enum vchiq_status
2678 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2679 {
2680 	struct vchiq_state *state = service->state;
2681 	enum vchiq_status status = VCHIQ_SUCCESS;
2682 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2683 	int close_id = MAKE_CLOSE(service->localport,
2684 				  VCHIQ_MSG_DSTPORT(service->remoteport));
2685 
2686 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
2687 		       service->localport, close_recvd, srvstate_names[service->srvstate]);
2688 
2689 	switch (service->srvstate) {
2690 	case VCHIQ_SRVSTATE_CLOSED:
2691 	case VCHIQ_SRVSTATE_HIDDEN:
2692 	case VCHIQ_SRVSTATE_LISTENING:
2693 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2694 		if (close_recvd) {
2695 			vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
2696 					__func__, srvstate_names[service->srvstate]);
2697 		} else if (is_server) {
2698 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2699 				status = VCHIQ_ERROR;
2700 			} else {
2701 				service->client_id = 0;
2702 				service->remoteport = VCHIQ_PORT_FREE;
2703 				if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
2704 					set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2705 			}
2706 			complete(&service->remove_event);
2707 		} else {
2708 			vchiq_free_service_internal(service);
2709 		}
2710 		break;
2711 	case VCHIQ_SRVSTATE_OPENING:
2712 		if (close_recvd) {
2713 			/* The open was rejected - tell the user */
2714 			set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
2715 			complete(&service->remove_event);
2716 		} else {
2717 			/* Shutdown mid-open - let the other side know */
2718 			status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
2719 		}
2720 		break;
2721 
2722 	case VCHIQ_SRVSTATE_OPENSYNC:
2723 		mutex_lock(&state->sync_mutex);
2724 		fallthrough;
2725 	case VCHIQ_SRVSTATE_OPEN:
2726 		if (close_recvd) {
2727 			if (!do_abort_bulks(service))
2728 				status = VCHIQ_RETRY;
2729 		}
2730 
2731 		release_service_messages(service);
2732 
2733 		if (status == VCHIQ_SUCCESS)
2734 			status = queue_message(state, service, close_id, NULL,
2735 					       NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2736 
2737 		if (status != VCHIQ_SUCCESS) {
2738 			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2739 				mutex_unlock(&state->sync_mutex);
2740 			break;
2741 		}
2742 
2743 		if (!close_recvd) {
2744 			/* Change the state while the mutex is still held */
2745 			set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
2746 			mutex_unlock(&state->slot_mutex);
2747 			if (service->sync)
2748 				mutex_unlock(&state->sync_mutex);
2749 			break;
2750 		}
2751 
2752 		/* Change the state while the mutex is still held */
2753 		set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2754 		mutex_unlock(&state->slot_mutex);
2755 		if (service->sync)
2756 			mutex_unlock(&state->sync_mutex);
2757 
2758 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2759 		break;
2760 
2761 	case VCHIQ_SRVSTATE_CLOSESENT:
2762 		if (!close_recvd)
2763 			/* This happens when a process is killed mid-close */
2764 			break;
2765 
2766 		if (!do_abort_bulks(service)) {
2767 			status = VCHIQ_RETRY;
2768 			break;
2769 		}
2770 
2771 		if (status == VCHIQ_SUCCESS)
2772 			status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2773 		break;
2774 
2775 	case VCHIQ_SRVSTATE_CLOSERECVD:
2776 		if (!close_recvd && is_server)
2777 			/* Force into LISTENING mode */
2778 			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2779 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2780 		break;
2781 
2782 	default:
2783 		vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
2784 				close_recvd, srvstate_names[service->srvstate]);
2785 		break;
2786 	}
2787 
2788 	return status;
2789 }
2790 
2791 /* Called from the application process upon process death */
2792 void
2793 vchiq_terminate_service_internal(struct vchiq_service *service)
2794 {
2795 	struct vchiq_state *state = service->state;
2796 
2797 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)", state->id,
2798 		       service->localport, service->remoteport);
2799 
2800 	mark_service_closing(service);
2801 
2802 	/* Mark the service for removal by the slot handler */
2803 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2804 }
2805 
2806 /* Called from the slot handler */
2807 void
2808 vchiq_free_service_internal(struct vchiq_service *service)
2809 {
2810 	struct vchiq_state *state = service->state;
2811 
2812 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);
2813 
2814 	switch (service->srvstate) {
2815 	case VCHIQ_SRVSTATE_OPENING:
2816 	case VCHIQ_SRVSTATE_CLOSED:
2817 	case VCHIQ_SRVSTATE_HIDDEN:
2818 	case VCHIQ_SRVSTATE_LISTENING:
2819 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2820 		break;
2821 	default:
2822 		vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
2823 				service->localport, srvstate_names[service->srvstate]);
2824 		return;
2825 	}
2826 
2827 	set_service_state(service, VCHIQ_SRVSTATE_FREE);
2828 
2829 	complete(&service->remove_event);
2830 
2831 	/* Release the initial lock */
2832 	vchiq_service_put(service);
2833 }
2834 
2835 enum vchiq_status
2836 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2837 {
2838 	struct vchiq_service *service;
2839 	int i;
2840 
2841 	/* Find all services registered to this client and enable them. */
2842 	i = 0;
2843 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2844 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2845 			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2846 		vchiq_service_put(service);
2847 	}
2848 
2849 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2850 		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
2851 				  QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2852 			return VCHIQ_RETRY;
2853 
2854 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2855 	}
2856 
2857 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2858 		if (wait_for_completion_interruptible(&state->connect))
2859 			return VCHIQ_RETRY;
2860 
2861 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2862 		complete(&state->connect);
2863 	}
2864 
2865 	return VCHIQ_SUCCESS;
2866 }
2867 
2868 void
2869 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2870 {
2871 	struct vchiq_service *service;
2872 	int i;
2873 
2874 	/* Find all services registered to this client and remove them. */
2875 	i = 0;
2876 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2877 		(void)vchiq_remove_service(instance, service->handle);
2878 		vchiq_service_put(service);
2879 	}
2880 }
2881 
2882 enum vchiq_status
2883 vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
2884 {
2885 	/* Unregister the service */
2886 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2887 	enum vchiq_status status = VCHIQ_SUCCESS;
2888 
2889 	if (!service)
2890 		return VCHIQ_ERROR;
2891 
2892 	vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
2893 		       service->state->id, service->localport);
2894 
2895 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2896 	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2897 	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2898 		vchiq_service_put(service);
2899 		return VCHIQ_ERROR;
2900 	}
2901 
2902 	mark_service_closing(service);
2903 
2904 	if (current == service->state->slot_handler_thread) {
2905 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2906 		WARN_ON(status == VCHIQ_RETRY);
2907 	} else {
2908 		/* Mark the service for termination by the slot handler */
2909 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2910 	}
2911 
2912 	while (1) {
2913 		if (wait_for_completion_interruptible(&service->remove_event)) {
2914 			status = VCHIQ_RETRY;
2915 			break;
2916 		}
2917 
2918 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2919 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2920 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2921 			break;
2922 
2923 		vchiq_log_warning(vchiq_core_log_level,
2924 				  "%d: close_service:%d - waiting in state %s",
2925 				  service->state->id, service->localport,
2926 				  srvstate_names[service->srvstate]);
2927 	}
2928 
2929 	if ((status == VCHIQ_SUCCESS) &&
2930 	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2931 	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2932 		status = VCHIQ_ERROR;
2933 
2934 	vchiq_service_put(service);
2935 
2936 	return status;
2937 }
2938 EXPORT_SYMBOL(vchiq_close_service);
2939 
2940 enum vchiq_status
2941 vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
2942 {
2943 	/* Unregister the service */
2944 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2945 	enum vchiq_status status = VCHIQ_SUCCESS;
2946 
2947 	if (!service)
2948 		return VCHIQ_ERROR;
2949 
2950 	vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
2951 		       service->state->id, service->localport);
2952 
2953 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2954 		vchiq_service_put(service);
2955 		return VCHIQ_ERROR;
2956 	}
2957 
2958 	mark_service_closing(service);
2959 
2960 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2961 	    (current == service->state->slot_handler_thread)) {
2962 		/*
2963 		 * Make it look like a client, because it must be removed and
2964 		 * not left in the LISTENING state.
2965 		 */
2966 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
2967 
2968 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2969 		WARN_ON(status == VCHIQ_RETRY);
2970 	} else {
2971 		/* Mark the service for removal by the slot handler */
2972 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2973 	}
2974 	while (1) {
2975 		if (wait_for_completion_interruptible(&service->remove_event)) {
2976 			status = VCHIQ_RETRY;
2977 			break;
2978 		}
2979 
2980 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2981 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2982 			break;
2983 
2984 		vchiq_log_warning(vchiq_core_log_level,
2985 				  "%d: remove_service:%d - waiting in state %s",
2986 				  service->state->id, service->localport,
2987 				  srvstate_names[service->srvstate]);
2988 	}
2989 
2990 	if ((status == VCHIQ_SUCCESS) &&
2991 	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
2992 		status = VCHIQ_ERROR;
2993 
2994 	vchiq_service_put(service);
2995 
2996 	return status;
2997 }
2998 
2999 /*
3000  * This function may be called by kernel threads or user threads.
3001  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3002  * received and the call should be retried after being returned to user
3003  * context.
3004  * When called in blocking mode, the userdata field points to a bulk_waiter
3005  * structure.
3006  */
3007 enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
3008 				      void *offset, void __user *uoffset, int size, void *userdata,
3009 				      enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
3010 {
3011 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3012 	struct vchiq_bulk_queue *queue;
3013 	struct vchiq_bulk *bulk;
3014 	struct vchiq_state *state;
3015 	struct bulk_waiter *bulk_waiter = NULL;
3016 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3017 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3018 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3019 	enum vchiq_status status = VCHIQ_ERROR;
3020 	int payload[2];
3021 
3022 	if (!service)
3023 		goto error_exit;
3024 
3025 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3026 		goto error_exit;
3027 
3028 	if (!offset && !uoffset)
3029 		goto error_exit;
3030 
3031 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3032 		goto error_exit;
3033 
3034 	switch (mode) {
3035 	case VCHIQ_BULK_MODE_NOCALLBACK:
3036 	case VCHIQ_BULK_MODE_CALLBACK:
3037 		break;
3038 	case VCHIQ_BULK_MODE_BLOCKING:
3039 		bulk_waiter = userdata;
3040 		init_completion(&bulk_waiter->event);
3041 		bulk_waiter->actual = 0;
3042 		bulk_waiter->bulk = NULL;
3043 		break;
3044 	case VCHIQ_BULK_MODE_WAITING:
3045 		bulk_waiter = userdata;
3046 		bulk = bulk_waiter->bulk;
3047 		goto waiting;
3048 	default:
3049 		goto error_exit;
3050 	}
3051 
3052 	state = service->state;
3053 
3054 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3055 		&service->bulk_tx : &service->bulk_rx;
3056 
3057 	if (mutex_lock_killable(&service->bulk_mutex)) {
3058 		status = VCHIQ_RETRY;
3059 		goto error_exit;
3060 	}
3061 
3062 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3063 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3064 		do {
3065 			mutex_unlock(&service->bulk_mutex);
3066 			if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
3067 				status = VCHIQ_RETRY;
3068 				goto error_exit;
3069 			}
3070 			if (mutex_lock_killable(&service->bulk_mutex)) {
3071 				status = VCHIQ_RETRY;
3072 				goto error_exit;
3073 			}
3074 		} while (queue->local_insert == queue->remove +
3075 				VCHIQ_NUM_SERVICE_BULKS);
3076 	}
3077 
3078 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3079 
3080 	bulk->mode = mode;
3081 	bulk->dir = dir;
3082 	bulk->userdata = userdata;
3083 	bulk->size = size;
3084 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3085 
3086 	if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
3087 		goto unlock_error_exit;
3088 
3089 	/*
3090 	 * Ensure that the bulk data record is visible to the peer
3091 	 * before proceeding.
3092 	 */
3093 	wmb();
3094 
3095 	vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
3096 		       state->id, service->localport, service->remoteport,
3097 		       dir_char, size, &bulk->data, userdata);
3098 
3099 	/*
3100 	 * The slot mutex must be held when the service is being closed, so
3101 	 * claim it here to ensure that isn't happening
3102 	 */
3103 	if (mutex_lock_killable(&state->slot_mutex)) {
3104 		status = VCHIQ_RETRY;
3105 		goto cancel_bulk_error_exit;
3106 	}
3107 
3108 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3109 		goto unlock_both_error_exit;
3110 
3111 	payload[0] = lower_32_bits(bulk->data);
3112 	payload[1] = bulk->size;
3113 	status = queue_message(state,
3114 			       NULL,
3115 			       VCHIQ_MAKE_MSG(dir_msgtype,
3116 					      service->localport,
3117 					      service->remoteport),
3118 			       memcpy_copy_callback,
3119 			       &payload,
3120 			       sizeof(payload),
3121 			       QMFLAGS_IS_BLOCKING |
3122 			       QMFLAGS_NO_MUTEX_LOCK |
3123 			       QMFLAGS_NO_MUTEX_UNLOCK);
3124 	if (status != VCHIQ_SUCCESS)
3125 		goto unlock_both_error_exit;
3126 
3127 	queue->local_insert++;
3128 
3129 	mutex_unlock(&state->slot_mutex);
3130 	mutex_unlock(&service->bulk_mutex);
3131 
3132 	vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
3133 			state->id, service->localport, dir_char, queue->local_insert,
3134 			queue->remote_insert, queue->process);
3135 
3136 waiting:
3137 	vchiq_service_put(service);
3138 
3139 	status = VCHIQ_SUCCESS;
3140 
3141 	if (bulk_waiter) {
3142 		bulk_waiter->bulk = bulk;
3143 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3144 			status = VCHIQ_RETRY;
3145 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3146 			status = VCHIQ_ERROR;
3147 	}
3148 
3149 	return status;
3150 
3151 unlock_both_error_exit:
3152 	mutex_unlock(&state->slot_mutex);
3153 cancel_bulk_error_exit:
3154 	vchiq_complete_bulk(service->instance, bulk);
3155 unlock_error_exit:
3156 	mutex_unlock(&service->bulk_mutex);
3157 
3158 error_exit:
3159 	if (service)
3160 		vchiq_service_put(service);
3161 	return status;
3162 }
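
/*
 * Usage sketch for the blocking mode described in the comment above
 * (kbuf/len are placeholders): the userdata argument must point at a
 * struct bulk_waiter:
 *
 *	struct bulk_waiter waiter = {};
 *
 *	status = vchiq_bulk_transfer(instance, handle, kbuf, NULL, len,
 *				     &waiter, VCHIQ_BULK_MODE_BLOCKING,
 *				     VCHIQ_BULK_TRANSMIT);
 *	if (status == VCHIQ_RETRY)
 *		// a signal arrived; once back in user context, resubmit with
 *		// VCHIQ_BULK_MODE_WAITING and the same waiter, which rejoins
 *		// the transfer at the 'waiting' label above.
 */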
3163 
3164 enum vchiq_status
3165 vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
3166 		    ssize_t (*copy_callback)(void *context, void *dest,
3167 					     size_t offset, size_t maxsize),
3168 		    void *context,
3169 		    size_t size)
3170 {
3171 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3172 	enum vchiq_status status = VCHIQ_ERROR;
3173 	int data_id;
3174 
3175 	if (!service)
3176 		goto error_exit;
3177 
3178 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3179 		goto error_exit;
3180 
3181 	if (!size) {
3182 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3183 		goto error_exit;
3184 	}
3185 
3186 	if (size > VCHIQ_MAX_MSG_SIZE) {
3187 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3188 		goto error_exit;
3189 	}
3190 
3191 	data_id = MAKE_DATA(service->localport, service->remoteport);
3192 
3193 	switch (service->srvstate) {
3194 	case VCHIQ_SRVSTATE_OPEN:
3195 		status = queue_message(service->state, service, data_id,
3196 				       copy_callback, context, size, 1);
3197 		break;
3198 	case VCHIQ_SRVSTATE_OPENSYNC:
3199 		status = queue_message_sync(service->state, service, data_id,
3200 					    copy_callback, context, size, 1);
3201 		break;
3202 	default:
3203 		status = VCHIQ_ERROR;
3204 		break;
3205 	}
3206 
3207 error_exit:
3208 	if (service)
3209 		vchiq_service_put(service);
3210 
3211 	return status;
3212 }
3213 
3214 int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
3215 			       unsigned int size)
3216 {
3217 	enum vchiq_status status;
3218 
3219 	while (1) {
3220 		status = vchiq_queue_message(instance, handle, memcpy_copy_callback,
3221 					     data, size);
3222 
3223 		/*
3224 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3225 		 * implement a retry mechanism since this function is supposed
3226 		 * to block until queued
3227 		 */
3228 		if (status != VCHIQ_RETRY)
3229 			break;
3230 
3231 		msleep(1);
3232 	}
3233 
3234 	return status;
3235 }
3236 EXPORT_SYMBOL(vchiq_queue_kernel_message);
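
/*
 * Usage sketch (buf/len are placeholders): vchiq_queue_kernel_message()
 * wraps vchiq_queue_message() with memcpy_copy_callback and retries on
 * VCHIQ_RETRY, so a kernel caller simply does
 *
 *	ret = vchiq_queue_kernel_message(instance, handle, buf, len);
 *
 * and a non-VCHIQ_SUCCESS return typically means the service was not open
 * or the message size was rejected, not that a retry is needed.
 */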
3237 
3238 void
3239 vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
3240 		      struct vchiq_header *header)
3241 {
3242 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3243 	struct vchiq_shared_state *remote;
3244 	struct vchiq_state *state;
3245 	int slot_index;
3246 
3247 	if (!service)
3248 		return;
3249 
3250 	state = service->state;
3251 	remote = state->remote;
3252 
3253 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3254 
3255 	if ((slot_index >= remote->slot_first) &&
3256 	    (slot_index <= remote->slot_last)) {
3257 		int msgid = header->msgid;
3258 
3259 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3260 			struct vchiq_slot_info *slot_info =
3261 				SLOT_INFO_FROM_INDEX(state, slot_index);
3262 
3263 			release_slot(state, slot_info, header, service);
3264 		}
3265 	} else if (slot_index == remote->slot_sync) {
3266 		release_message_sync(state, header);
3267 	}
3268 
3269 	vchiq_service_put(service);
3270 }
3271 EXPORT_SYMBOL(vchiq_release_message);
3272 
3273 static void
3274 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3275 {
3276 	header->msgid = VCHIQ_MSGID_PADDING;
3277 	remote_event_signal(&state->remote->sync_release);
3278 }
3279 
3280 enum vchiq_status
3281 vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
3282 {
3283 	enum vchiq_status status = VCHIQ_ERROR;
3284 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3285 
3286 	if (!service)
3287 		goto exit;
3288 
3289 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3290 		goto exit;
3291 
3292 	if (!peer_version)
3293 		goto exit;
3294 
3295 	*peer_version = service->peer_version;
3296 	status = VCHIQ_SUCCESS;
3297 
3298 exit:
3299 	if (service)
3300 		vchiq_service_put(service);
3301 	return status;
3302 }
3303 EXPORT_SYMBOL(vchiq_get_peer_version);
3304 
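/* Fill in the compile-time VCHIQ limits and version numbers. */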
3305 void vchiq_get_config(struct vchiq_config *config)
3306 {
3307 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3308 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3309 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3310 	config->max_services           = VCHIQ_MAX_SERVICES;
3311 	config->version                = VCHIQ_VERSION;
3312 	config->version_min            = VCHIQ_VERSION_MIN;
3313 }
3314 
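/*
 * Adjust a per-service option.  A quota value of 0 restores the state's
 * default; when a quota is raised to at least the current usage the quota
 * event is completed so a stalled sender can make progress.  SYNCHRONOUS
 * may only be changed while the service is still HIDDEN or LISTENING.
 */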
3315 int
3316 vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
3317 			 enum vchiq_service_option option, int value)
3318 {
3319 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3320 	struct vchiq_service_quota *quota;
3321 	int ret = -EINVAL;
3322 
3323 	if (!service)
3324 		return -EINVAL;
3325 
3326 	switch (option) {
3327 	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3328 		service->auto_close = value;
3329 		ret = 0;
3330 		break;
3331 
3332 	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3333 		quota = &service->state->service_quotas[service->localport];
3334 		if (value == 0)
3335 			value = service->state->default_slot_quota;
3336 		if ((value >= quota->slot_use_count) &&
3337 		    (value < (unsigned short)~0)) {
3338 			quota->slot_quota = value;
3339 			if ((value >= quota->slot_use_count) &&
3340 			    (quota->message_quota >= quota->message_use_count))
3341 				/*
3342 				 * Signal the service that it may have
3343 				 * dropped below its quota
3344 				 */
3345 				complete(&quota->quota_event);
3346 			ret = 0;
3347 		}
3348 		break;
3349 
3350 	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3351 		quota = &service->state->service_quotas[service->localport];
3352 		if (value == 0)
3353 			value = service->state->default_message_quota;
3354 		if ((value >= quota->message_use_count) &&
3355 		    (value < (unsigned short)~0)) {
3356 			quota->message_quota = value;
3357 			if ((value >= quota->message_use_count) &&
3358 			    (quota->slot_quota >= quota->slot_use_count))
3359 				/*
3360 				 * Signal the service that it may have
3361 				 * dropped below its quota
3362 				 */
3363 				complete(&quota->quota_event);
3364 			ret = 0;
3365 		}
3366 		break;
3367 
3368 	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3369 		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3370 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3371 			service->sync = value;
3372 			ret = 0;
3373 		}
3374 		break;
3375 
3376 	case VCHIQ_SERVICE_OPTION_TRACE:
3377 		service->trace = value;
3378 		ret = 0;
3379 		break;
3380 
3381 	default:
3382 		break;
3383 	}
3384 	vchiq_service_put(service);
3385 
3386 	return ret;
3387 }
3388 
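/*
 * Dump one half of the shared state ("Local" or "Remote"): its slot range
 * and positions, any slots whose use and release counts differ, and the
 * debug counters published by the owner of that half.
 */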
3389 static int
3390 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3391 			struct vchiq_shared_state *shared, const char *label)
3392 {
3393 	static const char *const debug_names[] = {
3394 		"<entries>",
3395 		"SLOT_HANDLER_COUNT",
3396 		"SLOT_HANDLER_LINE",
3397 		"PARSE_LINE",
3398 		"PARSE_HEADER",
3399 		"PARSE_MSGID",
3400 		"AWAIT_COMPLETION_LINE",
3401 		"DEQUEUE_MESSAGE_LINE",
3402 		"SERVICE_CALLBACK_LINE",
3403 		"MSG_QUEUE_FULL_COUNT",
3404 		"COMPLETION_QUEUE_FULL_COUNT"
3405 	};
3406 	int i;
3407 	char buf[80];
3408 	int len;
3409 	int err;
3410 
3411 	len = scnprintf(buf, sizeof(buf), "  %s: slots %d-%d tx_pos=%x recycle=%x",
3412 			label, shared->slot_first, shared->slot_last,
3413 			shared->tx_pos, shared->slot_queue_recycle);
3414 	err = vchiq_dump(dump_context, buf, len + 1);
3415 	if (err)
3416 		return err;
3417 
3418 	len = scnprintf(buf, sizeof(buf), "    Slots claimed:");
3419 	err = vchiq_dump(dump_context, buf, len + 1);
3420 	if (err)
3421 		return err;
3422 
3423 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3424 		struct vchiq_slot_info slot_info =
3425 						*SLOT_INFO_FROM_INDEX(state, i);
3426 		if (slot_info.use_count != slot_info.release_count) {
3427 			len = scnprintf(buf, sizeof(buf), "      %d: %d/%d", i, slot_info.use_count,
3428 					slot_info.release_count);
3429 			err = vchiq_dump(dump_context, buf, len + 1);
3430 			if (err)
3431 				return err;
3432 		}
3433 	}
3434 
3435 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3436 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3437 				debug_names[i], shared->debug[i], shared->debug[i]);
3438 		err = vchiq_dump(dump_context, buf, len + 1);
3439 		if (err)
3440 			return err;
3441 	}
3442 	return 0;
3443 }
3444 
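/*
 * Top-level state dump: connection state, tx/rx positions, version, global
 * stats, slot availability, both shared-state halves, platform-specific
 * information and every service that has been allocated a port.
 */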
3445 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3446 {
3447 	char buf[80];
3448 	int len;
3449 	int i;
3450 	int err;
3451 
3452 	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3453 			conn_state_names[state->conn_state]);
3454 	err = vchiq_dump(dump_context, buf, len + 1);
3455 	if (err)
3456 		return err;
3457 
3458 	len = scnprintf(buf, sizeof(buf), "  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3459 			state->local->tx_pos,
3460 			state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3461 			state->rx_pos,
3462 			state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3463 	err = vchiq_dump(dump_context, buf, len + 1);
3464 	if (err)
3465 		return err;
3466 
3467 	len = scnprintf(buf, sizeof(buf), "  Version: %d (min %d)",
3468 			VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3469 	err = vchiq_dump(dump_context, buf, len + 1);
3470 	if (err)
3471 		return err;
3472 
3473 	if (VCHIQ_ENABLE_STATS) {
3474 		len = scnprintf(buf, sizeof(buf),
3475 				"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3476 				state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3477 				state->stats.error_count);
3478 		err = vchiq_dump(dump_context, buf, len + 1);
3479 		if (err)
3480 			return err;
3481 	}
3482 
3483 	len = scnprintf(buf, sizeof(buf),
3484 			"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3485 			((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3486 			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3487 			state->data_quota - state->data_use_count,
3488 			state->local->slot_queue_recycle - state->slot_queue_available,
3489 			state->stats.slot_stalls, state->stats.data_stalls);
3490 	err = vchiq_dump(dump_context, buf, len + 1);
3491 	if (err)
3492 		return err;
3493 
3494 	err = vchiq_dump_platform_state(dump_context);
3495 	if (err)
3496 		return err;
3497 
3498 	err = vchiq_dump_shared_state(dump_context,
3499 				      state,
3500 				      state->local,
3501 				      "Local");
3502 	if (err)
3503 		return err;
3504 	err = vchiq_dump_shared_state(dump_context,
3505 				      state,
3506 				      state->remote,
3507 				      "Remote");
3508 	if (err)
3509 		return err;
3510 
3511 	err = vchiq_dump_platform_instances(dump_context);
3512 	if (err)
3513 		return err;
3514 
3515 	for (i = 0; i < state->unused_service; i++) {
3516 		struct vchiq_service *service = find_service_by_port(state, i);
3517 
3518 		if (service) {
3519 			err = vchiq_dump_service_state(dump_context, service);
3520 			vchiq_service_put(service);
3521 			if (err)
3522 				return err;
3523 		}
3524 	}
3525 	return 0;
3526 }
3527 
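/*
 * Dump a single service: state, reference count, fourcc, remote port and
 * quotas, pending bulk transfers and (when stats are enabled) its control,
 * bulk and stall counters.
 */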
3528 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3529 {
3530 	char buf[80];
3531 	int len;
3532 	int err;
3533 	unsigned int ref_count;
3534 
3535 	/* Don't include the lock just taken */
3536 	ref_count = kref_read(&service->ref_count) - 1;
3537 	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3538 			service->localport, srvstate_names[service->srvstate],
3539 			ref_count);
3540 
3541 	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3542 		char remoteport[30];
3543 		struct vchiq_service_quota *quota =
3544 			&service->state->service_quotas[service->localport];
3545 		int fourcc = service->base.fourcc;
3546 		int tx_pending, rx_pending;
3547 
3548 		if (service->remoteport != VCHIQ_PORT_FREE) {
3549 			int len2 = scnprintf(remoteport, sizeof(remoteport),
3550 				"%u", service->remoteport);
3551 
3552 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3553 				scnprintf(remoteport + len2, sizeof(remoteport) - len2,
3554 					  " (client %x)", service->client_id);
3555 		} else {
3556 			strscpy(remoteport, "n/a", sizeof(remoteport));
3557 		}
3558 
3559 		len += scnprintf(buf + len, sizeof(buf) - len,
3560 				 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3561 				 VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
3562 				 quota->message_use_count, quota->message_quota,
3563 				 quota->slot_use_count, quota->slot_quota);
3564 
3565 		err = vchiq_dump(dump_context, buf, len + 1);
3566 		if (err)
3567 			return err;
3568 
3569 		tx_pending = service->bulk_tx.local_insert -
3570 			service->bulk_tx.remote_insert;
3571 
3572 		rx_pending = service->bulk_rx.local_insert -
3573 			service->bulk_rx.remote_insert;
3574 
3575 		len = scnprintf(buf, sizeof(buf),
3576 				"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3577 				tx_pending,
3578 				tx_pending ?
3579 				service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
3580 				0, rx_pending, rx_pending ?
3581 				service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
3582 				0);
3583 
3584 		if (VCHIQ_ENABLE_STATS) {
3585 			err = vchiq_dump(dump_context, buf, len + 1);
3586 			if (err)
3587 				return err;
3588 
3589 			len = scnprintf(buf, sizeof(buf),
3590 					"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3591 					service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
3592 					service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
3593 			err = vchiq_dump(dump_context, buf, len + 1);
3594 			if (err)
3595 				return err;
3596 
3597 			len = scnprintf(buf, sizeof(buf),
3598 					"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3599 					service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
3600 					service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
3601 			err = vchiq_dump(dump_context, buf, len + 1);
3602 			if (err)
3603 				return err;
3604 
3605 			len = scnprintf(buf, sizeof(buf),
3606 					"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3607 					service->stats.quota_stalls, service->stats.slot_stalls,
3608 					service->stats.bulk_stalls,
3609 					service->stats.bulk_aborted_count,
3610 					service->stats.error_count);
3611 		}
3612 	}
3613 
3614 	err = vchiq_dump(dump_context, buf, len + 1);
3615 	if (err)
3616 		return err;
3617 
3618 	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3619 		err = vchiq_dump_platform_service_state(dump_context, service);
3620 	return err;
3621 }
3622 
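/*
 * vchiq_loud_error_header()/vchiq_loud_error_footer() bracket a serious
 * error report with banner lines so it stands out in the log.
 */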
3623 void
3624 vchiq_loud_error_header(void)
3625 {
3626 	vchiq_log_error(vchiq_core_log_level,
3627 			"============================================================================");
3628 	vchiq_log_error(vchiq_core_log_level,
3629 			"============================================================================");
3630 	vchiq_log_error(vchiq_core_log_level, "=====");
3631 }
3632 
3633 void
3634 vchiq_loud_error_footer(void)
3635 {
3636 	vchiq_log_error(vchiq_core_log_level, "=====");
3637 	vchiq_log_error(vchiq_core_log_level,
3638 			"============================================================================");
3639 	vchiq_log_error(vchiq_core_log_level,
3640 			"============================================================================");
3641 }
3642 
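/*
 * Send a REMOTE_USE control message to the peer.  Returns VCHIQ_RETRY while
 * the connection is still DISCONNECTED.
 */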
3643 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3644 {
3645 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3646 		return VCHIQ_RETRY;
3647 
3648 	return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
3649 }
3650 
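/*
 * As above, but sends REMOTE_USE_ACTIVE; also refused with VCHIQ_RETRY
 * until the connection is established.
 */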
3651 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3652 {
3653 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3654 		return VCHIQ_RETRY;
3655 
3656 	return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
3657 			     NULL, NULL, 0, 0);
3658 }
3659 
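/*
 * Trace-level hex dump: 16 bytes per line, hex on the left and printable
 * ASCII (non-printables shown as '.') on the right, prefixed with an
 * optional label and the supplied address.
 *
 * Illustrative call (data and size are the caller's own, shown only as an
 * example; 0 is used as the display address):
 *
 *	vchiq_log_dump_mem("msg", 0, data, size);
 */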
3660 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
3661 {
3662 	const u8 *mem = void_mem;
3663 	size_t offset;
3664 	char line_buf[100];
3665 	char *s;
3666 
3667 	while (num_bytes > 0) {
3668 		s = line_buf;
3669 
3670 		for (offset = 0; offset < 16; offset++) {
3671 			if (offset < num_bytes)
3672 				s += scnprintf(s, 4, "%02x ", mem[offset]);
3673 			else
3674 				s += scnprintf(s, 4, "   ");
3675 		}
3676 
3677 		for (offset = 0; offset < 16; offset++) {
3678 			if (offset < num_bytes) {
3679 				u8 ch = mem[offset];
3680 
3681 				if ((ch < ' ') || (ch > '~'))
3682 					ch = '.';
3683 				*s++ = (char)ch;
3684 			}
3685 		}
3686 		*s++ = '\0';
3687 
3688 		if (label && (*label != '\0'))
3689 			vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
3690 		else
3691 			vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);
3692 
3693 		addr += 16;
3694 		mem += 16;
3695 		if (num_bytes > 16)
3696 			num_bytes -= 16;
3697 		else
3698 			num_bytes = 0;
3699 	}
3700 }
3701