1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_core.h"
17 
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
19 
20 #define HANDLE_STATE_SHIFT 12
21 
22 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
23 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
24 #define SLOT_INDEX_FROM_DATA(state, data) \
25 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
26 	VCHIQ_SLOT_SIZE)
27 #define SLOT_INDEX_FROM_INFO(state, info) \
28 	((unsigned int)(info - state->slot_info))
29 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
30 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
31 
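/* Index into a circular bulk queue; relies on VCHIQ_NUM_SERVICE_BULKS
** being a power of two (asserted below). */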
32 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
33 
34 #define SRVTRACE_LEVEL(srv) \
35 	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
36 #define SRVTRACE_ENABLED(srv, lev) \
37 	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
38 
39 struct vchiq_open_payload {
40 	int fourcc;
41 	int client_id;
42 	short version;
43 	short version_min;
44 };
45 
46 struct vchiq_openack_payload {
47 	short version;
48 };
49 
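/* Flags for queue_message():
** QMFLAGS_IS_BLOCKING     - reserve_space() may block waiting for a free slot
** QMFLAGS_NO_MUTEX_LOCK   - do not take slot_mutex on entry
** QMFLAGS_NO_MUTEX_UNLOCK - leave slot_mutex held on return */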
50 enum {
51 	QMFLAGS_IS_BLOCKING     = BIT(0),
52 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
53 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
54 };
55 
56 /* we require this for consistency between endpoints */
57 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
58 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
59 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
60 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
61 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
62 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
63 
64 /* Run time control of log level, based on KERN_XXX level. */
65 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
66 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
67 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
68 
69 DEFINE_SPINLOCK(bulk_waiter_spinlock);
70 static DEFINE_SPINLOCK(quota_spinlock);
71 
72 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
73 static unsigned int handle_seq;
74 
75 static const char *const srvstate_names[] = {
76 	"FREE",
77 	"HIDDEN",
78 	"LISTENING",
79 	"OPENING",
80 	"OPEN",
81 	"OPENSYNC",
82 	"CLOSESENT",
83 	"CLOSERECVD",
84 	"CLOSEWAIT",
85 	"CLOSED"
86 };
87 
88 static const char *const reason_names[] = {
89 	"SERVICE_OPENED",
90 	"SERVICE_CLOSED",
91 	"MESSAGE_AVAILABLE",
92 	"BULK_TRANSMIT_DONE",
93 	"BULK_RECEIVE_DONE",
94 	"BULK_TRANSMIT_ABORTED",
95 	"BULK_RECEIVE_ABORTED"
96 };
97 
98 static const char *const conn_state_names[] = {
99 	"DISCONNECTED",
100 	"CONNECTING",
101 	"CONNECTED",
102 	"PAUSING",
103 	"PAUSE_SENT",
104 	"PAUSED",
105 	"RESUMING",
106 	"PAUSE_TIMEOUT",
107 	"RESUME_TIMEOUT"
108 };
109 
110 static void
111 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
112 
113 static const char *msg_type_str(unsigned int msg_type)
114 {
115 	switch (msg_type) {
116 	case VCHIQ_MSG_PADDING:       return "PADDING";
117 	case VCHIQ_MSG_CONNECT:       return "CONNECT";
118 	case VCHIQ_MSG_OPEN:          return "OPEN";
119 	case VCHIQ_MSG_OPENACK:       return "OPENACK";
120 	case VCHIQ_MSG_CLOSE:         return "CLOSE";
121 	case VCHIQ_MSG_DATA:          return "DATA";
122 	case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
123 	case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
124 	case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
125 	case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
126 	case VCHIQ_MSG_PAUSE:         return "PAUSE";
127 	case VCHIQ_MSG_RESUME:        return "RESUME";
128 	case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
129 	case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
130 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
131 	}
132 	return "???";
133 }
134 
135 static inline void
136 vchiq_set_service_state(struct vchiq_service *service, int newstate)
137 {
138 	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
139 		service->state->id, service->localport,
140 		srvstate_names[service->srvstate],
141 		srvstate_names[newstate]);
142 	service->srvstate = newstate;
143 }
144 
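/* Look up a service by handle, taking a reference under RCU. The caller
** must drop the reference with unlock_service(). */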
145 struct vchiq_service *
146 find_service_by_handle(unsigned int handle)
147 {
148 	struct vchiq_service *service;
149 
150 	rcu_read_lock();
151 	service = handle_to_service(handle);
152 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
153 	    service->handle == handle &&
154 	    kref_get_unless_zero(&service->ref_count)) {
155 		service = rcu_pointer_handoff(service);
156 		rcu_read_unlock();
157 		return service;
158 	}
159 	rcu_read_unlock();
160 	vchiq_log_info(vchiq_core_log_level,
161 		       "Invalid service handle 0x%x", handle);
162 	return NULL;
163 }
164 
165 struct vchiq_service *
166 find_service_by_port(struct vchiq_state *state, int localport)
167 {
168 
169 	if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
170 		struct vchiq_service *service;
171 
172 		rcu_read_lock();
173 		service = rcu_dereference(state->services[localport]);
174 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
175 		    kref_get_unless_zero(&service->ref_count)) {
176 			service = rcu_pointer_handoff(service);
177 			rcu_read_unlock();
178 			return service;
179 		}
180 		rcu_read_unlock();
181 	}
182 	vchiq_log_info(vchiq_core_log_level,
183 		       "Invalid port %d", localport);
184 	return NULL;
185 }
186 
187 struct vchiq_service *
188 find_service_for_instance(struct vchiq_instance *instance,
189 	unsigned int handle)
190 {
191 	struct vchiq_service *service;
192 
193 	rcu_read_lock();
194 	service = handle_to_service(handle);
195 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
196 	    service->handle == handle &&
197 	    service->instance == instance &&
198 	    kref_get_unless_zero(&service->ref_count)) {
199 		service = rcu_pointer_handoff(service);
200 		rcu_read_unlock();
201 		return service;
202 	}
203 	rcu_read_unlock();
204 	vchiq_log_info(vchiq_core_log_level,
205 		       "Invalid service handle 0x%x", handle);
206 	return NULL;
207 }
208 
209 struct vchiq_service *
210 find_closed_service_for_instance(struct vchiq_instance *instance,
211 	unsigned int handle)
212 {
213 	struct vchiq_service *service;
214 
215 	rcu_read_lock();
216 	service = handle_to_service(handle);
217 	if (service &&
218 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
219 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
220 	    service->handle == handle &&
221 	    service->instance == instance &&
222 	    kref_get_unless_zero(&service->ref_count)) {
223 		service = rcu_pointer_handoff(service);
224 		rcu_read_unlock();
225 		return service;
226 	}
227 	rcu_read_unlock();
228 	vchiq_log_info(vchiq_core_log_level,
229 		       "Invalid service handle 0x%x", handle);
230 	return service;
231 }
232 
233 struct vchiq_service *
234 __next_service_by_instance(struct vchiq_state *state,
235 			   struct vchiq_instance *instance,
236 			   int *pidx)
237 {
238 	struct vchiq_service *service = NULL;
239 	int idx = *pidx;
240 
241 	while (idx < state->unused_service) {
242 		struct vchiq_service *srv;
243 
244 		srv = rcu_dereference(state->services[idx++]);
245 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
246 		    srv->instance == instance) {
247 			service = srv;
248 			break;
249 		}
250 	}
251 
252 	*pidx = idx;
253 	return service;
254 }
255 
256 struct vchiq_service *
257 next_service_by_instance(struct vchiq_state *state,
258 			 struct vchiq_instance *instance,
259 			 int *pidx)
260 {
261 	struct vchiq_service *service;
262 
263 	rcu_read_lock();
264 	while (1) {
265 		service = __next_service_by_instance(state, instance, pidx);
266 		if (!service)
267 			break;
268 		if (kref_get_unless_zero(&service->ref_count)) {
269 			service = rcu_pointer_handoff(service);
270 			break;
271 		}
272 	}
273 	rcu_read_unlock();
274 	return service;
275 }
276 
277 void
278 lock_service(struct vchiq_service *service)
279 {
280 	if (!service) {
281 		WARN(1, "%s service is NULL\n", __func__);
282 		return;
283 	}
284 	kref_get(&service->ref_count);
285 }
286 
287 static void service_release(struct kref *kref)
288 {
289 	struct vchiq_service *service =
290 		container_of(kref, struct vchiq_service, ref_count);
291 	struct vchiq_state *state = service->state;
292 
293 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
294 	rcu_assign_pointer(state->services[service->localport], NULL);
295 	if (service->userdata_term)
296 		service->userdata_term(service->base.userdata);
297 	kfree_rcu(service, rcu);
298 }
299 
300 void
301 unlock_service(struct vchiq_service *service)
302 {
303 	if (!service) {
304 		WARN(1, "%s: service is NULL\n", __func__);
305 		return;
306 	}
307 	kref_put(&service->ref_count, service_release);
308 }
309 
310 int
311 vchiq_get_client_id(unsigned int handle)
312 {
313 	struct vchiq_service *service;
314 	int id;
315 
316 	rcu_read_lock();
317 	service = handle_to_service(handle);
318 	id = service ? service->client_id : 0;
319 	rcu_read_unlock();
320 	return id;
321 }
322 
323 void *
324 vchiq_get_service_userdata(unsigned int handle)
325 {
326 	void *userdata;
327 	struct vchiq_service *service;
328 
329 	rcu_read_lock();
330 	service = handle_to_service(handle);
331 	userdata = service ? service->base.userdata : NULL;
332 	rcu_read_unlock();
333 	return userdata;
334 }
335 EXPORT_SYMBOL(vchiq_get_service_userdata);
336 
337 static void
338 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
339 {
340 	struct vchiq_state *state = service->state;
341 	struct vchiq_service_quota *service_quota;
342 
343 	service->closing = 1;
344 
345 	/* Synchronise with other threads. */
346 	mutex_lock(&state->recycle_mutex);
347 	mutex_unlock(&state->recycle_mutex);
348 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
349 		/* If we're pausing then the slot_mutex is held until resume
350 		 * by the slot handler.  Therefore don't try to acquire this
351 		 * mutex if we're the slot handler and in the pause sent state.
352 		 * We don't need to in this case anyway. */
353 		mutex_lock(&state->slot_mutex);
354 		mutex_unlock(&state->slot_mutex);
355 	}
356 
357 	/* Unblock any sending thread. */
358 	service_quota = &state->service_quotas[service->localport];
359 	complete(&service_quota->quota_event);
360 }
361 
362 static void
363 mark_service_closing(struct vchiq_service *service)
364 {
365 	mark_service_closing_internal(service, 0);
366 }
367 
368 static inline enum vchiq_status
369 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
370 		      struct vchiq_header *header, void *bulk_userdata)
371 {
372 	enum vchiq_status status;
373 
374 	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
375 		service->state->id, service->localport, reason_names[reason],
376 		header, bulk_userdata);
377 	status = service->base.callback(reason, header, service->handle,
378 		bulk_userdata);
379 	if (status == VCHIQ_ERROR) {
380 		vchiq_log_warning(vchiq_core_log_level,
381 			"%d: ignoring ERROR from callback to service %x",
382 			service->state->id, service->handle);
383 		status = VCHIQ_SUCCESS;
384 	}
385 
386 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
387 		vchiq_release_message(service->handle, header);
388 
389 	return status;
390 }
391 
392 inline void
393 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
394 {
395 	enum vchiq_connstate oldstate = state->conn_state;
396 
397 	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
398 		conn_state_names[oldstate],
399 		conn_state_names[newstate]);
400 	state->conn_state = newstate;
401 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
402 }
403 
404 static inline void
405 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
406 {
407 	event->armed = 0;
408 	/* Don't clear the 'fired' flag because it may already have been set
409 	** by the other side. */
410 	init_waitqueue_head(wq);
411 }
412 
413 /*
414  * All the event waiting routines in VCHIQ used a custom semaphore
415  * implementation that filtered most signals. This achieved a behaviour similar
416  * to the "killable" family of functions. While cleaning up this code all the
417  * routines were switched to the "interruptible" family of functions, as the
418  * former was deemed unjustified and the use of "killable" put all VCHIQ's
419  * threads in D state.
420  */
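/* Returns 1 once the remote event has fired, or 0 if the wait was
** interrupted by a signal. */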
421 static inline int
422 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
423 {
424 	if (!event->fired) {
425 		event->armed = 1;
426 		dsb(sy);
427 		if (wait_event_interruptible(*wq, event->fired)) {
428 			event->armed = 0;
429 			return 0;
430 		}
431 		event->armed = 0;
432 		wmb();
433 	}
434 
435 	event->fired = 0;
436 	return 1;
437 }
438 
439 static inline void
440 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
441 {
442 	event->fired = 1;
443 	event->armed = 0;
444 	wake_up_all(wq);
445 }
446 
447 static inline void
448 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
449 {
450 	if (event->fired && event->armed)
451 		remote_event_signal_local(wq, event);
452 }
453 
454 void
455 remote_event_pollall(struct vchiq_state *state)
456 {
457 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
458 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
459 	remote_event_poll(&state->trigger_event, &state->local->trigger);
460 	remote_event_poll(&state->recycle_event, &state->local->recycle);
461 }
462 
463 /* Round up message sizes so that any space at the end of a slot is always big
464 ** enough for a header. This relies on header size being a power of two, which
465 ** has been verified earlier by a static assertion. */
466 
467 static inline size_t
468 calc_stride(size_t size)
469 {
470 	/* Allow room for the header */
471 	size += sizeof(struct vchiq_header);
472 
473 	/* Round up */
474 	return (size + sizeof(struct vchiq_header) - 1) &
475 		~(sizeof(struct vchiq_header) - 1);
476 }
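/* For example, with the 8-byte header asserted above:
**   calc_stride(5)  == (5 + 8 + 7) & ~7 == 16
**   calc_stride(24) == (24 + 8 + 7) & ~7 == 32
** so every message, header included, occupies a multiple of
** sizeof(struct vchiq_header) within its slot. */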
477 
478 /* Called by the slot handler thread */
479 static struct vchiq_service *
480 get_listening_service(struct vchiq_state *state, int fourcc)
481 {
482 	int i;
483 
484 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
485 
486 	rcu_read_lock();
487 	for (i = 0; i < state->unused_service; i++) {
488 		struct vchiq_service *service;
489 
490 		service = rcu_dereference(state->services[i]);
491 		if (service &&
492 		    service->public_fourcc == fourcc &&
493 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
494 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
495 		      service->remoteport == VCHIQ_PORT_FREE)) &&
496 		    kref_get_unless_zero(&service->ref_count)) {
497 			service = rcu_pointer_handoff(service);
498 			rcu_read_unlock();
499 			return service;
500 		}
501 	}
502 	rcu_read_unlock();
503 	return NULL;
504 }
505 
506 /* Called by the slot handler thread */
507 static struct vchiq_service *
508 get_connected_service(struct vchiq_state *state, unsigned int port)
509 {
510 	int i;
511 
512 	rcu_read_lock();
513 	for (i = 0; i < state->unused_service; i++) {
514 		struct vchiq_service *service =
515 			rcu_dereference(state->services[i]);
516 
517 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
518 		    service->remoteport == port &&
519 		    kref_get_unless_zero(&service->ref_count)) {
520 			service = rcu_pointer_handoff(service);
521 			rcu_read_unlock();
522 			return service;
523 		}
524 	}
525 	rcu_read_unlock();
526 	return NULL;
527 }
528 
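/* Record a poll request for the given service (if any), mark the state as
** needing a poll pass and wake the local slot handler. */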
529 inline void
530 request_poll(struct vchiq_state *state, struct vchiq_service *service,
531 	     int poll_type)
532 {
533 	u32 value;
534 
535 	if (service) {
536 		do {
537 			value = atomic_read(&service->poll_flags);
538 		} while (atomic_cmpxchg(&service->poll_flags, value,
539 			value | BIT(poll_type)) != value);
540 
541 		do {
542 			value = atomic_read(&state->poll_services[
543 				service->localport>>5]);
544 		} while (atomic_cmpxchg(
545 			&state->poll_services[service->localport>>5],
546 			value, value | BIT(service->localport & 0x1f))
547 			!= value);
548 	}
549 
550 	state->poll_needed = 1;
551 	wmb();
552 
553 	/* ... and ensure the slot handler runs. */
554 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
555 }
556 
557 /* Called from queue_message, by the slot handler and application threads,
558 ** with slot_mutex held */
559 static struct vchiq_header *
560 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
561 {
562 	struct vchiq_shared_state *local = state->local;
563 	int tx_pos = state->local_tx_pos;
564 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
565 
566 	if (space > slot_space) {
567 		struct vchiq_header *header;
568 		/* Fill the remaining space with padding */
569 		WARN_ON(!state->tx_data);
570 		header = (struct vchiq_header *)
571 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
572 		header->msgid = VCHIQ_MSGID_PADDING;
573 		header->size = slot_space - sizeof(struct vchiq_header);
574 
575 		tx_pos += slot_space;
576 	}
577 
578 	/* If necessary, get the next slot. */
579 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
580 		int slot_index;
581 
582 		/* If there is no free slot... */
583 
584 		if (!try_wait_for_completion(&state->slot_available_event)) {
585 			/* ...wait for one. */
586 
587 			VCHIQ_STATS_INC(state, slot_stalls);
588 
589 			/* But first, flush through the last slot. */
590 			state->local_tx_pos = tx_pos;
591 			local->tx_pos = tx_pos;
592 			remote_event_signal(&state->remote->trigger);
593 
594 			if (!is_blocking ||
595 				(wait_for_completion_interruptible(
596 				&state->slot_available_event)))
597 				return NULL; /* No space available */
598 		}
599 
600 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
601 			complete(&state->slot_available_event);
602 			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
603 			return NULL;
604 		}
605 
606 		slot_index = local->slot_queue[
607 			SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
608 			VCHIQ_SLOT_QUEUE_MASK];
609 		state->tx_data =
610 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
611 	}
612 
613 	state->local_tx_pos = tx_pos + space;
614 
615 	return (struct vchiq_header *)(state->tx_data +
616 						(tx_pos & VCHIQ_SLOT_MASK));
617 }
618 
619 /* Called by the recycle thread. */
620 static void
621 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
622 		   size_t length)
623 {
624 	struct vchiq_shared_state *local = state->local;
625 	int slot_queue_available;
626 
627 	/* Find slots which have been freed by the other side, and return them
628 	** to the available queue. */
629 	slot_queue_available = state->slot_queue_available;
630 
631 	/*
632 	 * Use a memory barrier to ensure that any state that may have been
633 	 * modified by another thread is not masked by stale prefetched
634 	 * values.
635 	 */
636 	mb();
637 
638 	while (slot_queue_available != local->slot_queue_recycle) {
639 		unsigned int pos;
640 		int slot_index = local->slot_queue[slot_queue_available++ &
641 			VCHIQ_SLOT_QUEUE_MASK];
642 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
643 		int data_found = 0;
644 
645 		/*
646 		 * Beware of the address dependency - data is calculated
647 		 * using an index written by the other side.
648 		 */
649 		rmb();
650 
651 		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
652 			state->id, slot_index, data,
653 			local->slot_queue_recycle, slot_queue_available);
654 
655 		/* Initialise the bitmask for services which have used this
656 		** slot */
657 		memset(service_found, 0, length);
658 
659 		pos = 0;
660 
661 		while (pos < VCHIQ_SLOT_SIZE) {
662 			struct vchiq_header *header =
663 				(struct vchiq_header *)(data + pos);
664 			int msgid = header->msgid;
665 
666 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
667 				int port = VCHIQ_MSG_SRCPORT(msgid);
668 				struct vchiq_service_quota *service_quota =
669 					&state->service_quotas[port];
670 				int count;
671 
672 				spin_lock(&quota_spinlock);
673 				count = service_quota->message_use_count;
674 				if (count > 0)
675 					service_quota->message_use_count =
676 						count - 1;
677 				spin_unlock(&quota_spinlock);
678 
679 				if (count == service_quota->message_quota)
680 					/* Signal the service that it
681 					** has dropped below its quota
682 					*/
683 					complete(&service_quota->quota_event);
684 				else if (count == 0) {
685 					vchiq_log_error(vchiq_core_log_level,
686 						"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
687 						port,
688 						service_quota->message_use_count,
689 						header, msgid, header->msgid,
690 						header->size);
691 					WARN(1, "invalid message use count\n");
692 				}
693 				if (!BITSET_IS_SET(service_found, port)) {
694 					/* Set the found bit for this service */
695 					BITSET_SET(service_found, port);
696 
697 					spin_lock(&quota_spinlock);
698 					count = service_quota->slot_use_count;
699 					if (count > 0)
700 						service_quota->slot_use_count =
701 							count - 1;
702 					spin_unlock(&quota_spinlock);
703 
704 					if (count > 0) {
705 						/* Signal the service in case
706 						** it has dropped below its
707 						** quota */
708 						complete(&service_quota->quota_event);
709 						vchiq_log_trace(
710 							vchiq_core_log_level,
711 							"%d: pfq:%d %x@%pK - slot_use->%d",
712 							state->id, port,
713 							header->size, header,
714 							count - 1);
715 					} else {
716 						vchiq_log_error(
717 							vchiq_core_log_level,
718 								"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
719 							port, count, header,
720 							msgid, header->msgid,
721 							header->size);
722 						WARN(1, "bad slot use count\n");
723 					}
724 				}
725 
726 				data_found = 1;
727 			}
728 
729 			pos += calc_stride(header->size);
730 			if (pos > VCHIQ_SLOT_SIZE) {
731 				vchiq_log_error(vchiq_core_log_level,
732 					"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
733 					pos, header, msgid, header->msgid,
734 					header->size);
735 				WARN(1, "invalid slot position\n");
736 			}
737 		}
738 
739 		if (data_found) {
740 			int count;
741 
742 			spin_lock(&quota_spinlock);
743 			count = state->data_use_count;
744 			if (count > 0)
745 				state->data_use_count =
746 					count - 1;
747 			spin_unlock(&quota_spinlock);
748 			if (count == state->data_quota)
749 				complete(&state->data_quota_event);
750 		}
751 
752 		/*
753 		 * Don't allow the slot to be reused until we are no
754 		 * longer interested in it.
755 		 */
756 		mb();
757 
758 		state->slot_queue_available = slot_queue_available;
759 		complete(&state->slot_available_event);
760 	}
761 }
762 
763 static ssize_t
764 memcpy_copy_callback(
765 	void *context, void *dest,
766 	size_t offset, size_t maxsize)
767 {
768 	memcpy(dest + offset, context + offset, maxsize);
769 	return maxsize;
770 }
771 
772 static ssize_t
773 copy_message_data(
774 	ssize_t (*copy_callback)(void *context, void *dest,
775 				 size_t offset, size_t maxsize),
776 	void *context,
777 	void *dest,
778 	size_t size)
779 {
780 	size_t pos = 0;
781 
782 	while (pos < size) {
783 		ssize_t callback_result;
784 		size_t max_bytes = size - pos;
785 
786 		callback_result =
787 			copy_callback(context, dest + pos,
788 				      pos, max_bytes);
789 
790 		if (callback_result < 0)
791 			return callback_result;
792 
793 		if (!callback_result)
794 			return -EIO;
795 
796 		if (callback_result > max_bytes)
797 			return -EIO;
798 
799 		pos += callback_result;
800 	}
801 
802 	return size;
803 }
804 
805 /* Called by the slot handler and application threads */
806 static enum vchiq_status
807 queue_message(struct vchiq_state *state, struct vchiq_service *service,
808 	      int msgid,
809 	      ssize_t (*copy_callback)(void *context, void *dest,
810 				       size_t offset, size_t maxsize),
811 	      void *context, size_t size, int flags)
812 {
813 	struct vchiq_shared_state *local;
814 	struct vchiq_service_quota *service_quota = NULL;
815 	struct vchiq_header *header;
816 	int type = VCHIQ_MSG_TYPE(msgid);
817 
818 	size_t stride;
819 
820 	local = state->local;
821 
822 	stride = calc_stride(size);
823 
824 	WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
825 
826 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
827 	    mutex_lock_killable(&state->slot_mutex))
828 		return VCHIQ_RETRY;
829 
830 	if (type == VCHIQ_MSG_DATA) {
831 		int tx_end_index;
832 
833 		if (!service) {
834 			WARN(1, "%s: service is NULL\n", __func__);
835 			mutex_unlock(&state->slot_mutex);
836 			return VCHIQ_ERROR;
837 		}
838 
839 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
840 				 QMFLAGS_NO_MUTEX_UNLOCK));
841 
842 		if (service->closing) {
843 			/* The service has been closed */
844 			mutex_unlock(&state->slot_mutex);
845 			return VCHIQ_ERROR;
846 		}
847 
848 		service_quota = &state->service_quotas[service->localport];
849 
850 		spin_lock(&quota_spinlock);
851 
852 		/* Ensure this service doesn't use more than its quota of
853 		** messages or slots */
854 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
855 			state->local_tx_pos + stride - 1);
856 
857 		/* Ensure data messages don't use more than their quota of
858 		** slots */
859 		while ((tx_end_index != state->previous_data_index) &&
860 			(state->data_use_count == state->data_quota)) {
861 			VCHIQ_STATS_INC(state, data_stalls);
862 			spin_unlock(&quota_spinlock);
863 			mutex_unlock(&state->slot_mutex);
864 
865 			if (wait_for_completion_interruptible(
866 						&state->data_quota_event))
867 				return VCHIQ_RETRY;
868 
869 			mutex_lock(&state->slot_mutex);
870 			spin_lock(&quota_spinlock);
871 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
872 				state->local_tx_pos + stride - 1);
873 			if ((tx_end_index == state->previous_data_index) ||
874 				(state->data_use_count < state->data_quota)) {
875 				/* Pass the signal on to other waiters */
876 				complete(&state->data_quota_event);
877 				break;
878 			}
879 		}
880 
881 		while ((service_quota->message_use_count ==
882 				service_quota->message_quota) ||
883 			((tx_end_index != service_quota->previous_tx_index) &&
884 			(service_quota->slot_use_count ==
885 				service_quota->slot_quota))) {
886 			spin_unlock(&quota_spinlock);
887 			vchiq_log_trace(vchiq_core_log_level,
888 				"%d: qm:%d %s,%zx - quota stall "
889 				"(msg %d, slot %d)",
890 				state->id, service->localport,
891 				msg_type_str(type), size,
892 				service_quota->message_use_count,
893 				service_quota->slot_use_count);
894 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
895 			mutex_unlock(&state->slot_mutex);
896 			if (wait_for_completion_interruptible(
897 						&service_quota->quota_event))
898 				return VCHIQ_RETRY;
899 			if (service->closing)
900 				return VCHIQ_ERROR;
901 			if (mutex_lock_killable(&state->slot_mutex))
902 				return VCHIQ_RETRY;
903 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
904 				/* The service has been closed */
905 				mutex_unlock(&state->slot_mutex);
906 				return VCHIQ_ERROR;
907 			}
908 			spin_lock(&quota_spinlock);
909 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
910 				state->local_tx_pos + stride - 1);
911 		}
912 
913 		spin_unlock(&quota_spinlock);
914 	}
915 
916 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
917 
918 	if (!header) {
919 		if (service)
920 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
921 		/* In the event of a failure, return the mutex to the
922 		   state it was in */
923 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
924 			mutex_unlock(&state->slot_mutex);
925 		return VCHIQ_RETRY;
926 	}
927 
928 	if (type == VCHIQ_MSG_DATA) {
929 		ssize_t callback_result;
930 		int tx_end_index;
931 		int slot_use_count;
932 
933 		vchiq_log_info(vchiq_core_log_level,
934 			"%d: qm %s@%pK,%zx (%d->%d)",
935 			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
936 			header, size, VCHIQ_MSG_SRCPORT(msgid),
937 			VCHIQ_MSG_DSTPORT(msgid));
938 
939 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
940 				 QMFLAGS_NO_MUTEX_UNLOCK));
941 
942 		callback_result =
943 			copy_message_data(copy_callback, context,
944 					  header->data, size);
945 
946 		if (callback_result < 0) {
947 			mutex_unlock(&state->slot_mutex);
948 			VCHIQ_SERVICE_STATS_INC(service,
949 						error_count);
950 			return VCHIQ_ERROR;
951 		}
952 
953 		if (SRVTRACE_ENABLED(service,
954 				     VCHIQ_LOG_INFO))
955 			vchiq_log_dump_mem("Sent", 0,
956 					   header->data,
957 					   min((size_t)16,
958 					       (size_t)callback_result));
959 
960 		spin_lock(&quota_spinlock);
961 		service_quota->message_use_count++;
962 
963 		tx_end_index =
964 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
965 
966 		/* If this transmission can't fit in the last slot used by any
967 		** service, the data_use_count must be increased. */
968 		if (tx_end_index != state->previous_data_index) {
969 			state->previous_data_index = tx_end_index;
970 			state->data_use_count++;
971 		}
972 
973 		/* If this isn't the same slot last used by this service,
974 		** the service's slot_use_count must be increased. */
975 		if (tx_end_index != service_quota->previous_tx_index) {
976 			service_quota->previous_tx_index = tx_end_index;
977 			slot_use_count = ++service_quota->slot_use_count;
978 		} else {
979 			slot_use_count = 0;
980 		}
981 
982 		spin_unlock(&quota_spinlock);
983 
984 		if (slot_use_count)
985 			vchiq_log_trace(vchiq_core_log_level,
986 				"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
987 				state->id, service->localport,
988 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
989 				slot_use_count, header);
990 
991 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
992 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
993 	} else {
994 		vchiq_log_info(vchiq_core_log_level,
995 			"%d: qm %s@%pK,%zx (%d->%d)", state->id,
996 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
997 			header, size, VCHIQ_MSG_SRCPORT(msgid),
998 			VCHIQ_MSG_DSTPORT(msgid));
999 		if (size != 0) {
1000 			/* It is assumed for now that this code path
1001 			 * only happens from calls inside this file.
1002 			 *
1003 			 * External callers are through the vchiq_queue_message
1004 			 * path which always sets the type to be VCHIQ_MSG_DATA
1005 			 *
1006 			 * At first glance this appears to be correct but
1007 			 * more review is needed.
1008 			 */
1009 			copy_message_data(copy_callback, context,
1010 					  header->data, size);
1011 		}
1012 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1013 	}
1014 
1015 	header->msgid = msgid;
1016 	header->size = size;
1017 
1018 	{
1019 		int svc_fourcc;
1020 
1021 		svc_fourcc = service
1022 			? service->base.fourcc
1023 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1024 
1025 		vchiq_log_info(SRVTRACE_LEVEL(service),
1026 			"Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1027 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1028 			VCHIQ_MSG_TYPE(msgid),
1029 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1030 			VCHIQ_MSG_SRCPORT(msgid),
1031 			VCHIQ_MSG_DSTPORT(msgid),
1032 			size);
1033 	}
1034 
1035 	/* Make sure the new header is visible to the peer. */
1036 	wmb();
1037 
1038 	/* Make the new tx_pos visible to the peer. */
1039 	local->tx_pos = state->local_tx_pos;
1040 	wmb();
1041 
1042 	if (service && (type == VCHIQ_MSG_CLOSE))
1043 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1044 
1045 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1046 		mutex_unlock(&state->slot_mutex);
1047 
1048 	remote_event_signal(&state->remote->trigger);
1049 
1050 	return VCHIQ_SUCCESS;
1051 }
1052 
1053 /* Called by the slot handler and application threads */
1054 static enum vchiq_status
1055 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1056 		   int msgid,
1057 		   ssize_t (*copy_callback)(void *context, void *dest,
1058 					    size_t offset, size_t maxsize),
1059 		   void *context, int size, int is_blocking)
1060 {
1061 	struct vchiq_shared_state *local;
1062 	struct vchiq_header *header;
1063 	ssize_t callback_result;
1064 
1065 	local = state->local;
1066 
1067 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1068 	    mutex_lock_killable(&state->sync_mutex))
1069 		return VCHIQ_RETRY;
1070 
1071 	remote_event_wait(&state->sync_release_event, &local->sync_release);
1072 
1073 	rmb();
1074 
1075 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1076 		local->slot_sync);
1077 
1078 	{
1079 		int oldmsgid = header->msgid;
1080 
1081 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1082 			vchiq_log_error(vchiq_core_log_level,
1083 				"%d: qms - msgid %x, not PADDING",
1084 				state->id, oldmsgid);
1085 	}
1086 
1087 	vchiq_log_info(vchiq_sync_log_level,
1088 		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
1089 		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1090 		       header, size, VCHIQ_MSG_SRCPORT(msgid),
1091 		       VCHIQ_MSG_DSTPORT(msgid));
1092 
1093 	callback_result =
1094 		copy_message_data(copy_callback, context,
1095 				  header->data, size);
1096 
1097 	if (callback_result < 0) {
1098 		mutex_unlock(&state->slot_mutex);
1099 		VCHIQ_SERVICE_STATS_INC(service,
1100 					error_count);
1101 		return VCHIQ_ERROR;
1102 	}
1103 
1104 	if (service) {
1105 		if (SRVTRACE_ENABLED(service,
1106 				     VCHIQ_LOG_INFO))
1107 			vchiq_log_dump_mem("Sent", 0,
1108 					   header->data,
1109 					   min((size_t)16,
1110 					       (size_t)callback_result));
1111 
1112 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1113 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1114 	} else {
1115 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1116 	}
1117 
1118 	header->size = size;
1119 	header->msgid = msgid;
1120 
1121 	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1122 		int svc_fourcc;
1123 
1124 		svc_fourcc = service
1125 			? service->base.fourcc
1126 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1127 
1128 		vchiq_log_trace(vchiq_sync_log_level,
1129 			"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1130 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1131 			VCHIQ_MSG_TYPE(msgid),
1132 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1133 			VCHIQ_MSG_SRCPORT(msgid),
1134 			VCHIQ_MSG_DSTPORT(msgid),
1135 			size);
1136 	}
1137 
1138 	remote_event_signal(&state->remote->sync_trigger);
1139 
1140 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1141 		mutex_unlock(&state->sync_mutex);
1142 
1143 	return VCHIQ_SUCCESS;
1144 }
1145 
1146 static inline void
1147 claim_slot(struct vchiq_slot_info *slot)
1148 {
1149 	slot->use_count++;
1150 }
1151 
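/* Drop a claim on a slot. When the last claim is released, the slot index is
** passed back to the remote via its recycle queue and the recycle event is
** signalled. */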
1152 static void
1153 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1154 	     struct vchiq_header *header, struct vchiq_service *service)
1155 {
1156 	int release_count;
1157 
1158 	mutex_lock(&state->recycle_mutex);
1159 
1160 	if (header) {
1161 		int msgid = header->msgid;
1162 
1163 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1164 			(service && service->closing)) {
1165 			mutex_unlock(&state->recycle_mutex);
1166 			return;
1167 		}
1168 
1169 		/* Rewrite the message header to prevent a double
1170 		** release */
1171 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1172 	}
1173 
1174 	release_count = slot_info->release_count;
1175 	slot_info->release_count = ++release_count;
1176 
1177 	if (release_count == slot_info->use_count) {
1178 		int slot_queue_recycle;
1179 		/* Add to the freed queue */
1180 
1181 		/* A read barrier is necessary here to prevent speculative
1182 		** fetches of remote->slot_queue_recycle from overtaking the
1183 		** mutex. */
1184 		rmb();
1185 
1186 		slot_queue_recycle = state->remote->slot_queue_recycle;
1187 		state->remote->slot_queue[slot_queue_recycle &
1188 			VCHIQ_SLOT_QUEUE_MASK] =
1189 			SLOT_INDEX_FROM_INFO(state, slot_info);
1190 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1191 		vchiq_log_info(vchiq_core_log_level,
1192 			"%d: %s %d - recycle->%x", state->id, __func__,
1193 			SLOT_INDEX_FROM_INFO(state, slot_info),
1194 			state->remote->slot_queue_recycle);
1195 
1196 		/* A write barrier is necessary, but remote_event_signal
1197 		** contains one. */
1198 		remote_event_signal(&state->remote->recycle);
1199 	}
1200 
1201 	mutex_unlock(&state->recycle_mutex);
1202 }
1203 
1204 /* Called by the slot handler - don't hold the bulk mutex */
1205 static enum vchiq_status
1206 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1207 	     int retry_poll)
1208 {
1209 	enum vchiq_status status = VCHIQ_SUCCESS;
1210 
1211 	vchiq_log_trace(vchiq_core_log_level,
1212 		"%d: nb:%d %cx - p=%x rn=%x r=%x",
1213 		service->state->id, service->localport,
1214 		(queue == &service->bulk_tx) ? 't' : 'r',
1215 		queue->process, queue->remote_notify, queue->remove);
1216 
1217 	queue->remote_notify = queue->process;
1218 
1219 	if (status == VCHIQ_SUCCESS) {
1220 		while (queue->remove != queue->remote_notify) {
1221 			struct vchiq_bulk *bulk =
1222 				&queue->bulks[BULK_INDEX(queue->remove)];
1223 
1224 			/* Only generate callbacks for non-dummy bulk
1225 			** requests, and non-terminated services */
1226 			if (bulk->data && service->instance) {
1227 				if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1228 					if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1229 						VCHIQ_SERVICE_STATS_INC(service,
1230 							bulk_tx_count);
1231 						VCHIQ_SERVICE_STATS_ADD(service,
1232 							bulk_tx_bytes,
1233 							bulk->actual);
1234 					} else {
1235 						VCHIQ_SERVICE_STATS_INC(service,
1236 							bulk_rx_count);
1237 						VCHIQ_SERVICE_STATS_ADD(service,
1238 							bulk_rx_bytes,
1239 							bulk->actual);
1240 					}
1241 				} else {
1242 					VCHIQ_SERVICE_STATS_INC(service,
1243 						bulk_aborted_count);
1244 				}
1245 				if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1246 					struct bulk_waiter *waiter;
1247 
1248 					spin_lock(&bulk_waiter_spinlock);
1249 					waiter = bulk->userdata;
1250 					if (waiter) {
1251 						waiter->actual = bulk->actual;
1252 						complete(&waiter->event);
1253 					}
1254 					spin_unlock(&bulk_waiter_spinlock);
1255 				} else if (bulk->mode ==
1256 					VCHIQ_BULK_MODE_CALLBACK) {
1257 					enum vchiq_reason reason = (bulk->dir ==
1258 						VCHIQ_BULK_TRANSMIT) ?
1259 						((bulk->actual ==
1260 						VCHIQ_BULK_ACTUAL_ABORTED) ?
1261 						VCHIQ_BULK_TRANSMIT_ABORTED :
1262 						VCHIQ_BULK_TRANSMIT_DONE) :
1263 						((bulk->actual ==
1264 						VCHIQ_BULK_ACTUAL_ABORTED) ?
1265 						VCHIQ_BULK_RECEIVE_ABORTED :
1266 						VCHIQ_BULK_RECEIVE_DONE);
1267 					status = make_service_callback(service,
1268 						reason,	NULL, bulk->userdata);
1269 					if (status == VCHIQ_RETRY)
1270 						break;
1271 				}
1272 			}
1273 
1274 			queue->remove++;
1275 			complete(&service->bulk_remove_event);
1276 		}
1277 		if (!retry_poll)
1278 			status = VCHIQ_SUCCESS;
1279 	}
1280 
1281 	if (status == VCHIQ_RETRY)
1282 		request_poll(service->state, service,
1283 			(queue == &service->bulk_tx) ?
1284 			VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1285 
1286 	return status;
1287 }
1288 
1289 /* Called by the slot handler thread */
1290 static void
1291 poll_services(struct vchiq_state *state)
1292 {
1293 	int group, i;
1294 
1295 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1296 		u32 flags;
1297 
1298 		flags = atomic_xchg(&state->poll_services[group], 0);
1299 		for (i = 0; flags; i++) {
1300 			if (flags & BIT(i)) {
1301 				struct vchiq_service *service =
1302 					find_service_by_port(state,
1303 						(group<<5) + i);
1304 				u32 service_flags;
1305 
1306 				flags &= ~BIT(i);
1307 				if (!service)
1308 					continue;
1309 				service_flags =
1310 					atomic_xchg(&service->poll_flags, 0);
1311 				if (service_flags &
1312 					BIT(VCHIQ_POLL_REMOVE)) {
1313 					vchiq_log_info(vchiq_core_log_level,
1314 						"%d: ps - remove %d<->%d",
1315 						state->id, service->localport,
1316 						service->remoteport);
1317 
1318 					/* Make it look like a client, because
1319 					   it must be removed and not left in
1320 					   the LISTENING state. */
1321 					service->public_fourcc =
1322 						VCHIQ_FOURCC_INVALID;
1323 
1324 					if (vchiq_close_service_internal(
1325 						service, 0/*!close_recvd*/) !=
1326 						VCHIQ_SUCCESS)
1327 						request_poll(state, service,
1328 							VCHIQ_POLL_REMOVE);
1329 				} else if (service_flags &
1330 					BIT(VCHIQ_POLL_TERMINATE)) {
1331 					vchiq_log_info(vchiq_core_log_level,
1332 						"%d: ps - terminate %d<->%d",
1333 						state->id, service->localport,
1334 						service->remoteport);
1335 					if (vchiq_close_service_internal(
1336 						service, 0/*!close_recvd*/) !=
1337 						VCHIQ_SUCCESS)
1338 						request_poll(state, service,
1339 							VCHIQ_POLL_TERMINATE);
1340 				}
1341 				if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1342 					notify_bulks(service,
1343 						&service->bulk_tx,
1344 						1/*retry_poll*/);
1345 				if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1346 					notify_bulks(service,
1347 						&service->bulk_rx,
1348 						1/*retry_poll*/);
1349 				unlock_service(service);
1350 			}
1351 		}
1352 	}
1353 }
1354 
1355 /* Called with the bulk_mutex held */
1356 static void
1357 abort_outstanding_bulks(struct vchiq_service *service,
1358 			struct vchiq_bulk_queue *queue)
1359 {
1360 	int is_tx = (queue == &service->bulk_tx);
1361 
1362 	vchiq_log_trace(vchiq_core_log_level,
1363 		"%d: aob:%d %cx - li=%x ri=%x p=%x",
1364 		service->state->id, service->localport, is_tx ? 't' : 'r',
1365 		queue->local_insert, queue->remote_insert, queue->process);
1366 
1367 	WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1368 	WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1369 
1370 	while ((queue->process != queue->local_insert) ||
1371 		(queue->process != queue->remote_insert)) {
1372 		struct vchiq_bulk *bulk =
1373 				&queue->bulks[BULK_INDEX(queue->process)];
1374 
1375 		if (queue->process == queue->remote_insert) {
1376 			/* fabricate a matching dummy bulk */
1377 			bulk->remote_data = NULL;
1378 			bulk->remote_size = 0;
1379 			queue->remote_insert++;
1380 		}
1381 
1382 		if (queue->process != queue->local_insert) {
1383 			vchiq_complete_bulk(bulk);
1384 
1385 			vchiq_log_info(SRVTRACE_LEVEL(service),
1386 				"%s %c%c%c%c d:%d ABORTED - tx len:%d, "
1387 				"rx len:%d",
1388 				is_tx ? "Send Bulk to" : "Recv Bulk from",
1389 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1390 				service->remoteport,
1391 				bulk->size,
1392 				bulk->remote_size);
1393 		} else {
1394 			/* fabricate a matching dummy bulk */
1395 			bulk->data = 0;
1396 			bulk->size = 0;
1397 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1398 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1399 				VCHIQ_BULK_RECEIVE;
1400 			queue->local_insert++;
1401 		}
1402 
1403 		queue->process++;
1404 	}
1405 }
1406 
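/* Handle an incoming OPEN request. Returns 1 if the message has been dealt
** with (including rejection with a CLOSE), or 0 if processing must be
** retried later. */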
1407 static int
1408 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1409 {
1410 	struct vchiq_service *service = NULL;
1411 	int msgid, size;
1412 	unsigned int localport, remoteport;
1413 
1414 	msgid = header->msgid;
1415 	size = header->size;
1416 	localport = VCHIQ_MSG_DSTPORT(msgid);
1417 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1418 	if (size >= sizeof(struct vchiq_open_payload)) {
1419 		const struct vchiq_open_payload *payload =
1420 			(struct vchiq_open_payload *)header->data;
1421 		unsigned int fourcc;
1422 
1423 		fourcc = payload->fourcc;
1424 		vchiq_log_info(vchiq_core_log_level,
1425 			"%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1426 			state->id, header, localport,
1427 			VCHIQ_FOURCC_AS_4CHARS(fourcc));
1428 
1429 		service = get_listening_service(state, fourcc);
1430 
1431 		if (service) {
1432 			/* A matching service exists */
1433 			short version = payload->version;
1434 			short version_min = payload->version_min;
1435 
1436 			if ((service->version < version_min) ||
1437 				(version < service->version_min)) {
1438 				/* Version mismatch */
1439 				vchiq_loud_error_header();
1440 				vchiq_loud_error("%d: service %d (%c%c%c%c) "
1441 					"version mismatch - local (%d, min %d)"
1442 					" vs. remote (%d, min %d)",
1443 					state->id, service->localport,
1444 					VCHIQ_FOURCC_AS_4CHARS(fourcc),
1445 					service->version, service->version_min,
1446 					version, version_min);
1447 				vchiq_loud_error_footer();
1448 				unlock_service(service);
1449 				service = NULL;
1450 				goto fail_open;
1451 			}
1452 			service->peer_version = version;
1453 
1454 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1455 				struct vchiq_openack_payload ack_payload = {
1456 					service->version
1457 				};
1458 
1459 				if (state->version_common <
1460 				    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1461 					service->sync = 0;
1462 
1463 				/* Acknowledge the OPEN */
1464 				if (service->sync) {
1465 					if (queue_message_sync(
1466 						state,
1467 						NULL,
1468 						VCHIQ_MAKE_MSG(
1469 							VCHIQ_MSG_OPENACK,
1470 							service->localport,
1471 							remoteport),
1472 						memcpy_copy_callback,
1473 						&ack_payload,
1474 						sizeof(ack_payload),
1475 						0) == VCHIQ_RETRY)
1476 						goto bail_not_ready;
1477 				} else {
1478 					if (queue_message(state,
1479 							NULL,
1480 							VCHIQ_MAKE_MSG(
1481 							VCHIQ_MSG_OPENACK,
1482 							service->localport,
1483 							remoteport),
1484 						memcpy_copy_callback,
1485 						&ack_payload,
1486 						sizeof(ack_payload),
1487 						0) == VCHIQ_RETRY)
1488 						goto bail_not_ready;
1489 				}
1490 
1491 				/* The service is now open */
1492 				vchiq_set_service_state(service,
1493 					service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1494 					: VCHIQ_SRVSTATE_OPEN);
1495 			}
1496 
1497 			/* Success - the message has been dealt with */
1498 			unlock_service(service);
1499 			return 1;
1500 		}
1501 	}
1502 
1503 fail_open:
1504 	/* No available service, or an invalid request - send a CLOSE */
1505 	if (queue_message(state, NULL,
1506 		VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1507 		NULL, NULL, 0, 0) == VCHIQ_RETRY)
1508 		goto bail_not_ready;
1509 
1510 	return 1;
1511 
1512 bail_not_ready:
1513 	if (service)
1514 		unlock_service(service);
1515 
1516 	return 0;
1517 }
1518 
1519 /* Called by the slot handler thread */
1520 static void
1521 parse_rx_slots(struct vchiq_state *state)
1522 {
1523 	struct vchiq_shared_state *remote = state->remote;
1524 	struct vchiq_service *service = NULL;
1525 	int tx_pos;
1526 
1527 	DEBUG_INITIALISE(state->local)
1528 
1529 	tx_pos = remote->tx_pos;
1530 
1531 	while (state->rx_pos != tx_pos) {
1532 		struct vchiq_header *header;
1533 		int msgid, size;
1534 		int type;
1535 		unsigned int localport, remoteport;
1536 
1537 		DEBUG_TRACE(PARSE_LINE);
1538 		if (!state->rx_data) {
1539 			int rx_index;
1540 
1541 			WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1542 			rx_index = remote->slot_queue[
1543 				SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
1544 				VCHIQ_SLOT_QUEUE_MASK];
1545 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1546 				rx_index);
1547 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1548 
1549 			/* Initialise use_count to one, and increment
1550 			** release_count at the end of the slot to avoid
1551 			** releasing the slot prematurely. */
1552 			state->rx_info->use_count = 1;
1553 			state->rx_info->release_count = 0;
1554 		}
1555 
1556 		header = (struct vchiq_header *)(state->rx_data +
1557 			(state->rx_pos & VCHIQ_SLOT_MASK));
1558 		DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1559 		msgid = header->msgid;
1560 		DEBUG_VALUE(PARSE_MSGID, msgid);
1561 		size = header->size;
1562 		type = VCHIQ_MSG_TYPE(msgid);
1563 		localport = VCHIQ_MSG_DSTPORT(msgid);
1564 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
1565 
1566 		if (type != VCHIQ_MSG_DATA)
1567 			VCHIQ_STATS_INC(state, ctrl_rx_count);
1568 
1569 		switch (type) {
1570 		case VCHIQ_MSG_OPENACK:
1571 		case VCHIQ_MSG_CLOSE:
1572 		case VCHIQ_MSG_DATA:
1573 		case VCHIQ_MSG_BULK_RX:
1574 		case VCHIQ_MSG_BULK_TX:
1575 		case VCHIQ_MSG_BULK_RX_DONE:
1576 		case VCHIQ_MSG_BULK_TX_DONE:
1577 			service = find_service_by_port(state, localport);
1578 			if ((!service ||
1579 			     ((service->remoteport != remoteport) &&
1580 			      (service->remoteport != VCHIQ_PORT_FREE))) &&
1581 			    (localport == 0) &&
1582 			    (type == VCHIQ_MSG_CLOSE)) {
1583 				/* This could be a CLOSE from a client which
1584 				   hadn't yet received the OPENACK - look for
1585 				   the connected service */
1586 				if (service)
1587 					unlock_service(service);
1588 				service = get_connected_service(state,
1589 					remoteport);
1590 				if (service)
1591 					vchiq_log_warning(vchiq_core_log_level,
1592 						"%d: prs %s@%pK (%d->%d) - found connected service %d",
1593 						state->id, msg_type_str(type),
1594 						header, remoteport, localport,
1595 						service->localport);
1596 			}
1597 
1598 			if (!service) {
1599 				vchiq_log_error(vchiq_core_log_level,
1600 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1601 					state->id, msg_type_str(type),
1602 					header, remoteport, localport,
1603 					localport);
1604 				goto skip_message;
1605 			}
1606 			break;
1607 		default:
1608 			break;
1609 		}
1610 
1611 		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1612 			int svc_fourcc;
1613 
1614 			svc_fourcc = service
1615 				? service->base.fourcc
1616 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1617 			vchiq_log_info(SRVTRACE_LEVEL(service),
1618 				"Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
1619 				"len:%d",
1620 				msg_type_str(type), type,
1621 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1622 				remoteport, localport, size);
1623 			if (size > 0)
1624 				vchiq_log_dump_mem("Rcvd", 0, header->data,
1625 					min(16, size));
1626 		}
1627 
1628 		if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1629 		    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1630 			vchiq_log_error(vchiq_core_log_level,
1631 				"header %pK (msgid %x) - size %x too big for slot",
1632 				header, (unsigned int)msgid,
1633 				(unsigned int)size);
1634 			WARN(1, "oversized for slot\n");
1635 		}
1636 
1637 		switch (type) {
1638 		case VCHIQ_MSG_OPEN:
1639 			WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1640 			if (!parse_open(state, header))
1641 				goto bail_not_ready;
1642 			break;
1643 		case VCHIQ_MSG_OPENACK:
1644 			if (size >= sizeof(struct vchiq_openack_payload)) {
1645 				const struct vchiq_openack_payload *payload =
1646 					(struct vchiq_openack_payload *)
1647 					header->data;
1648 				service->peer_version = payload->version;
1649 			}
1650 			vchiq_log_info(vchiq_core_log_level,
1651 				"%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1652 				state->id, header, size, remoteport, localport,
1653 				service->peer_version);
1654 			if (service->srvstate ==
1655 				VCHIQ_SRVSTATE_OPENING) {
1656 				service->remoteport = remoteport;
1657 				vchiq_set_service_state(service,
1658 					VCHIQ_SRVSTATE_OPEN);
1659 				complete(&service->remove_event);
1660 			} else
1661 				vchiq_log_error(vchiq_core_log_level,
1662 					"OPENACK received in state %s",
1663 					srvstate_names[service->srvstate]);
1664 			break;
1665 		case VCHIQ_MSG_CLOSE:
1666 			WARN_ON(size != 0); /* There should be no data */
1667 
1668 			vchiq_log_info(vchiq_core_log_level,
1669 				"%d: prs CLOSE@%pK (%d->%d)",
1670 				state->id, header, remoteport, localport);
1671 
1672 			mark_service_closing_internal(service, 1);
1673 
1674 			if (vchiq_close_service_internal(service,
1675 				1/*close_recvd*/) == VCHIQ_RETRY)
1676 				goto bail_not_ready;
1677 
1678 			vchiq_log_info(vchiq_core_log_level,
1679 				"Close Service %c%c%c%c s:%u d:%d",
1680 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1681 				service->localport,
1682 				service->remoteport);
1683 			break;
1684 		case VCHIQ_MSG_DATA:
1685 			vchiq_log_info(vchiq_core_log_level,
1686 				"%d: prs DATA@%pK,%x (%d->%d)",
1687 				state->id, header, size, remoteport, localport);
1688 
1689 			if ((service->remoteport == remoteport)
1690 				&& (service->srvstate ==
1691 				VCHIQ_SRVSTATE_OPEN)) {
1692 				header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1693 				claim_slot(state->rx_info);
1694 				DEBUG_TRACE(PARSE_LINE);
1695 				if (make_service_callback(service,
1696 					VCHIQ_MESSAGE_AVAILABLE, header,
1697 					NULL) == VCHIQ_RETRY) {
1698 					DEBUG_TRACE(PARSE_LINE);
1699 					goto bail_not_ready;
1700 				}
1701 				VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1702 				VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1703 					size);
1704 			} else {
1705 				VCHIQ_STATS_INC(state, error_count);
1706 			}
1707 			break;
1708 		case VCHIQ_MSG_CONNECT:
1709 			vchiq_log_info(vchiq_core_log_level,
1710 				"%d: prs CONNECT@%pK", state->id, header);
1711 			state->version_common =	((struct vchiq_slot_zero *)
1712 						 state->slot_data)->version;
1713 			complete(&state->connect);
1714 			break;
1715 		case VCHIQ_MSG_BULK_RX:
1716 		case VCHIQ_MSG_BULK_TX:
1717 			/*
1718 			 * We should never receive a bulk request from the
1719 			 * other side since we're not setup to perform as the
1720 			 * master.
1721 			 */
1722 			WARN_ON(1);
1723 			break;
1724 		case VCHIQ_MSG_BULK_RX_DONE:
1725 		case VCHIQ_MSG_BULK_TX_DONE:
1726 			if ((service->remoteport == remoteport)
1727 				&& (service->srvstate !=
1728 				VCHIQ_SRVSTATE_FREE)) {
1729 				struct vchiq_bulk_queue *queue;
1730 				struct vchiq_bulk *bulk;
1731 
1732 				queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1733 					&service->bulk_rx : &service->bulk_tx;
1734 
1735 				DEBUG_TRACE(PARSE_LINE);
1736 				if (mutex_lock_killable(&service->bulk_mutex)) {
1737 					DEBUG_TRACE(PARSE_LINE);
1738 					goto bail_not_ready;
1739 				}
1740 				if ((int)(queue->remote_insert -
1741 					queue->local_insert) >= 0) {
1742 					vchiq_log_error(vchiq_core_log_level,
1743 						"%d: prs %s@%pK (%d->%d) "
1744 						"unexpected (ri=%d,li=%d)",
1745 						state->id, msg_type_str(type),
1746 						header, remoteport, localport,
1747 						queue->remote_insert,
1748 						queue->local_insert);
1749 					mutex_unlock(&service->bulk_mutex);
1750 					break;
1751 				}
1752 				if (queue->process != queue->remote_insert) {
1753 					pr_err("%s: p %x != ri %x\n",
1754 					       __func__,
1755 					       queue->process,
1756 					       queue->remote_insert);
1757 					mutex_unlock(&service->bulk_mutex);
1758 					goto bail_not_ready;
1759 				}
1760 
1761 				bulk = &queue->bulks[
1762 					BULK_INDEX(queue->remote_insert)];
1763 				bulk->actual = *(int *)header->data;
1764 				queue->remote_insert++;
1765 
1766 				vchiq_log_info(vchiq_core_log_level,
1767 					"%d: prs %s@%pK (%d->%d) %x@%pad",
1768 					state->id, msg_type_str(type),
1769 					header, remoteport, localport,
1770 					bulk->actual, &bulk->data);
1771 
1772 				vchiq_log_trace(vchiq_core_log_level,
1773 					"%d: prs:%d %cx li=%x ri=%x p=%x",
1774 					state->id, localport,
1775 					(type == VCHIQ_MSG_BULK_RX_DONE) ?
1776 						'r' : 't',
1777 					queue->local_insert,
1778 					queue->remote_insert, queue->process);
1779 
1780 				DEBUG_TRACE(PARSE_LINE);
1781 				WARN_ON(queue->process == queue->local_insert);
1782 				vchiq_complete_bulk(bulk);
1783 				queue->process++;
1784 				mutex_unlock(&service->bulk_mutex);
1785 				DEBUG_TRACE(PARSE_LINE);
1786 				notify_bulks(service, queue, 1/*retry_poll*/);
1787 				DEBUG_TRACE(PARSE_LINE);
1788 			}
1789 			break;
1790 		case VCHIQ_MSG_PADDING:
1791 			vchiq_log_trace(vchiq_core_log_level,
1792 				"%d: prs PADDING@%pK,%x",
1793 				state->id, header, size);
1794 			break;
1795 		case VCHIQ_MSG_PAUSE:
1796 			/* If initiated, signal the application thread */
1797 			vchiq_log_trace(vchiq_core_log_level,
1798 				"%d: prs PAUSE@%pK,%x",
1799 				state->id, header, size);
1800 			if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1801 				vchiq_log_error(vchiq_core_log_level,
1802 					"%d: PAUSE received in state PAUSED",
1803 					state->id);
1804 				break;
1805 			}
1806 			if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1807 				/* Send a PAUSE in response */
1808 				if (queue_message(state, NULL,
1809 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1810 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1811 				    == VCHIQ_RETRY)
1812 					goto bail_not_ready;
1813 			}
1814 			/* At this point slot_mutex is held */
1815 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1816 			break;
1817 		case VCHIQ_MSG_RESUME:
1818 			vchiq_log_trace(vchiq_core_log_level,
1819 				"%d: prs RESUME@%pK,%x",
1820 				state->id, header, size);
1821 			/* Release the slot mutex */
1822 			mutex_unlock(&state->slot_mutex);
1823 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1824 			break;
1825 
1826 		case VCHIQ_MSG_REMOTE_USE:
1827 			vchiq_on_remote_use(state);
1828 			break;
1829 		case VCHIQ_MSG_REMOTE_RELEASE:
1830 			vchiq_on_remote_release(state);
1831 			break;
1832 		case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1833 			break;
1834 
1835 		default:
1836 			vchiq_log_error(vchiq_core_log_level,
1837 				"%d: prs invalid msgid %x@%pK,%x",
1838 				state->id, msgid, header, size);
1839 			WARN(1, "invalid message\n");
1840 			break;
1841 		}
1842 
1843 skip_message:
1844 		if (service) {
1845 			unlock_service(service);
1846 			service = NULL;
1847 		}
1848 
1849 		state->rx_pos += calc_stride(size);
1850 
1851 		DEBUG_TRACE(PARSE_LINE);
1852 		/* Perform some housekeeping when the end of the slot is
1853 		** reached. */
1854 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1855 			/* Remove the extra reference count. */
1856 			release_slot(state, state->rx_info, NULL, NULL);
1857 			state->rx_data = NULL;
1858 		}
1859 	}
1860 
1861 bail_not_ready:
1862 	if (service)
1863 		unlock_service(service);
1864 }
1865 
1866 /* Called by the slot handler thread */
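/*
 * The slot handler loops forever: it waits on the local trigger event,
 * services any deferred work flagged via state->poll_needed (polling
 * services when CONNECTED, sending PAUSE when PAUSING and RESUME when
 * RESUMING), and then parses any newly arrived receive slots.
 */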
1867 static int
1868 slot_handler_func(void *v)
1869 {
1870 	struct vchiq_state *state = v;
1871 	struct vchiq_shared_state *local = state->local;
1872 
1873 	DEBUG_INITIALISE(local)
1874 
1875 	while (1) {
1876 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1877 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1878 		remote_event_wait(&state->trigger_event, &local->trigger);
1879 
1880 		rmb();
1881 
1882 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1883 		if (state->poll_needed) {
1884 
1885 			state->poll_needed = 0;
1886 
1887 			/* Handle service polling and other rare conditions here
1888 			** out of the mainline code */
1889 			switch (state->conn_state) {
1890 			case VCHIQ_CONNSTATE_CONNECTED:
1891 				/* Poll the services as requested */
1892 				poll_services(state);
1893 				break;
1894 
1895 			case VCHIQ_CONNSTATE_PAUSING:
1896 				if (queue_message(state, NULL,
1897 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1898 					NULL, NULL, 0,
1899 					QMFLAGS_NO_MUTEX_UNLOCK)
1900 				    != VCHIQ_RETRY) {
1901 					vchiq_set_conn_state(state,
1902 						VCHIQ_CONNSTATE_PAUSE_SENT);
1903 				} else {
1904 					/* Retry later */
1905 					state->poll_needed = 1;
1906 				}
1907 				break;
1908 
1909 			case VCHIQ_CONNSTATE_RESUMING:
1910 				if (queue_message(state, NULL,
1911 					VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1912 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1913 					!= VCHIQ_RETRY) {
1914 					vchiq_set_conn_state(state,
1915 						VCHIQ_CONNSTATE_CONNECTED);
1916 				} else {
1917 					/* This should really be impossible,
1918 					** since the PAUSE should have flushed
1919 					** through outstanding messages. */
1920 					vchiq_log_error(vchiq_core_log_level,
1921 						"Failed to send RESUME "
1922 						"message");
1923 				}
1924 				break;
1925 			default:
1926 				break;
1927 			}
1928 
1929 		}
1930 
1931 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1932 		parse_rx_slots(state);
1933 	}
1934 	return 0;
1935 }
1936 
1937 /* Called by the recycle thread */
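/*
 * The recycle thread allocates a scratch bitmap covering every possible
 * service, then loops waiting for the remote side to signal the recycle
 * event and hands freed slots back to the pool via process_free_queue().
 */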
1938 static int
1939 recycle_func(void *v)
1940 {
1941 	struct vchiq_state *state = v;
1942 	struct vchiq_shared_state *local = state->local;
1943 	BITSET_T *found;
1944 	size_t length;
1945 
1946 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1947 
1948 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1949 			      GFP_KERNEL);
1950 	if (!found)
1951 		return -ENOMEM;
1952 
1953 	while (1) {
1954 		remote_event_wait(&state->recycle_event, &local->recycle);
1955 
1956 		process_free_queue(state, found, length);
1957 	}
1958 	return 0;
1959 }
1960 
1961 /* Called by the sync thread */
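/*
 * The sync thread services the single dedicated synchronous slot. It
 * waits for the sync trigger, looks up the destination service and
 * handles OPENACK (recording the peer version and completing the open)
 * and DATA messages; anything else is logged and the slot released.
 */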
1962 static int
1963 sync_func(void *v)
1964 {
1965 	struct vchiq_state *state = v;
1966 	struct vchiq_shared_state *local = state->local;
1967 	struct vchiq_header *header =
1968 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1969 			state->remote->slot_sync);
1970 
1971 	while (1) {
1972 		struct vchiq_service *service;
1973 		int msgid, size;
1974 		int type;
1975 		unsigned int localport, remoteport;
1976 
1977 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
1978 
1979 		rmb();
1980 
1981 		msgid = header->msgid;
1982 		size = header->size;
1983 		type = VCHIQ_MSG_TYPE(msgid);
1984 		localport = VCHIQ_MSG_DSTPORT(msgid);
1985 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
1986 
1987 		service = find_service_by_port(state, localport);
1988 
1989 		if (!service) {
1990 			vchiq_log_error(vchiq_sync_log_level,
1991 				"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
1992 				state->id, msg_type_str(type),
1993 				header, remoteport, localport, localport);
1994 			release_message_sync(state, header);
1995 			continue;
1996 		}
1997 
1998 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1999 			int svc_fourcc;
2000 
2001 			svc_fourcc = service
2002 				? service->base.fourcc
2003 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2004 			vchiq_log_trace(vchiq_sync_log_level,
2005 				"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2006 				msg_type_str(type),
2007 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2008 				remoteport, localport, size);
2009 			if (size > 0)
2010 				vchiq_log_dump_mem("Rcvd", 0, header->data,
2011 					min(16, size));
2012 		}
2013 
2014 		switch (type) {
2015 		case VCHIQ_MSG_OPENACK:
2016 			if (size >= sizeof(struct vchiq_openack_payload)) {
2017 				const struct vchiq_openack_payload *payload =
2018 					(struct vchiq_openack_payload *)
2019 					header->data;
2020 				service->peer_version = payload->version;
2021 			}
2022 			vchiq_log_info(vchiq_sync_log_level,
2023 				"%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2024 				state->id, header, size, remoteport, localport,
2025 				service->peer_version);
2026 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2027 				service->remoteport = remoteport;
2028 				vchiq_set_service_state(service,
2029 					VCHIQ_SRVSTATE_OPENSYNC);
2030 				service->sync = 1;
2031 				complete(&service->remove_event);
2032 			}
2033 			release_message_sync(state, header);
2034 			break;
2035 
2036 		case VCHIQ_MSG_DATA:
2037 			vchiq_log_trace(vchiq_sync_log_level,
2038 				"%d: sf DATA@%pK,%x (%d->%d)",
2039 				state->id, header, size, remoteport, localport);
2040 
2041 			if ((service->remoteport == remoteport) &&
2042 				(service->srvstate ==
2043 				VCHIQ_SRVSTATE_OPENSYNC)) {
2044 				if (make_service_callback(service,
2045 					VCHIQ_MESSAGE_AVAILABLE, header,
2046 					NULL) == VCHIQ_RETRY)
2047 					vchiq_log_error(vchiq_sync_log_level,
2048 						"synchronous callback to "
2049 						"service %d returns "
2050 						"VCHIQ_RETRY",
2051 						localport);
2052 			}
2053 			break;
2054 
2055 		default:
2056 			vchiq_log_error(vchiq_sync_log_level,
2057 				"%d: sf unexpected msgid %x@%pK,%x",
2058 				state->id, msgid, header, size);
2059 			release_message_sync(state, header);
2060 			break;
2061 		}
2062 
2063 		unlock_service(service);
2064 	}
2065 
2066 	return 0;
2067 }
2068 
2069 static void
2070 init_bulk_queue(struct vchiq_bulk_queue *queue)
2071 {
2072 	queue->local_insert = 0;
2073 	queue->remote_insert = 0;
2074 	queue->process = 0;
2075 	queue->remote_notify = 0;
2076 	queue->remove = 0;
2077 }
2078 
2079 inline const char *
2080 get_conn_state_name(enum vchiq_connstate conn_state)
2081 {
2082 	return conn_state_names[conn_state];
2083 }
2084 
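/*
 * Lay out the shared slot memory: round the base up to a slot boundary,
 * place slot_zero there, and split the remaining slots between master
 * and slave, each side getting a synchronous slot followed by half of
 * the data slots. Returns NULL if fewer than four data slots would
 * remain after reserving the slot-zero area.
 */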
2085 struct vchiq_slot_zero *
2086 vchiq_init_slots(void *mem_base, int mem_size)
2087 {
2088 	int mem_align =
2089 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2090 	struct vchiq_slot_zero *slot_zero =
2091 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2092 	int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2093 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2094 
2095 	/* Ensure there is enough memory to run an absolutely minimum system */
2096 	num_slots -= first_data_slot;
2097 
2098 	if (num_slots < 4) {
2099 		vchiq_log_error(vchiq_core_log_level,
2100 			"%s - insufficient memory %x bytes",
2101 			__func__, mem_size);
2102 		return NULL;
2103 	}
2104 
2105 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2106 
2107 	slot_zero->magic = VCHIQ_MAGIC;
2108 	slot_zero->version = VCHIQ_VERSION;
2109 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2110 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2111 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2112 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2113 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2114 
2115 	slot_zero->master.slot_sync = first_data_slot;
2116 	slot_zero->master.slot_first = first_data_slot + 1;
2117 	slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2118 	slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2119 	slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2120 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2121 
2122 	return slot_zero;
2123 }
2124 
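/*
 * One-time state initialisation for the slave side: adopt the slave
 * half of slot_zero as local and the master half as remote, set up the
 * mutexes, completions, quotas and remote events, prime the sync slot
 * with a padding message, and start the vchiq-slot, vchiq-recy and
 * vchiq-sync kernel threads before flagging local->initialised.
 */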
2125 enum vchiq_status
2126 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2127 {
2128 	struct vchiq_shared_state *local;
2129 	struct vchiq_shared_state *remote;
2130 	enum vchiq_status status;
2131 	char threadname[16];
2132 	int i;
2133 
2134 	if (vchiq_states[0]) {
2135 		pr_err("%s: VCHIQ state already initialized\n", __func__);
2136 		return VCHIQ_ERROR;
2137 	}
2138 
2139 	local = &slot_zero->slave;
2140 	remote = &slot_zero->master;
2141 
2142 	if (local->initialised) {
2143 		vchiq_loud_error_header();
2144 		if (remote->initialised)
2145 			vchiq_loud_error("local state has already been "
2146 				"initialised");
2147 		else
2148 			vchiq_loud_error("master/slave mismatch two slaves");
2149 		vchiq_loud_error_footer();
2150 		return VCHIQ_ERROR;
2151 	}
2152 
2153 	memset(state, 0, sizeof(struct vchiq_state));
2154 
2155 	/*
2156 		initialize shared state pointers
2157 	 */
2158 
2159 	state->local = local;
2160 	state->remote = remote;
2161 	state->slot_data = (struct vchiq_slot *)slot_zero;
2162 
2163 	/*
2164 		initialize events and mutexes
2165 	 */
2166 
2167 	init_completion(&state->connect);
2168 	mutex_init(&state->mutex);
2169 	mutex_init(&state->slot_mutex);
2170 	mutex_init(&state->recycle_mutex);
2171 	mutex_init(&state->sync_mutex);
2172 	mutex_init(&state->bulk_transfer_mutex);
2173 
2174 	init_completion(&state->slot_available_event);
2175 	init_completion(&state->slot_remove_event);
2176 	init_completion(&state->data_quota_event);
2177 
2178 	state->slot_queue_available = 0;
2179 
2180 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2181 		struct vchiq_service_quota *service_quota =
2182 			&state->service_quotas[i];
2183 		init_completion(&service_quota->quota_event);
2184 	}
2185 
2186 	for (i = local->slot_first; i <= local->slot_last; i++) {
2187 		local->slot_queue[state->slot_queue_available++] = i;
2188 		complete(&state->slot_available_event);
2189 	}
2190 
2191 	state->default_slot_quota = state->slot_queue_available/2;
2192 	state->default_message_quota =
2193 		min((unsigned short)(state->default_slot_quota * 256),
2194 		(unsigned short)~0);
2195 
2196 	state->previous_data_index = -1;
2197 	state->data_use_count = 0;
2198 	state->data_quota = state->slot_queue_available - 1;
2199 
2200 	remote_event_create(&state->trigger_event, &local->trigger);
2201 	local->tx_pos = 0;
2202 	remote_event_create(&state->recycle_event, &local->recycle);
2203 	local->slot_queue_recycle = state->slot_queue_available;
2204 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2205 	remote_event_create(&state->sync_release_event, &local->sync_release);
2206 
2207 	/* At start-of-day, the slot is empty and available */
2208 	((struct vchiq_header *)
2209 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2210 							VCHIQ_MSGID_PADDING;
2211 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2212 
2213 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2214 
2215 	status = vchiq_platform_init_state(state);
2216 	if (status != VCHIQ_SUCCESS)
2217 		return VCHIQ_ERROR;
2218 
2219 	/*
2220 		bring up slot handler thread
2221 	 */
2222 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2223 	state->slot_handler_thread = kthread_create(&slot_handler_func,
2224 		(void *)state,
2225 		threadname);
2226 
2227 	if (IS_ERR(state->slot_handler_thread)) {
2228 		vchiq_loud_error_header();
2229 		vchiq_loud_error("couldn't create thread %s", threadname);
2230 		vchiq_loud_error_footer();
2231 		return VCHIQ_ERROR;
2232 	}
2233 	set_user_nice(state->slot_handler_thread, -19);
2234 
2235 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2236 	state->recycle_thread = kthread_create(&recycle_func,
2237 		(void *)state,
2238 		threadname);
2239 	if (IS_ERR(state->recycle_thread)) {
2240 		vchiq_loud_error_header();
2241 		vchiq_loud_error("couldn't create thread %s", threadname);
2242 		vchiq_loud_error_footer();
2243 		goto fail_free_handler_thread;
2244 	}
2245 	set_user_nice(state->recycle_thread, -19);
2246 
2247 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2248 	state->sync_thread = kthread_create(&sync_func,
2249 		(void *)state,
2250 		threadname);
2251 	if (IS_ERR(state->sync_thread)) {
2252 		vchiq_loud_error_header();
2253 		vchiq_loud_error("couldn't create thread %s", threadname);
2254 		vchiq_loud_error_footer();
2255 		goto fail_free_recycle_thread;
2256 	}
2257 	set_user_nice(state->sync_thread, -20);
2258 
2259 	wake_up_process(state->slot_handler_thread);
2260 	wake_up_process(state->recycle_thread);
2261 	wake_up_process(state->sync_thread);
2262 
2263 	vchiq_states[0] = state;
2264 
2265 	/* Indicate readiness to the other side */
2266 	local->initialised = 1;
2267 
2268 	return status;
2269 
2270 fail_free_recycle_thread:
2271 	kthread_stop(state->recycle_thread);
2272 fail_free_handler_thread:
2273 	kthread_stop(state->slot_handler_thread);
2274 
2275 	return VCHIQ_ERROR;
2276 }
2277 
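/*
 * Per-service message queue helpers. msg_queue_read and msg_queue_write
 * are free-running counters; entries are indexed with a power-of-two
 * mask, so the queue holds up to VCHIQ_MAX_SLOTS headers. Pushing blocks
 * (waiting on msg_queue_pop) while the queue is full and signals
 * msg_queue_push when done; holding a message signals msg_queue_pop so
 * a blocked pusher can continue.
 */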
2278 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2279 {
2280 	struct vchiq_service *service = find_service_by_handle(handle);
2281 	int pos;
2282 
2283 	if (!service)
2284 		return;
2285 
2286 	while (service->msg_queue_write == service->msg_queue_read +
2287 		VCHIQ_MAX_SLOTS) {
2288 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2289 			flush_signals(current);
2290 	}
2291 
2292 	pos = service->msg_queue_write++ & (VCHIQ_MAX_SLOTS - 1);
2293 	service->msg_queue[pos] = header;
2294 
2295 	complete(&service->msg_queue_push);
2296 }
2297 EXPORT_SYMBOL(vchiq_msg_queue_push);
2298 
2299 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2300 {
2301 	struct vchiq_service *service = find_service_by_handle(handle);
2302 	struct vchiq_header *header;
2303 	int pos;
2304 
2305 	if (!service)
2306 		return NULL;
2307 
2308 	if (service->msg_queue_write == service->msg_queue_read)
2309 		return NULL;
2310 
2311 	while (service->msg_queue_write == service->msg_queue_read) {
2312 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2313 			flush_signals(current);
2314 	}
2315 
2316 	pos = service->msg_queue_read++ & (VCHIQ_MAX_SLOTS - 1);
2317 	header = service->msg_queue[pos];
2318 
2319 	complete(&service->msg_queue_pop);
2320 
2321 	return header;
2322 }
2323 EXPORT_SYMBOL(vchiq_msg_hold);
2324 
2325 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2326 {
2327 	if (!params->callback || !params->fourcc) {
2328 		vchiq_loud_error("Can't add service, invalid params\n");
2329 		return -EINVAL;
2330 	}
2331 
2332 	return 0;
2333 }
2334 
2335 /* Called from application thread when a client or server service is created. */
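/*
 * Allocates and initialises a vchiq_service, then, under state->mutex,
 * finds a free entry in state->services: clients (srvstate OPENING)
 * take the lowest free slot, while servers scan from the top and give
 * up if another server is already registered for the same fourcc in a
 * different instance or with a different callback. On success the
 * service is published with rcu_assign_pointer(), given a unique
 * handle and default quotas, and left with a ref_count of 1.
 */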
2336 struct vchiq_service *
2337 vchiq_add_service_internal(struct vchiq_state *state,
2338 			   const struct vchiq_service_params_kernel *params,
2339 			   int srvstate, struct vchiq_instance *instance,
2340 			   vchiq_userdata_term userdata_term)
2341 {
2342 	struct vchiq_service *service;
2343 	struct vchiq_service __rcu **pservice = NULL;
2344 	struct vchiq_service_quota *service_quota;
2345 	int ret;
2346 	int i;
2347 
2348 	ret = vchiq_validate_params(params);
2349 	if (ret)
2350 		return NULL;
2351 
2352 	service = kmalloc(sizeof(*service), GFP_KERNEL);
2353 	if (!service)
2354 		return service;
2355 
2356 	service->base.fourcc   = params->fourcc;
2357 	service->base.callback = params->callback;
2358 	service->base.userdata = params->userdata;
2359 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2360 	kref_init(&service->ref_count);
2361 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2362 	service->userdata_term = userdata_term;
2363 	service->localport     = VCHIQ_PORT_FREE;
2364 	service->remoteport    = VCHIQ_PORT_FREE;
2365 
2366 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2367 		VCHIQ_FOURCC_INVALID : params->fourcc;
2368 	service->client_id     = 0;
2369 	service->auto_close    = 1;
2370 	service->sync          = 0;
2371 	service->closing       = 0;
2372 	service->trace         = 0;
2373 	atomic_set(&service->poll_flags, 0);
2374 	service->version       = params->version;
2375 	service->version_min   = params->version_min;
2376 	service->state         = state;
2377 	service->instance      = instance;
2378 	service->service_use_count = 0;
2379 	service->msg_queue_read = 0;
2380 	service->msg_queue_write = 0;
2381 	init_bulk_queue(&service->bulk_tx);
2382 	init_bulk_queue(&service->bulk_rx);
2383 	init_completion(&service->remove_event);
2384 	init_completion(&service->bulk_remove_event);
2385 	init_completion(&service->msg_queue_pop);
2386 	init_completion(&service->msg_queue_push);
2387 	mutex_init(&service->bulk_mutex);
2388 	memset(&service->stats, 0, sizeof(service->stats));
2389 	memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2390 
2391 	/* Although it is perfectly possible to use a spinlock
2392 	** to protect the creation of services, it is overkill as it
2393 	** disables interrupts while the array is searched.
2394 	** The only danger is of another thread trying to create a
2395 	** service - service deletion is safe.
2396 	** Therefore it is preferable to use state->mutex which,
2397 	** although slower to claim, doesn't block interrupts while
2398 	** it is held.
2399 	*/
2400 
2401 	mutex_lock(&state->mutex);
2402 
2403 	/* Prepare to use a previously unused service */
2404 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2405 		pservice = &state->services[state->unused_service];
2406 
2407 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2408 		for (i = 0; i < state->unused_service; i++) {
2409 			if (!rcu_access_pointer(state->services[i])) {
2410 				pservice = &state->services[i];
2411 				break;
2412 			}
2413 		}
2414 	} else {
2415 		rcu_read_lock();
2416 		for (i = (state->unused_service - 1); i >= 0; i--) {
2417 			struct vchiq_service *srv;
2418 
2419 			srv = rcu_dereference(state->services[i]);
2420 			if (!srv)
2421 				pservice = &state->services[i];
2422 			else if ((srv->public_fourcc == params->fourcc)
2423 				&& ((srv->instance != instance) ||
2424 				(srv->base.callback !=
2425 				params->callback))) {
2426 				/* There is another server using this
2427 				** fourcc which doesn't match. */
2428 				pservice = NULL;
2429 				break;
2430 			}
2431 		}
2432 		rcu_read_unlock();
2433 	}
2434 
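	/*
	 * Service handles encode the allocation generation, owning state
	 * and local port: handle_seq supplies the high bits and is bumped
	 * by VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES on every allocation so
	 * that stale handles for a reused port can be detected.
	 */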
2435 	if (pservice) {
2436 		service->localport = (pservice - state->services);
2437 		if (!handle_seq)
2438 			handle_seq = VCHIQ_MAX_STATES *
2439 				 VCHIQ_MAX_SERVICES;
2440 		service->handle = handle_seq |
2441 			(state->id * VCHIQ_MAX_SERVICES) |
2442 			service->localport;
2443 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2444 		rcu_assign_pointer(*pservice, service);
2445 		if (pservice == &state->services[state->unused_service])
2446 			state->unused_service++;
2447 	}
2448 
2449 	mutex_unlock(&state->mutex);
2450 
2451 	if (!pservice) {
2452 		kfree(service);
2453 		return NULL;
2454 	}
2455 
2456 	service_quota = &state->service_quotas[service->localport];
2457 	service_quota->slot_quota = state->default_slot_quota;
2458 	service_quota->message_quota = state->default_message_quota;
2459 	if (service_quota->slot_use_count == 0)
2460 		service_quota->previous_tx_index =
2461 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2462 			- 1;
2463 
2464 	/* Bring this service online */
2465 	vchiq_set_service_state(service, srvstate);
2466 
2467 	vchiq_log_info(vchiq_core_msg_log_level,
2468 		"%s Service %c%c%c%c SrcPort:%d",
2469 		(srvstate == VCHIQ_SRVSTATE_OPENING)
2470 		? "Open" : "Add",
2471 		VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2472 		service->localport);
2473 
2474 	/* Don't unlock the service - leave it with a ref_count of 1. */
2475 
2476 	return service;
2477 }
2478 
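/*
 * Open a client service: send an OPEN message carrying the fourcc,
 * client id and supported version range, then block on remove_event
 * until the slot handler has processed the peer's OPENACK (the service
 * moves to OPEN/OPENSYNC) or its rejection. An interrupted wait returns
 * VCHIQ_RETRY so the caller can try again.
 */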
2479 enum vchiq_status
2480 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2481 {
2482 	struct vchiq_open_payload payload = {
2483 		service->base.fourcc,
2484 		client_id,
2485 		service->version,
2486 		service->version_min
2487 	};
2488 	enum vchiq_status status = VCHIQ_SUCCESS;
2489 
2490 	service->client_id = client_id;
2491 	vchiq_use_service_internal(service);
2492 	status = queue_message(service->state,
2493 			       NULL,
2494 			       VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2495 					      service->localport,
2496 					      0),
2497 			       memcpy_copy_callback,
2498 			       &payload,
2499 			       sizeof(payload),
2500 			       QMFLAGS_IS_BLOCKING);
2501 	if (status == VCHIQ_SUCCESS) {
2502 		/* Wait for the ACK/NAK */
2503 		if (wait_for_completion_interruptible(&service->remove_event)) {
2504 			status = VCHIQ_RETRY;
2505 			vchiq_release_service_internal(service);
2506 		} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2507 			   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2508 			if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2509 				vchiq_log_error(vchiq_core_log_level,
2510 						"%d: osi - srvstate = %s (ref %u)",
2511 						service->state->id,
2512 						srvstate_names[service->srvstate],
2513 						kref_read(&service->ref_count));
2514 			status = VCHIQ_ERROR;
2515 			VCHIQ_SERVICE_STATS_INC(service, error_count);
2516 			vchiq_release_service_internal(service);
2517 		}
2518 	}
2519 	return status;
2520 }
2521 
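/*
 * Release any messages in the remote slots that are still claimed by
 * this service. Synchronous services only need the shared sync slot
 * checked; otherwise every remote slot with outstanding claims is
 * scanned, stopping at the current read position in the slot that is
 * still being parsed.
 */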
2522 static void
2523 release_service_messages(struct vchiq_service *service)
2524 {
2525 	struct vchiq_state *state = service->state;
2526 	int slot_last = state->remote->slot_last;
2527 	int i;
2528 
2529 	/* Release any claimed messages aimed at this service */
2530 
2531 	if (service->sync) {
2532 		struct vchiq_header *header =
2533 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2534 						state->remote->slot_sync);
2535 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2536 			release_message_sync(state, header);
2537 
2538 		return;
2539 	}
2540 
2541 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2542 		struct vchiq_slot_info *slot_info =
2543 			SLOT_INFO_FROM_INDEX(state, i);
2544 		if (slot_info->release_count != slot_info->use_count) {
2545 			char *data =
2546 				(char *)SLOT_DATA_FROM_INDEX(state, i);
2547 			unsigned int pos, end;
2548 
2549 			end = VCHIQ_SLOT_SIZE;
2550 			if (data == state->rx_data)
2551 				/* This buffer is still being read from - stop
2552 				** at the current read position */
2553 				end = state->rx_pos & VCHIQ_SLOT_MASK;
2554 
2555 			pos = 0;
2556 
2557 			while (pos < end) {
2558 				struct vchiq_header *header =
2559 					(struct vchiq_header *)(data + pos);
2560 				int msgid = header->msgid;
2561 				int port = VCHIQ_MSG_DSTPORT(msgid);
2562 
2563 				if ((port == service->localport) &&
2564 					(msgid & VCHIQ_MSGID_CLAIMED)) {
2565 					vchiq_log_info(vchiq_core_log_level,
2566 						"  fsi - hdr %pK", header);
2567 					release_slot(state, slot_info, header,
2568 						NULL);
2569 				}
2570 				pos += calc_stride(header->size);
2571 				if (pos > VCHIQ_SLOT_SIZE) {
2572 					vchiq_log_error(vchiq_core_log_level,
2573 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2574 						pos, header, msgid,
2575 						header->msgid, header->size);
2576 					WARN(1, "invalid slot position\n");
2577 				}
2578 			}
2579 		}
2580 	}
2581 }
2582 
2583 static int
2584 do_abort_bulks(struct vchiq_service *service)
2585 {
2586 	enum vchiq_status status;
2587 
2588 	/* Abort any outstanding bulk transfers */
2589 	if (mutex_lock_killable(&service->bulk_mutex))
2590 		return 0;
2591 	abort_outstanding_bulks(service, &service->bulk_tx);
2592 	abort_outstanding_bulks(service, &service->bulk_rx);
2593 	mutex_unlock(&service->bulk_mutex);
2594 
2595 	status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2596 	if (status == VCHIQ_SUCCESS)
2597 		status = notify_bulks(service, &service->bulk_rx,
2598 			0/*!retry_poll*/);
2599 	return (status == VCHIQ_SUCCESS);
2600 }
2601 
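/*
 * Final stage of closing a service: pick the next state (LISTENING or
 * CLOSEWAIT for servers, CLOSED for clients), deliver the
 * SERVICE_CLOSED callback, drop any outstanding use counts and either
 * free the service or wake waiters on remove_event. If the callback
 * returns VCHIQ_RETRY the service is parked in 'failstate' so the close
 * can be retried later.
 */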
2602 static enum vchiq_status
2603 close_service_complete(struct vchiq_service *service, int failstate)
2604 {
2605 	enum vchiq_status status;
2606 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2607 	int newstate;
2608 
2609 	switch (service->srvstate) {
2610 	case VCHIQ_SRVSTATE_OPEN:
2611 	case VCHIQ_SRVSTATE_CLOSESENT:
2612 	case VCHIQ_SRVSTATE_CLOSERECVD:
2613 		if (is_server) {
2614 			if (service->auto_close) {
2615 				service->client_id = 0;
2616 				service->remoteport = VCHIQ_PORT_FREE;
2617 				newstate = VCHIQ_SRVSTATE_LISTENING;
2618 			} else
2619 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2620 		} else
2621 			newstate = VCHIQ_SRVSTATE_CLOSED;
2622 		vchiq_set_service_state(service, newstate);
2623 		break;
2624 	case VCHIQ_SRVSTATE_LISTENING:
2625 		break;
2626 	default:
2627 		vchiq_log_error(vchiq_core_log_level,
2628 			"%s(%x) called in state %s", __func__,
2629 			service->handle, srvstate_names[service->srvstate]);
2630 		WARN(1, "%s in unexpected state\n", __func__);
2631 		return VCHIQ_ERROR;
2632 	}
2633 
2634 	status = make_service_callback(service,
2635 		VCHIQ_SERVICE_CLOSED, NULL, NULL);
2636 
2637 	if (status != VCHIQ_RETRY) {
2638 		int uc = service->service_use_count;
2639 		int i;
2640 		/* Complete the close process */
2641 		for (i = 0; i < uc; i++)
2642 			/* cater for cases where close is forced and the
2643 			** client may not close all its handles */
2644 			vchiq_release_service_internal(service);
2645 
2646 		service->client_id = 0;
2647 		service->remoteport = VCHIQ_PORT_FREE;
2648 
2649 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2650 			vchiq_free_service_internal(service);
2651 		else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2652 			if (is_server)
2653 				service->closing = 0;
2654 
2655 			complete(&service->remove_event);
2656 		}
2657 	} else
2658 		vchiq_set_service_state(service, failstate);
2659 
2660 	return status;
2661 }
2662 
2663 /* Called by the slot handler */
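/*
 * Drives the close handshake state machine. 'close_recvd' indicates
 * whether the peer's CLOSE message has already arrived; depending on
 * the current service state this sends our own CLOSE, aborts any
 * outstanding bulk transfers, releases claimed messages and finally
 * calls close_service_complete().
 */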
2664 enum vchiq_status
2665 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2666 {
2667 	struct vchiq_state *state = service->state;
2668 	enum vchiq_status status = VCHIQ_SUCCESS;
2669 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2670 
2671 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2672 		service->state->id, service->localport, close_recvd,
2673 		srvstate_names[service->srvstate]);
2674 
2675 	switch (service->srvstate) {
2676 	case VCHIQ_SRVSTATE_CLOSED:
2677 	case VCHIQ_SRVSTATE_HIDDEN:
2678 	case VCHIQ_SRVSTATE_LISTENING:
2679 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2680 		if (close_recvd)
2681 			vchiq_log_error(vchiq_core_log_level,
2682 				"%s(1) called "
2683 				"in state %s",
2684 				__func__, srvstate_names[service->srvstate]);
2685 		else if (is_server) {
2686 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2687 				status = VCHIQ_ERROR;
2688 			} else {
2689 				service->client_id = 0;
2690 				service->remoteport = VCHIQ_PORT_FREE;
2691 				if (service->srvstate ==
2692 					VCHIQ_SRVSTATE_CLOSEWAIT)
2693 					vchiq_set_service_state(service,
2694 						VCHIQ_SRVSTATE_LISTENING);
2695 			}
2696 			complete(&service->remove_event);
2697 		} else
2698 			vchiq_free_service_internal(service);
2699 		break;
2700 	case VCHIQ_SRVSTATE_OPENING:
2701 		if (close_recvd) {
2702 			/* The open was rejected - tell the user */
2703 			vchiq_set_service_state(service,
2704 				VCHIQ_SRVSTATE_CLOSEWAIT);
2705 			complete(&service->remove_event);
2706 		} else {
2707 			/* Shutdown mid-open - let the other side know */
2708 			status = queue_message(state, service,
2709 				VCHIQ_MAKE_MSG
2710 				(VCHIQ_MSG_CLOSE,
2711 				service->localport,
2712 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2713 				NULL, NULL, 0, 0);
2714 		}
2715 		break;
2716 
2717 	case VCHIQ_SRVSTATE_OPENSYNC:
2718 		mutex_lock(&state->sync_mutex);
2719 		fallthrough;
2720 	case VCHIQ_SRVSTATE_OPEN:
2721 		if (close_recvd) {
2722 			if (!do_abort_bulks(service))
2723 				status = VCHIQ_RETRY;
2724 		}
2725 
2726 		release_service_messages(service);
2727 
2728 		if (status == VCHIQ_SUCCESS)
2729 			status = queue_message(state, service,
2730 				VCHIQ_MAKE_MSG
2731 				(VCHIQ_MSG_CLOSE,
2732 				service->localport,
2733 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2734 				NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2735 
2736 		if (status == VCHIQ_SUCCESS) {
2737 			if (!close_recvd) {
2738 				/* Change the state while the mutex is
2739 				   still held */
2740 				vchiq_set_service_state(service,
2741 							VCHIQ_SRVSTATE_CLOSESENT);
2742 				mutex_unlock(&state->slot_mutex);
2743 				if (service->sync)
2744 					mutex_unlock(&state->sync_mutex);
2745 				break;
2746 			}
2747 		} else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2748 			mutex_unlock(&state->sync_mutex);
2749 			break;
2750 		} else
2751 			break;
2752 
2753 		/* Change the state while the mutex is still held */
2754 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2755 		mutex_unlock(&state->slot_mutex);
2756 		if (service->sync)
2757 			mutex_unlock(&state->sync_mutex);
2758 
2759 		status = close_service_complete(service,
2760 				VCHIQ_SRVSTATE_CLOSERECVD);
2761 		break;
2762 
2763 	case VCHIQ_SRVSTATE_CLOSESENT:
2764 		if (!close_recvd)
2765 			/* This happens when a process is killed mid-close */
2766 			break;
2767 
2768 		if (!do_abort_bulks(service)) {
2769 			status = VCHIQ_RETRY;
2770 			break;
2771 		}
2772 
2773 		if (status == VCHIQ_SUCCESS)
2774 			status = close_service_complete(service,
2775 				VCHIQ_SRVSTATE_CLOSERECVD);
2776 		break;
2777 
2778 	case VCHIQ_SRVSTATE_CLOSERECVD:
2779 		if (!close_recvd && is_server)
2780 			/* Force into LISTENING mode */
2781 			vchiq_set_service_state(service,
2782 				VCHIQ_SRVSTATE_LISTENING);
2783 		status = close_service_complete(service,
2784 			VCHIQ_SRVSTATE_CLOSERECVD);
2785 		break;
2786 
2787 	default:
2788 		vchiq_log_error(vchiq_core_log_level,
2789 			"%s(%d) called in state %s", __func__,
2790 			close_recvd, srvstate_names[service->srvstate]);
2791 		break;
2792 	}
2793 
2794 	return status;
2795 }
2796 
2797 /* Called from the application process upon process death */
2798 void
2799 vchiq_terminate_service_internal(struct vchiq_service *service)
2800 {
2801 	struct vchiq_state *state = service->state;
2802 
2803 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2804 		state->id, service->localport, service->remoteport);
2805 
2806 	mark_service_closing(service);
2807 
2808 	/* Mark the service for removal by the slot handler */
2809 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2810 }
2811 
2812 /* Called from the slot handler */
2813 void
2814 vchiq_free_service_internal(struct vchiq_service *service)
2815 {
2816 	struct vchiq_state *state = service->state;
2817 
2818 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2819 		state->id, service->localport);
2820 
2821 	switch (service->srvstate) {
2822 	case VCHIQ_SRVSTATE_OPENING:
2823 	case VCHIQ_SRVSTATE_CLOSED:
2824 	case VCHIQ_SRVSTATE_HIDDEN:
2825 	case VCHIQ_SRVSTATE_LISTENING:
2826 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2827 		break;
2828 	default:
2829 		vchiq_log_error(vchiq_core_log_level,
2830 			"%d: fsi - (%d) in state %s",
2831 			state->id, service->localport,
2832 			srvstate_names[service->srvstate]);
2833 		return;
2834 	}
2835 
2836 	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2837 
2838 	complete(&service->remove_event);
2839 
2840 	/* Release the initial lock */
2841 	unlock_service(service);
2842 }
2843 
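/*
 * Connect this instance to the remote side: move any of its HIDDEN
 * services to LISTENING, send a single CONNECT message if the link is
 * still DISCONNECTED, and then wait for the peer's CONNECT before
 * entering the CONNECTED state.
 */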
2844 enum vchiq_status
2845 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2846 {
2847 	struct vchiq_service *service;
2848 	int i;
2849 
2850 	/* Find all services registered to this client and enable them. */
2851 	i = 0;
2852 	while ((service = next_service_by_instance(state, instance,
2853 		&i)) !=	NULL) {
2854 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2855 			vchiq_set_service_state(service,
2856 				VCHIQ_SRVSTATE_LISTENING);
2857 		unlock_service(service);
2858 	}
2859 
2860 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2861 		if (queue_message(state, NULL,
2862 			VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2863 			0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2864 			return VCHIQ_RETRY;
2865 
2866 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2867 	}
2868 
2869 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2870 		if (wait_for_completion_interruptible(&state->connect))
2871 			return VCHIQ_RETRY;
2872 
2873 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2874 		complete(&state->connect);
2875 	}
2876 
2877 	return VCHIQ_SUCCESS;
2878 }
2879 
2880 enum vchiq_status
2881 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2882 {
2883 	struct vchiq_service *service;
2884 	int i;
2885 
2886 	/* Find all services registered to this client and enable them. */
2887 	i = 0;
2888 	while ((service = next_service_by_instance(state, instance,
2889 		&i)) !=	NULL) {
2890 		(void)vchiq_remove_service(service->handle);
2891 		unlock_service(service);
2892 	}
2893 
2894 	return VCHIQ_SUCCESS;
2895 }
2896 
2897 enum vchiq_status
2898 vchiq_close_service(unsigned int handle)
2899 {
2900 	/* Unregister the service */
2901 	struct vchiq_service *service = find_service_by_handle(handle);
2902 	enum vchiq_status status = VCHIQ_SUCCESS;
2903 
2904 	if (!service)
2905 		return VCHIQ_ERROR;
2906 
2907 	vchiq_log_info(vchiq_core_log_level,
2908 		"%d: close_service:%d",
2909 		service->state->id, service->localport);
2910 
2911 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2912 		(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2913 		(service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2914 		unlock_service(service);
2915 		return VCHIQ_ERROR;
2916 	}
2917 
2918 	mark_service_closing(service);
2919 
2920 	if (current == service->state->slot_handler_thread) {
2921 		status = vchiq_close_service_internal(service,
2922 			0/*!close_recvd*/);
2923 		WARN_ON(status == VCHIQ_RETRY);
2924 	} else {
2925 	/* Mark the service for termination by the slot handler */
2926 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2927 	}
2928 
2929 	while (1) {
2930 		if (wait_for_completion_interruptible(&service->remove_event)) {
2931 			status = VCHIQ_RETRY;
2932 			break;
2933 		}
2934 
2935 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2936 			(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2937 			(service->srvstate == VCHIQ_SRVSTATE_OPEN))
2938 			break;
2939 
2940 		vchiq_log_warning(vchiq_core_log_level,
2941 			"%d: close_service:%d - waiting in state %s",
2942 			service->state->id, service->localport,
2943 			srvstate_names[service->srvstate]);
2944 	}
2945 
2946 	if ((status == VCHIQ_SUCCESS) &&
2947 		(service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2948 		(service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2949 		status = VCHIQ_ERROR;
2950 
2951 	unlock_service(service);
2952 
2953 	return status;
2954 }
2955 EXPORT_SYMBOL(vchiq_close_service);
2956 
2957 enum vchiq_status
2958 vchiq_remove_service(unsigned int handle)
2959 {
2960 	/* Unregister the service */
2961 	struct vchiq_service *service = find_service_by_handle(handle);
2962 	enum vchiq_status status = VCHIQ_SUCCESS;
2963 
2964 	if (!service)
2965 		return VCHIQ_ERROR;
2966 
2967 	vchiq_log_info(vchiq_core_log_level,
2968 		"%d: remove_service:%d",
2969 		service->state->id, service->localport);
2970 
2971 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2972 		unlock_service(service);
2973 		return VCHIQ_ERROR;
2974 	}
2975 
2976 	mark_service_closing(service);
2977 
2978 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2979 		(current == service->state->slot_handler_thread)) {
2980 		/* Make it look like a client, because it must be removed and
2981 		   not left in the LISTENING state. */
2982 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
2983 
2984 		status = vchiq_close_service_internal(service,
2985 			0/*!close_recvd*/);
2986 		WARN_ON(status == VCHIQ_RETRY);
2987 	} else {
2988 		/* Mark the service for removal by the slot handler */
2989 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2990 	}
2991 	while (1) {
2992 		if (wait_for_completion_interruptible(&service->remove_event)) {
2993 			status = VCHIQ_RETRY;
2994 			break;
2995 		}
2996 
2997 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2998 			(service->srvstate == VCHIQ_SRVSTATE_OPEN))
2999 			break;
3000 
3001 		vchiq_log_warning(vchiq_core_log_level,
3002 			"%d: remove_service:%d - waiting in state %s",
3003 			service->state->id, service->localport,
3004 			srvstate_names[service->srvstate]);
3005 	}
3006 
3007 	if ((status == VCHIQ_SUCCESS) &&
3008 		(service->srvstate != VCHIQ_SRVSTATE_FREE))
3009 		status = VCHIQ_ERROR;
3010 
3011 	unlock_service(service);
3012 
3013 	return status;
3014 }
3015 
3016 /* This function may be called by kernel threads or user threads.
3017  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3018  * received and the call should be retried after being returned to user
3019  * context.
3020  * When called in blocking mode, the userdata field points to a bulk_waiter
3021  * structure.
3022  */
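/*
 * Illustrative sketch only (not taken from this file): a blocking
 * transmit from kernel context could look roughly like
 *
 *	struct bulk_waiter waiter;
 *	enum vchiq_status status;
 *
 *	status = vchiq_bulk_transfer(handle, buf, NULL, len, &waiter,
 *				     VCHIQ_BULK_MODE_BLOCKING,
 *				     VCHIQ_BULK_TRANSMIT);
 *
 * where 'handle', 'buf' and 'len' are assumed to describe an open
 * service and a kernel buffer. If the wait is interrupted the function
 * returns VCHIQ_RETRY with waiter.bulk still set; the caller is then
 * expected to retry with VCHIQ_BULK_MODE_WAITING and the same waiter,
 * which is why in-tree callers typically allocate the waiter so it can
 * outlive a signal-interrupted call.
 */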
3023 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3024 				   void *offset, void __user *uoffset,
3025 				   int size, void *userdata,
3026 				   enum vchiq_bulk_mode mode,
3027 				   enum vchiq_bulk_dir dir)
3028 {
3029 	struct vchiq_service *service = find_service_by_handle(handle);
3030 	struct vchiq_bulk_queue *queue;
3031 	struct vchiq_bulk *bulk;
3032 	struct vchiq_state *state;
3033 	struct bulk_waiter *bulk_waiter = NULL;
3034 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3035 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3036 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3037 	enum vchiq_status status = VCHIQ_ERROR;
3038 	int payload[2];
3039 
3040 	if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
3041 	    (!offset && !uoffset) ||
3042 	    vchiq_check_service(service) != VCHIQ_SUCCESS)
3043 		goto error_exit;
3044 
3045 	switch (mode) {
3046 	case VCHIQ_BULK_MODE_NOCALLBACK:
3047 	case VCHIQ_BULK_MODE_CALLBACK:
3048 		break;
3049 	case VCHIQ_BULK_MODE_BLOCKING:
3050 		bulk_waiter = userdata;
3051 		init_completion(&bulk_waiter->event);
3052 		bulk_waiter->actual = 0;
3053 		bulk_waiter->bulk = NULL;
3054 		break;
3055 	case VCHIQ_BULK_MODE_WAITING:
3056 		bulk_waiter = userdata;
3057 		bulk = bulk_waiter->bulk;
3058 		goto waiting;
3059 	default:
3060 		goto error_exit;
3061 	}
3062 
3063 	state = service->state;
3064 
3065 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3066 		&service->bulk_tx : &service->bulk_rx;
3067 
3068 	if (mutex_lock_killable(&service->bulk_mutex)) {
3069 		status = VCHIQ_RETRY;
3070 		goto error_exit;
3071 	}
3072 
3073 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3074 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3075 		do {
3076 			mutex_unlock(&service->bulk_mutex);
3077 			if (wait_for_completion_interruptible(
3078 						&service->bulk_remove_event)) {
3079 				status = VCHIQ_RETRY;
3080 				goto error_exit;
3081 			}
3082 			if (mutex_lock_killable(&service->bulk_mutex)) {
3083 				status = VCHIQ_RETRY;
3084 				goto error_exit;
3085 			}
3086 		} while (queue->local_insert == queue->remove +
3087 				VCHIQ_NUM_SERVICE_BULKS);
3088 	}
3089 
3090 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3091 
3092 	bulk->mode = mode;
3093 	bulk->dir = dir;
3094 	bulk->userdata = userdata;
3095 	bulk->size = size;
3096 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3097 
3098 	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)
3099 			!= VCHIQ_SUCCESS)
3100 		goto unlock_error_exit;
3101 
3102 	wmb();
3103 
3104 	vchiq_log_info(vchiq_core_log_level,
3105 		"%d: bt (%d->%d) %cx %x@%pad %pK",
3106 		state->id, service->localport, service->remoteport, dir_char,
3107 		size, &bulk->data, userdata);
3108 
3109 	/* The slot mutex must be held when the service is being closed, so
3110 	   claim it here to ensure that isn't happening */
3111 	if (mutex_lock_killable(&state->slot_mutex)) {
3112 		status = VCHIQ_RETRY;
3113 		goto cancel_bulk_error_exit;
3114 	}
3115 
3116 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3117 		goto unlock_both_error_exit;
3118 
3119 	payload[0] = lower_32_bits(bulk->data);
3120 	payload[1] = bulk->size;
3121 	status = queue_message(state,
3122 			       NULL,
3123 			       VCHIQ_MAKE_MSG(dir_msgtype,
3124 					      service->localport,
3125 					      service->remoteport),
3126 			       memcpy_copy_callback,
3127 			       &payload,
3128 			       sizeof(payload),
3129 			       QMFLAGS_IS_BLOCKING |
3130 			       QMFLAGS_NO_MUTEX_LOCK |
3131 			       QMFLAGS_NO_MUTEX_UNLOCK);
3132 	if (status != VCHIQ_SUCCESS)
3133 		goto unlock_both_error_exit;
3134 
3135 	queue->local_insert++;
3136 
3137 	mutex_unlock(&state->slot_mutex);
3138 	mutex_unlock(&service->bulk_mutex);
3139 
3140 	vchiq_log_trace(vchiq_core_log_level,
3141 		"%d: bt:%d %cx li=%x ri=%x p=%x",
3142 		state->id,
3143 		service->localport, dir_char,
3144 		queue->local_insert, queue->remote_insert, queue->process);
3145 
3146 waiting:
3147 	unlock_service(service);
3148 
3149 	status = VCHIQ_SUCCESS;
3150 
3151 	if (bulk_waiter) {
3152 		bulk_waiter->bulk = bulk;
3153 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3154 			status = VCHIQ_RETRY;
3155 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3156 			status = VCHIQ_ERROR;
3157 	}
3158 
3159 	return status;
3160 
3161 unlock_both_error_exit:
3162 	mutex_unlock(&state->slot_mutex);
3163 cancel_bulk_error_exit:
3164 	vchiq_complete_bulk(bulk);
3165 unlock_error_exit:
3166 	mutex_unlock(&service->bulk_mutex);
3167 
3168 error_exit:
3169 	if (service)
3170 		unlock_service(service);
3171 	return status;
3172 }
3173 
3174 enum vchiq_status
3175 vchiq_queue_message(unsigned int handle,
3176 		    ssize_t (*copy_callback)(void *context, void *dest,
3177 					     size_t offset, size_t maxsize),
3178 		    void *context,
3179 		    size_t size)
3180 {
3181 	struct vchiq_service *service = find_service_by_handle(handle);
3182 	enum vchiq_status status = VCHIQ_ERROR;
3183 
3184 	if (!service ||
3185 		(vchiq_check_service(service) != VCHIQ_SUCCESS))
3186 		goto error_exit;
3187 
3188 	if (!size) {
3189 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3190 		goto error_exit;
3191 
3192 	}
3193 
3194 	if (size > VCHIQ_MAX_MSG_SIZE) {
3195 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3196 		goto error_exit;
3197 	}
3198 
3199 	switch (service->srvstate) {
3200 	case VCHIQ_SRVSTATE_OPEN:
3201 		status = queue_message(service->state, service,
3202 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3203 					service->localport,
3204 					service->remoteport),
3205 				copy_callback, context, size, 1);
3206 		break;
3207 	case VCHIQ_SRVSTATE_OPENSYNC:
3208 		status = queue_message_sync(service->state, service,
3209 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3210 					service->localport,
3211 					service->remoteport),
3212 				copy_callback, context, size, 1);
3213 		break;
3214 	default:
3215 		status = VCHIQ_ERROR;
3216 		break;
3217 	}
3218 
3219 error_exit:
3220 	if (service)
3221 		unlock_service(service);
3222 
3223 	return status;
3224 }
3225 
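/*
 * Convenience wrapper for kernel callers: copies from a plain buffer
 * and keeps retrying (sleeping 1 ms between attempts) whenever
 * vchiq_queue_message() returns VCHIQ_RETRY, so it blocks until the
 * message has been queued or a hard error occurs.
 */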
3226 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3227 {
3228 	enum vchiq_status status;
3229 
3230 	while (1) {
3231 		status = vchiq_queue_message(handle, memcpy_copy_callback,
3232 					     data, size);
3233 
3234 		/*
3235 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3236 		 * implement a retry mechanism since this function is supposed
3237 		 * to block until queued
3238 		 */
3239 		if (status != VCHIQ_RETRY)
3240 			break;
3241 
3242 		msleep(1);
3243 	}
3244 
3245 	return status;
3246 }
3247 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3248 
3249 void
3250 vchiq_release_message(unsigned int handle,
3251 		      struct vchiq_header *header)
3252 {
3253 	struct vchiq_service *service = find_service_by_handle(handle);
3254 	struct vchiq_shared_state *remote;
3255 	struct vchiq_state *state;
3256 	int slot_index;
3257 
3258 	if (!service)
3259 		return;
3260 
3261 	state = service->state;
3262 	remote = state->remote;
3263 
3264 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3265 
3266 	if ((slot_index >= remote->slot_first) &&
3267 		(slot_index <= remote->slot_last)) {
3268 		int msgid = header->msgid;
3269 
3270 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3271 			struct vchiq_slot_info *slot_info =
3272 				SLOT_INFO_FROM_INDEX(state, slot_index);
3273 
3274 			release_slot(state, slot_info, header, service);
3275 		}
3276 	} else if (slot_index == remote->slot_sync)
3277 		release_message_sync(state, header);
3278 
3279 	unlock_service(service);
3280 }
3281 EXPORT_SYMBOL(vchiq_release_message);
3282 
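/*
 * Return the synchronous slot to the remote side: mark it as padding
 * and signal the peer's sync_release event so the next synchronous
 * message can be sent.
 */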
3283 static void
3284 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3285 {
3286 	header->msgid = VCHIQ_MSGID_PADDING;
3287 	remote_event_signal(&state->remote->sync_release);
3288 }
3289 
3290 enum vchiq_status
3291 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3292 {
3293 	enum vchiq_status status = VCHIQ_ERROR;
3294 	struct vchiq_service *service = find_service_by_handle(handle);
3295 
3296 	if (!service ||
3297 	    (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
3298 	    !peer_version)
3299 		goto exit;
3300 	*peer_version = service->peer_version;
3301 	status = VCHIQ_SUCCESS;
3302 
3303 exit:
3304 	if (service)
3305 		unlock_service(service);
3306 	return status;
3307 }
3308 EXPORT_SYMBOL(vchiq_get_peer_version);
3309 
3310 void vchiq_get_config(struct vchiq_config *config)
3311 {
3312 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3313 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3314 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3315 	config->max_services           = VCHIQ_MAX_SERVICES;
3316 	config->version                = VCHIQ_VERSION;
3317 	config->version_min            = VCHIQ_VERSION_MIN;
3318 }
3319 
3320 enum vchiq_status
3321 vchiq_set_service_option(unsigned int handle,
3322 	enum vchiq_service_option option, int value)
3323 {
3324 	struct vchiq_service *service = find_service_by_handle(handle);
3325 	enum vchiq_status status = VCHIQ_ERROR;
3326 
3327 	if (service) {
3328 		switch (option) {
3329 		case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3330 			service->auto_close = value;
3331 			status = VCHIQ_SUCCESS;
3332 			break;
3333 
3334 		case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
3335 			struct vchiq_service_quota *service_quota =
3336 				&service->state->service_quotas[
3337 					service->localport];
3338 			if (value == 0)
3339 				value = service->state->default_slot_quota;
3340 			if ((value >= service_quota->slot_use_count) &&
3341 				 (value < (unsigned short)~0)) {
3342 				service_quota->slot_quota = value;
3343 				if ((value >= service_quota->slot_use_count) &&
3344 					(service_quota->message_quota >=
3345 					 service_quota->message_use_count)) {
3346 					/* Signal the service that it may have
3347 					** dropped below its quota */
3348 					complete(&service_quota->quota_event);
3349 				}
3350 				status = VCHIQ_SUCCESS;
3351 			}
3352 		} break;
3353 
3354 		case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
3355 			struct vchiq_service_quota *service_quota =
3356 				&service->state->service_quotas[
3357 					service->localport];
3358 			if (value == 0)
3359 				value = service->state->default_message_quota;
3360 			if ((value >= service_quota->message_use_count) &&
3361 				 (value < (unsigned short)~0)) {
3362 				service_quota->message_quota = value;
3363 				if ((value >=
3364 					service_quota->message_use_count) &&
3365 					(service_quota->slot_quota >=
3366 					service_quota->slot_use_count))
3367 					/* Signal the service that it may have
3368 					** dropped below its quota */
3369 					complete(&service_quota->quota_event);
3370 				status = VCHIQ_SUCCESS;
3371 			}
3372 		} break;
3373 
3374 		case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3375 			if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3376 				(service->srvstate ==
3377 				VCHIQ_SRVSTATE_LISTENING)) {
3378 				service->sync = value;
3379 				status = VCHIQ_SUCCESS;
3380 			}
3381 			break;
3382 
3383 		case VCHIQ_SERVICE_OPTION_TRACE:
3384 			service->trace = value;
3385 			status = VCHIQ_SUCCESS;
3386 			break;
3387 
3388 		default:
3389 			break;
3390 		}
3391 		unlock_service(service);
3392 	}
3393 
3394 	return status;
3395 }
3396 
3397 static int
3398 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3399 			struct vchiq_shared_state *shared, const char *label)
3400 {
3401 	static const char *const debug_names[] = {
3402 		"<entries>",
3403 		"SLOT_HANDLER_COUNT",
3404 		"SLOT_HANDLER_LINE",
3405 		"PARSE_LINE",
3406 		"PARSE_HEADER",
3407 		"PARSE_MSGID",
3408 		"AWAIT_COMPLETION_LINE",
3409 		"DEQUEUE_MESSAGE_LINE",
3410 		"SERVICE_CALLBACK_LINE",
3411 		"MSG_QUEUE_FULL_COUNT",
3412 		"COMPLETION_QUEUE_FULL_COUNT"
3413 	};
3414 	int i;
3415 	char buf[80];
3416 	int len;
3417 	int err;
3418 
3419 	len = scnprintf(buf, sizeof(buf),
3420 		"  %s: slots %d-%d tx_pos=%x recycle=%x",
3421 		label, shared->slot_first, shared->slot_last,
3422 		shared->tx_pos, shared->slot_queue_recycle);
3423 	err = vchiq_dump(dump_context, buf, len + 1);
3424 	if (err)
3425 		return err;
3426 
3427 	len = scnprintf(buf, sizeof(buf),
3428 		"    Slots claimed:");
3429 	err = vchiq_dump(dump_context, buf, len + 1);
3430 	if (err)
3431 		return err;
3432 
3433 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3434 		struct vchiq_slot_info slot_info =
3435 						*SLOT_INFO_FROM_INDEX(state, i);
3436 		if (slot_info.use_count != slot_info.release_count) {
3437 			len = scnprintf(buf, sizeof(buf),
3438 				"      %d: %d/%d", i, slot_info.use_count,
3439 				slot_info.release_count);
3440 			err = vchiq_dump(dump_context, buf, len + 1);
3441 			if (err)
3442 				return err;
3443 		}
3444 	}
3445 
3446 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3447 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3448 			debug_names[i], shared->debug[i], shared->debug[i]);
3449 		err = vchiq_dump(dump_context, buf, len + 1);
3450 		if (err)
3451 			return err;
3452 	}
3453 	return 0;
3454 }
3455 
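/*
 * Dump an overview of a vchiq_state: connection state, transmit and receive
 * positions, slot and quota usage, both halves of the shared state, and the
 * state of every allocated service.
 */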
3456 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3457 {
3458 	char buf[80];
3459 	int len;
3460 	int i;
3461 	int err;
3462 
3463 	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3464 		conn_state_names[state->conn_state]);
3465 	err = vchiq_dump(dump_context, buf, len + 1);
3466 	if (err)
3467 		return err;
3468 
3469 	len = scnprintf(buf, sizeof(buf),
3470 		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3471 		state->local->tx_pos,
3472 		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3473 		state->rx_pos,
3474 		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3475 	err = vchiq_dump(dump_context, buf, len + 1);
3476 	if (err)
3477 		return err;
3478 
3479 	len = scnprintf(buf, sizeof(buf),
3480 		"  Version: %d (min %d)",
3481 		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3482 	err = vchiq_dump(dump_context, buf, len + 1);
3483 	if (err)
3484 		return err;
3485 
3486 	if (VCHIQ_ENABLE_STATS) {
3487 		len = scnprintf(buf, sizeof(buf),
3488 			"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
3489 			"error_count=%d",
3490 			state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3491 			state->stats.error_count);
3492 		err = vchiq_dump(dump_context, buf, len + 1);
3493 		if (err)
3494 			return err;
3495 	}
3496 
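	/*
	 * Summarise slot usage: free tx slots, the unused portion of the
	 * data quota, slots awaiting recycling, and the stall counters.
	 */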
3497 	len = scnprintf(buf, sizeof(buf),
3498 		"  Slots: %d available (%d data), %d recyclable, %d stalls "
3499 		"(%d data)",
3500 		((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3501 			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3502 		state->data_quota - state->data_use_count,
3503 		state->local->slot_queue_recycle - state->slot_queue_available,
3504 		state->stats.slot_stalls, state->stats.data_stalls);
3505 	err = vchiq_dump(dump_context, buf, len + 1);
3506 	if (err)
3507 		return err;
3508 
3509 	err = vchiq_dump_platform_state(dump_context);
3510 	if (err)
3511 		return err;
3512 
3513 	err = vchiq_dump_shared_state(dump_context,
3514 				      state,
3515 				      state->local,
3516 				      "Local");
3517 	if (err)
3518 		return err;
3519 	err = vchiq_dump_shared_state(dump_context,
3520 				      state,
3521 				      state->remote,
3522 				      "Remote");
3523 	if (err)
3524 		return err;
3525 
3526 	err = vchiq_dump_platform_instances(dump_context);
3527 	if (err)
3528 		return err;
3529 
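	/* Dump every service that currently holds a local port. */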
3530 	for (i = 0; i < state->unused_service; i++) {
3531 		struct vchiq_service *service = find_service_by_port(state, i);
3532 
3533 		if (service) {
3534 			err = vchiq_dump_service_state(dump_context, service);
3535 			unlock_service(service);
3536 			if (err)
3537 				return err;
3538 		}
3539 	}
3540 	return 0;
3541 }
3542 
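/*
 * Dump a single service: port numbers, fourcc and quota usage, pending bulk
 * transfers, and (when stats are enabled) its control/bulk traffic counters.
 */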
3543 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3544 {
3545 	char buf[80];
3546 	int len;
3547 	int err;
3548 	unsigned int ref_count;
3549 
3550 	/* Don't include the lock just taken */
3551 	ref_count = kref_read(&service->ref_count) - 1;
3552 	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3553 			service->localport, srvstate_names[service->srvstate],
3554 			ref_count);
3555 
3556 	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3557 		char remoteport[30];
3558 		struct vchiq_service_quota *service_quota =
3559 			&service->state->service_quotas[service->localport];
3560 		int fourcc = service->base.fourcc;
3561 		int tx_pending, rx_pending;
3562 
3563 		if (service->remoteport != VCHIQ_PORT_FREE) {
3564 			int len2 = scnprintf(remoteport, sizeof(remoteport),
3565 				"%u", service->remoteport);
3566 
3567 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3568 				scnprintf(remoteport + len2,
3569 					sizeof(remoteport) - len2,
3570 					" (client %x)", service->client_id);
3571 		} else
3572 			strcpy(remoteport, "n/a");
3573 
3574 		len += scnprintf(buf + len, sizeof(buf) - len,
3575 			" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3576 			VCHIQ_FOURCC_AS_4CHARS(fourcc),
3577 			remoteport,
3578 			service_quota->message_use_count,
3579 			service_quota->message_quota,
3580 			service_quota->slot_use_count,
3581 			service_quota->slot_quota);
3582 
3583 		err = vchiq_dump(dump_context, buf, len + 1);
3584 		if (err)
3585 			return err;
3586 
3587 		tx_pending = service->bulk_tx.local_insert -
3588 			service->bulk_tx.remote_insert;
3589 
3590 		rx_pending = service->bulk_rx.local_insert -
3591 			service->bulk_rx.remote_insert;
3592 
3593 		len = scnprintf(buf, sizeof(buf),
3594 			"  Bulk: tx_pending=%d (size %d),"
3595 			" rx_pending=%d (size %d)",
3596 			tx_pending,
3597 			tx_pending ? service->bulk_tx.bulks[
3598 			BULK_INDEX(service->bulk_tx.remove)].size : 0,
3599 			rx_pending,
3600 			rx_pending ? service->bulk_rx.bulks[
3601 			BULK_INDEX(service->bulk_rx.remove)].size : 0);
3602 
3603 		if (VCHIQ_ENABLE_STATS) {
3604 			err = vchiq_dump(dump_context, buf, len + 1);
3605 			if (err)
3606 				return err;
3607 
3608 			len = scnprintf(buf, sizeof(buf),
3609 				"  Ctrl: tx_count=%d, tx_bytes=%llu, "
3610 				"rx_count=%d, rx_bytes=%llu",
3611 				service->stats.ctrl_tx_count,
3612 				service->stats.ctrl_tx_bytes,
3613 				service->stats.ctrl_rx_count,
3614 				service->stats.ctrl_rx_bytes);
3615 			err = vchiq_dump(dump_context, buf, len + 1);
3616 			if (err)
3617 				return err;
3618 
3619 			len = scnprintf(buf, sizeof(buf),
3620 				"  Bulk: tx_count=%d, tx_bytes=%llu, "
3621 				"rx_count=%d, rx_bytes=%llu",
3622 				service->stats.bulk_tx_count,
3623 				service->stats.bulk_tx_bytes,
3624 				service->stats.bulk_rx_count,
3625 				service->stats.bulk_rx_bytes);
3626 			err = vchiq_dump(dump_context, buf, len + 1);
3627 			if (err)
3628 				return err;
3629 
3630 			len = scnprintf(buf, sizeof(buf),
3631 				"  %d quota stalls, %d slot stalls, "
3632 				"%d bulk stalls, %d aborted, %d errors",
3633 				service->stats.quota_stalls,
3634 				service->stats.slot_stalls,
3635 				service->stats.bulk_stalls,
3636 				service->stats.bulk_aborted_count,
3637 				service->stats.error_count);
3638 		}
3639 	}
3640 
3641 	err = vchiq_dump(dump_context, buf, len + 1);
3642 	if (err)
3643 		return err;
3644 
3645 	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3646 		err = vchiq_dump_platform_service_state(dump_context, service);
3647 	return err;
3648 }
3649 
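/*
 * vchiq_loud_error_header/_footer bracket a serious error report with
 * highly visible banner lines in the log.
 */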
3650 void
3651 vchiq_loud_error_header(void)
3652 {
3653 	vchiq_log_error(vchiq_core_log_level,
3654 		"============================================================"
3655 		"================");
3656 	vchiq_log_error(vchiq_core_log_level,
3657 		"============================================================"
3658 		"================");
3659 	vchiq_log_error(vchiq_core_log_level, "=====");
3660 }
3661 
3662 void
3663 vchiq_loud_error_footer(void)
3664 {
3665 	vchiq_log_error(vchiq_core_log_level, "=====");
3666 	vchiq_log_error(vchiq_core_log_level,
3667 		"============================================================"
3668 		"================");
3669 	vchiq_log_error(vchiq_core_log_level,
3670 		"============================================================"
3671 		"================");
3672 }
3673 
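/*
 * Queue a REMOTE_USE message to the peer.  This and REMOTE_USE_ACTIVE below
 * can only be sent once a connection exists, so VCHIQ_RETRY is returned
 * while the state is still disconnected.
 */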
3674 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3675 {
3676 	enum vchiq_status status = VCHIQ_RETRY;
3677 
3678 	if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3679 		status = queue_message(state, NULL,
3680 			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
3681 			NULL, NULL, 0, 0);
3682 	return status;
3683 }
3684 
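/* Queue a REMOTE_USE_ACTIVE message, under the same connection requirement. */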
3685 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3686 {
3687 	enum vchiq_status status = VCHIQ_RETRY;
3688 
3689 	if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3690 		status = queue_message(state, NULL,
3691 			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3692 			NULL, NULL, 0, 0);
3693 	return status;
3694 }
3695 
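/*
 * Trace-log a hex + ASCII dump of a buffer, 16 bytes per line.  A sketch of
 * typical use (buffer and label invented for illustration):
 *
 *	vchiq_log_dump_mem("rx", 0, payload, sizeof(payload));
 *
 * would emit lines of the form
 *
 *	rx: 00000000: 56 43 48 49 51 ...  VCHIQ...
 *
 * 'addr' only labels each line; the data itself is read from 'void_mem'.
 */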
3696 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3697 	size_t num_bytes)
3698 {
3699 	const u8 *mem = void_mem;
3700 	size_t offset;
3701 	char line_buf[100];
3702 	char *s;
3703 
3704 	while (num_bytes > 0) {
3705 		s = line_buf;
3706 
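		/* Hex column: 16 bytes, space-padded when fewer remain. */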
3707 		for (offset = 0; offset < 16; offset++) {
3708 			if (offset < num_bytes)
3709 				s += scnprintf(s, 4, "%02x ", mem[offset]);
3710 			else
3711 				s += scnprintf(s, 4, "   ");
3712 		}
3713 
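		/* ASCII column: non-printable bytes are shown as '.'. */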
3714 		for (offset = 0; offset < 16; offset++) {
3715 			if (offset < num_bytes) {
3716 				u8 ch = mem[offset];
3717 
3718 				if ((ch < ' ') || (ch > '~'))
3719 					ch = '.';
3720 				*s++ = (char)ch;
3721 			}
3722 		}
3723 		*s++ = '\0';
3724 
3725 		if (label && (*label != '\0'))
3726 			vchiq_log_trace(VCHIQ_LOG_TRACE,
3727 				"%s: %08x: %s", label, addr, line_buf);
3728 		else
3729 			vchiq_log_trace(VCHIQ_LOG_TRACE,
3730 				"%08x: %s", addr, line_buf);
3731 
3732 		addr += 16;
3733 		mem += 16;
3734 		if (num_bytes > 16)
3735 			num_bytes -= 16;
3736 		else
3737 			num_bytes = 0;
3738 	}
3739 }
3740