/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This file is licensed under GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include "mostcore.h"

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct class *most_class;
static struct device *class_glue_dir;
static struct ida mdev_id;
static int modref;
static int dummy_num_buffers;

struct most_c_aim_obj {
	struct most_aim *ptr;
	int refs;
	int num_buffers;
};

struct most_c_obj {
	struct kobject kobj;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	bool is_poisoned;
	struct mutex start_mutex;
	int is_starving;
	struct most_interface *iface;
	struct most_inst_obj *inst;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock;
	struct list_head halt_fifo;
	struct list_head list;
	struct most_c_aim_obj aim0;
	struct most_c_aim_obj aim1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	struct mutex stop_task_mutex;
	wait_queue_head_t hdm_fifo_wq;
};

#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)

struct most_inst_obj {
	int dev_id;
	atomic_t tainted;
	struct most_interface *iface;
	struct list_head channel_list;
	struct most_c_obj *channel[MAX_CHANNELS];
	struct kobject kobj;
	struct list_head list;
};

#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})

static struct mutex deregister_mutex;

/*		     ___	     ___
 *		     ___C H A N N E L___
 */

/**
 * struct most_c_attr - to access the attributes of a channel object
 * @attr: attributes of a channel
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_c_attr {
	struct attribute attr;
	ssize_t (*show)(struct most_c_obj *d,
			struct most_c_attr *attr,
			char *buf);
	ssize_t (*store)(struct most_c_obj *d,
			 struct most_c_attr *attr,
			 const char *buf,
			 size_t count);
};

#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)

#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
		struct most_c_attr most_chnl_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

/**
 * channel_attr_show - show function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 */
static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->show)
		return -EIO;

	return channel_attr->show(c_obj, channel_attr, buf);
}

/**
 * channel_attr_store - store function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t channel_attr_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->store)
		return -EIO;
	return channel_attr->store(c_obj, channel_attr, buf, len);
}

static const struct sysfs_ops most_channel_sysfs_ops = {
	.show = channel_attr_show,
	.store = channel_attr_store,
};

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: buffer to be released
 *
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
			  mbo->bus_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_c_obj *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | halt fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_c_obj *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 */
static void most_channel_release(struct kobject *kobj)
{
	struct most_c_obj *c = to_c_obj(kobj);

	kfree(c);
}

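/*
 * The show functions below back the read-only channel attributes
 * (available directions and datatypes, buffer counts and sizes,
 * channel_starving). Each formats the respective capability or state
 * of the channel into the sysfs buffer.
 */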
static ssize_t show_available_directions(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "dir_rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "dir_tx ");
	strcat(buf, "\n");
	return strlen(buf) + 1;
}

static ssize_t show_available_datatypes(struct most_c_obj *c,
					struct most_c_attr *attr,
					char *buf)
{
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
		strcat(buf, "isoc_avp ");
	strcat(buf, "\n");
	return strlen(buf) + 1;
}

static
ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static
ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
				      struct most_c_attr *attr,
				      char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static
ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static
ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   char *buf)
{
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t show_channel_starving(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

#define create_show_channel_attribute(val) \
	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)

create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
create_show_channel_attribute(number_of_packet_buffers);
create_show_channel_attribute(number_of_stream_buffers);
create_show_channel_attribute(size_of_stream_buffer);
create_show_channel_attribute(size_of_packet_buffer);
create_show_channel_attribute(channel_starving);

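/*
 * The show_set_*()/store_set_*() pairs below expose the channel
 * configuration (struct most_channel_config) in sysfs. Values written
 * here take effect when the channel is started, i.e. when
 * most_start_channel() hands the configuration to the interface's
 * configure() method.
 */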
static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
					   struct most_c_attr *attr,
					   const char *buf,
					   size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_buffer_size(struct most_c_obj *c,
				    struct most_c_attr *attr,
				    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t store_set_buffer_size(struct most_c_obj *c,
				     struct most_c_attr *attr,
				     const char *buf,
				     size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_direction(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  char *buf)
{
	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "dir_tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "dir_rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_direction(struct most_c_obj *c,
				   struct most_c_attr *attr,
				   const char *buf,
				   size_t count)
{
	if (!strcmp(buf, "dir_rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_datatype(struct most_c_obj *c,
				 struct most_c_attr *attr,
				 char *buf)
{
	if (c->cfg.data_type & MOST_CH_CONTROL)
		return snprintf(buf, PAGE_SIZE, "control\n");
	else if (c->cfg.data_type & MOST_CH_ASYNC)
		return snprintf(buf, PAGE_SIZE, "async\n");
	else if (c->cfg.data_type & MOST_CH_SYNC)
		return snprintf(buf, PAGE_SIZE, "sync\n");
	else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
		return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t store_set_datatype(struct most_c_obj *c,
				  struct most_c_attr *attr,
				  const char *buf,
				  size_t count)
{
	if (!strcmp(buf, "control\n")) {
		c->cfg.data_type = MOST_CH_CONTROL;
	} else if (!strcmp(buf, "async\n")) {
		c->cfg.data_type = MOST_CH_ASYNC;
	} else if (!strcmp(buf, "sync\n")) {
		c->cfg.data_type = MOST_CH_SYNC;
	} else if (!strcmp(buf, "isoc_avp\n")) {
		c->cfg.data_type = MOST_CH_ISOC_AVP;
	} else {
		pr_info("WARN: invalid attribute settings\n");
		return -EINVAL;
	}
	return count;
}

static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
				       struct most_c_attr *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
					struct most_c_attr *attr,
					const char *buf,
					size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);

	if (ret)
		return ret;
	return count;
}

static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
					 struct most_c_attr *attr,
					 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
					  struct most_c_attr *attr,
					  const char *buf,
					  size_t count)
{
	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);

	if (ret)
		return ret;
	return count;
}

#define create_channel_attribute(value) \
	static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
			      show_##value, \
			      store_##value)

create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
create_channel_attribute(set_direction);
create_channel_attribute(set_datatype);
create_channel_attribute(set_subbuffer_size);
create_channel_attribute(set_packets_per_xact);

/**
 * most_channel_def_attrs - array of default attributes of channel object
 */
static struct attribute *most_channel_def_attrs[] = {
	&most_chnl_attr_available_directions.attr,
	&most_chnl_attr_available_datatypes.attr,
	&most_chnl_attr_number_of_packet_buffers.attr,
	&most_chnl_attr_number_of_stream_buffers.attr,
	&most_chnl_attr_size_of_packet_buffer.attr,
	&most_chnl_attr_size_of_stream_buffer.attr,
	&most_chnl_attr_set_number_of_buffers.attr,
	&most_chnl_attr_set_buffer_size.attr,
	&most_chnl_attr_set_direction.attr,
	&most_chnl_attr_set_datatype.attr,
	&most_chnl_attr_set_subbuffer_size.attr,
	&most_chnl_attr_set_packets_per_xact.attr,
	&most_chnl_attr_channel_starving.attr,
	NULL,
};

static struct kobj_type most_channel_ktype = {
	.sysfs_ops = &most_channel_sysfs_ops,
	.release = most_channel_release,
	.default_attrs = most_channel_def_attrs,
};

static struct kset *most_channel_kset;

/**
 * create_most_c_obj - allocates a channel object
 * @name: name of the channel object
 * @parent: parent kobject
 *
 * This creates a channel object and registers it with sysfs.
 * Returns a pointer to the object or NULL when something went wrong.
 */
static struct most_c_obj *
create_most_c_obj(const char *name, struct kobject *parent)
{
	struct most_c_obj *c;
	int retval;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;
	c->kobj.kset = most_channel_kset;
	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
				      "%s", name);
	if (retval) {
		kobject_put(&c->kobj);
		return NULL;
	}
	kobject_uevent(&c->kobj, KOBJ_ADD);
	return c;
}

/**
 * destroy_most_c_obj - channel release function
 * @c: pointer to channel object
 *
 * This decrements the reference counter of the channel object.
 * If the reference count turns zero, its release function is called.
 */
static void destroy_most_c_obj(struct most_c_obj *c)
{
	if (c->aim0.ptr)
		c->aim0.ptr->disconnect_channel(c->iface, c->channel_id);
	if (c->aim1.ptr)
		c->aim1.ptr->disconnect_channel(c->iface, c->channel_id);
	c->aim0.ptr = NULL;
	c->aim1.ptr = NULL;

	mutex_lock(&deregister_mutex);
	flush_trash_fifo(c);
	flush_channel_fifos(c);
	mutex_unlock(&deregister_mutex);
	kobject_put(&c->kobj);
}

/*		     ___	       ___
 *		     ___I N S T A N C E___
 */
#define MOST_INST_ATTR(_name, _mode, _show, _store) \
		struct most_inst_attribute most_inst_attr_##_name = \
		__ATTR(_name, _mode, _show, _store)

static struct list_head instance_list;

/**
 * struct most_inst_attribute - to access the attributes of instance object
 * @attr: attributes of an instance
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_inst_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_inst_obj *d,
			struct most_inst_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_inst_obj *d,
			 struct most_inst_attribute *attr,
			 const char *buf,
			 size_t count);
};

#define to_instance_attr(a) \
	container_of(a, struct most_inst_attribute, attr)

/**
 * instance_attr_show - show function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t instance_attr_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buf)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->show)
		return -EIO;

	return instance_attr->show(instance_obj, instance_attr, buf);
}

/**
 * instance_attr_store - store function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t instance_attr_store(struct kobject *kobj,
				   struct attribute *attr,
				   const char *buf,
				   size_t len)
{
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->store)
		return -EIO;

	return instance_attr->store(instance_obj, instance_attr, buf, len);
}

static const struct sysfs_ops most_inst_sysfs_ops = {
	.show = instance_attr_show,
	.store = instance_attr_store,
};

/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * This frees the allocated memory for the instance object
 */
static void most_inst_release(struct kobject *kobj)
{
	struct most_inst_obj *inst = to_inst_obj(kobj);

	kfree(inst);
}

static ssize_t show_description(struct most_inst_obj *instance_obj,
				struct most_inst_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n",
			instance_obj->iface->description);
}

static ssize_t show_interface(struct most_inst_obj *instance_obj,
			      struct most_inst_attribute *attr,
			      char *buf)
{
	switch (instance_obj->iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

#define create_inst_attribute(value) \
	static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)

create_inst_attribute(description);
create_inst_attribute(interface);

static struct attribute *most_inst_def_attrs[] = {
	&most_inst_attr_description.attr,
	&most_inst_attr_interface.attr,
	NULL,
};

static struct kobj_type most_inst_ktype = {
	.sysfs_ops = &most_inst_sysfs_ops,
	.release = most_inst_release,
	.default_attrs = most_inst_def_attrs,
};

static struct kset *most_inst_kset;

/**
 * create_most_inst_obj - creates an instance object
 * @name: name of the object to be created
 *
 * This allocates memory for an instance structure, assigns the proper kset
 * and registers it with sysfs.
 *
 * Returns a pointer to the instance object or NULL when something went wrong.
 */
static struct most_inst_obj *create_most_inst_obj(const char *name)
{
	struct most_inst_obj *inst;
	int retval;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return NULL;
	inst->kobj.kset = most_inst_kset;
	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
				      "%s", name);
	if (retval) {
		kobject_put(&inst->kobj);
		return NULL;
	}
	kobject_uevent(&inst->kobj, KOBJ_ADD);
	return inst;
}

/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * This decrements the reference counter of the instance object.
 * If the reference count turns zero, its release function is called
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
	struct most_c_obj *c, *tmp;

	/* need to destroy channels first, since
	 * each channel incremented the
	 * reference count of the inst->kobj
	 */
	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
		destroy_most_c_obj(c);
	}
	kobject_put(&inst->kobj);
}

/*		     ___     ___
 *		     ___A I M___
 */
struct most_aim_obj {
	struct kobject kobj;
	struct list_head list;
	struct most_aim *driver;
	char add_link[STRING_SIZE];
	char remove_link[STRING_SIZE];
};

#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)

static struct list_head aim_list;

/**
 * struct most_aim_attribute - to access the attributes of AIM object
 * @attr: attributes of an AIM
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_aim_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_aim_obj *d,
			struct most_aim_attribute *attr,
			char *buf);
	ssize_t (*store)(struct most_aim_obj *d,
			 struct most_aim_attribute *attr,
			 const char *buf,
			 size_t count);
};

#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)

/**
 * aim_attr_show - show function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 */
static ssize_t aim_attr_show(struct kobject *kobj,
			     struct attribute *attr,
			     char *buf)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->show)
		return -EIO;

	return aim_attr->show(aim_obj, aim_attr, buf);
}

/**
 * aim_attr_store - store function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @buf: buffer
 * @len: length of buffer
 */
static ssize_t aim_attr_store(struct kobject *kobj,
			      struct attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->store)
		return -EIO;
	return aim_attr->store(aim_obj, aim_attr, buf, len);
}

static const struct sysfs_ops most_aim_sysfs_ops = {
	.show = aim_attr_show,
	.store = aim_attr_store,
};

/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 */
static void most_aim_release(struct kobject *kobj)
{
	struct most_aim_obj *aim_obj = to_aim_obj(kobj);

	kfree(aim_obj);
}

static ssize_t show_add_link(struct most_aim_obj *aim_obj,
			     struct most_aim_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
}

/**
 * split_string - parses the string in buffer buf (modifying it) and
 * splits it into two mandatory and one optional substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: address of pointer to 1st substring (=instance name)
 * @b: address of pointer to 2nd substring (=channel name)
 * @c: optional address of pointer to 3rd substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch0@ep_81:my_channel\n" or
 *        "mdev0:ch0@ep_81:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
 *
 * Input: "mdev0:ch0@ep_81\n"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
 *
 * Input: "mdev0:ch0@ep_81"
 * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c)
{
	*a = strsep(&buf, ":");
	if (!*a)
		return -EIO;

	*b = strsep(&buf, ":\n");
	if (!*b)
		return -EIO;

	if (c)
		*c = strsep(&buf, ":\n");

	return 0;
}

/**
 * get_channel_by_name - get pointer to channel object
 * @mdev: name of the device instance
 * @mdev_ch: name of the respective channel
 *
 * This retrieves the pointer to a channel object.
 */
static struct
most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
{
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;
	int found = 0;

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		if (!strcmp(kobject_name(&i->kobj), mdev)) {
			found++;
			break;
		}
	}
	if (unlikely(!found))
		return ERR_PTR(-EIO);

	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
			found++;
			break;
		}
	}
	if (unlikely(found < 2))
		return ERR_PTR(-EIO);
	return c;
}

/**
 * store_add_link - store() function for add_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * three substrings. Note: the third substring is optional. In case a cdev
 * AIM is loaded, the optional 3rd substring will make up the name of the
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for a pair of device and channel and probes the AIM
 *
 * Example:
 * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
 * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev0-ch0@ep_81
 */
static ssize_t store_add_link(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *mdev_devnod;
	char devnod_buf[STRING_SIZE];
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->add_link, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
	if (ret)
		return ret;

	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		mdev_devnod = devnod_buf;
	}

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (!c->aim0.ptr)
		aim_ptr = &c->aim0.ptr;
	else if (!c->aim1.ptr)
		aim_ptr = &c->aim1.ptr;
	else
		return -ENOSPC;

	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
	if (ret)
		return ret;
	*aim_ptr = aim_obj->driver;
	return len;
}

static struct most_aim_attribute most_aim_attr_add_link =
	__ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);

static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
				struct most_aim_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
}

/**
 * store_remove_link - store function for remove_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * Example:
 * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
 */
static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
				 struct most_aim_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct most_c_obj *c;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	int ret;
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	strlcpy(aim_obj->remove_link, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
	if (ret)
		return ret;

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	if (c->aim0.ptr == aim_obj->driver)
		c->aim0.ptr = NULL;
	if (c->aim1.ptr == aim_obj->driver)
		c->aim1.ptr = NULL;
	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	return len;
}

static struct most_aim_attribute most_aim_attr_remove_link =
	__ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
	       store_remove_link);

static struct attribute *most_aim_def_attrs[] = {
	&most_aim_attr_add_link.attr,
	&most_aim_attr_remove_link.attr,
	NULL,
};

static struct kobj_type most_aim_ktype = {
	.sysfs_ops = &most_aim_sysfs_ops,
	.release = most_aim_release,
	.default_attrs = most_aim_def_attrs,
};

static struct kset *most_aim_kset;

/**
 * create_most_aim_obj - creates an AIM object
 * @name: name of the AIM
 *
 * This creates an AIM object, assigns the proper kset, and registers
 * it with sysfs.
 * Returns a pointer to the object or NULL if something went wrong.
 */
static struct most_aim_obj *create_most_aim_obj(const char *name)
{
	struct most_aim_obj *most_aim;
	int retval;

	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
	if (!most_aim)
		return NULL;
	most_aim->kobj.kset = most_aim_kset;
	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
				      NULL, "%s", name);
	if (retval) {
		kobject_put(&most_aim->kobj);
		return NULL;
	}
	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
	return most_aim;
}

/**
 * destroy_most_aim_obj - AIM release function
 * @p: pointer to AIM object
 *
 * This decrements the reference counter of the AIM object. If the
 * reference count turns zero, its release function will be called.
 */
static void destroy_most_aim_obj(struct most_aim_obj *p)
{
	kobject_put(&p->kobj);
}

/*		     ___       ___
 *		     ___C O R E___
 */

/**
 * Instantiation of the MOST bus
 */
static struct bus_type most_bus = {
	.name = "most",
};

/**
 * Instantiation of the core driver
 */
static struct device_driver mostcore = {
	.name = "mostcore",
	.bus = &most_bus,
};

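/**
 * trash_mbo - place an MBO on the channel's trash fifo
 * @mbo: buffer object to be trashed
 *
 * MBOs parked here are released later by flush_trash_fifo().
 */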
static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

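/**
 * get_hdm_mbo - fetch the next MBO destined for the HDM
 * @c: pointer to channel object
 *
 * Returns the first MBO of the channel's halt fifo, or NULL if the
 * fifo is empty or enqueueing is currently halted.
 */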
static struct mbo *get_hdm_mbo(struct most_c_obj *c)
{
	unsigned long flags;
	struct mbo *mbo;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (c->enqueue_halt || list_empty(&c->halt_fifo))
		mbo = NULL;
	else
		mbo = list_pop_mbo(&c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return mbo;
}

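/**
 * nq_hdm_mbo - queue an MBO for the HDM and wake the enqueue thread
 * @mbo: buffer object to be queued
 */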
static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

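/**
 * hdm_enqueue_thread - thread that feeds waiting MBOs to the HDM
 * @data: pointer to the channel object the thread serves
 *
 * Sleeps until an MBO shows up in the halt fifo (or the thread is
 * told to stop) and hands it to the interface's enqueue() method.
 */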
static int hdm_enqueue_thread(void *data)
{
	struct most_c_obj *c = data;
	struct mbo *mbo;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 (mbo = get_hdm_mbo(c)) ||
					 kthread_should_stop());

		if (unlikely(!mbo))
			continue;

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

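/**
 * run_enqueue_thread - start the enqueue thread of a channel
 * @c: pointer to channel object
 * @channel_id: channel ID used to name the thread
 */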
static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back on the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->aim0.refs && c->aim0.ptr->tx_completion)
		c->aim0.ptr->tx_completion(c->iface, c->channel_id);

	if (c->aim1.refs && c->aim1.ptr->tx_completion)
		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			pr_info("WARN: Allocation of MBO failed.\n");
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: pointer to the MBO
 *
 */
int most_submit_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	if (unlikely((!mbo) || (!mbo->context))) {
		pr_err("Bad MBO or missing channel reference\n");
		return -EINVAL;
	}
	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted)))
		return -ENODEV;

	nq_hdm_mbo(mbo);
	return 0;
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: pointer to MBO
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));

	c = mbo->context;
	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

/**
 * get_channel_by_iface - get pointer to channel object
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This retrieves a pointer to a channel of the given interface and channel ID.
 */
static struct
most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
{
	struct most_inst_obj *i;

	if (unlikely(!iface)) {
		pr_err("Bad interface\n");
		return NULL;
	}
	if (unlikely((id < 0) || (id >= iface->num_channels))) {
		pr_err("Channel index (%d) out of range\n", id);
		return NULL;
	}
	i = iface->priv;
	if (unlikely(!i)) {
		pr_err("interface is not registered\n");
		return NULL;
	}
	return i->channel[id];
}

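/**
 * channel_has_mbo - check if a channel's fifo holds an MBO
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * Returns 1 if at least one MBO is available, 0 if the fifo is empty
 * and -EINVAL on bad parameters.
 */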
int channel_has_mbo(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_aim *aim)
{
	struct mbo *mbo;
	struct most_c_obj *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return NULL;

	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
		return NULL;

	if (aim == c->aim0.ptr)
		num_buffers_ptr = &c->aim0.num_buffers;
	else if (aim == c->aim1.ptr)
		num_buffers_ptr = &c->aim1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);

/**
 * most_put_mbo - return buffer to pool
 * @mbo: buffer object
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_c_obj *c;
	struct most_inst_obj *i;

	c = mbo->context;
	i = c->inst;

	if (unlikely(atomic_read(&i->tainted))) {
		mbo->status = MBO_E_CLOSE;
		trash_mbo(mbo);
		return;
	}
	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);

/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
		pr_info("WARN: rx device out of buffers\n");
		c->is_starving = 1;
	}

	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
	    c->aim0.ptr->rx_completion(mbo) == 0)
		return;

	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
	    c->aim1.ptr->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_aim *aim)
{
	int num_buffer;
	int ret;
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs > 0)
		goto out; /* already started by other aim */

	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}
	modref++;

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_starving = 0;
	c->aim0.num_buffers = c->cfg.num_buffers / 2;
	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs++;
	if (aim == c->aim1.ptr)
		c->aim1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

error:
	if (iface->mod)
		module_put(iface->mod);
	modref--;
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_aim *aim)
{
	struct most_c_obj *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs >= 2)
		goto out;

	mutex_lock(&c->stop_task_mutex);
	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;
	mutex_unlock(&c->stop_task_mutex);

	mutex_lock(&deregister_mutex);
	if (atomic_read(&c->inst->tainted)) {
		mutex_unlock(&deregister_mutex);
		mutex_unlock(&c->start_mutex);
		return -ENODEV;
	}
	mutex_unlock(&deregister_mutex);

	if (iface->mod && modref) {
		module_put(iface->mod);
		modref--;
	}

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs--;
	if (aim == c->aim1.ptr)
		c->aim1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

/**
 * most_register_aim - registers an AIM (driver) with the core
 * @aim: instance of AIM to be registered
 */
int most_register_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}
	aim_obj = create_most_aim_obj(aim->name);
	if (!aim_obj) {
		pr_info("failed to alloc driver object\n");
		return -ENOMEM;
	}
	aim_obj->driver = aim;
	aim->context = aim_obj;
	pr_info("registered new application interfacing module %s\n",
		aim->name);
	list_add_tail(&aim_obj->list, &aim_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_aim);

/**
 * most_deregister_aim - deregisters an AIM (driver) with the core
 * @aim: AIM to be removed
 */
int most_deregister_aim(struct most_aim *aim)
{
	struct most_aim_obj *aim_obj;
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;

	if (!aim) {
		pr_err("Bad driver\n");
		return -EINVAL;
	}

	aim_obj = aim->context;
	if (!aim_obj) {
		pr_info("driver not registered.\n");
		return -EINVAL;
	}
	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
			if (c->aim0.ptr == aim || c->aim1.ptr == aim)
				aim->disconnect_channel(
					c->iface, c->channel_id);
			if (c->aim0.ptr == aim)
				c->aim0.ptr = NULL;
			if (c->aim1.ptr == aim)
				c->aim1.ptr = NULL;
		}
	}
	list_del(&aim_obj->list);
	destroy_most_aim_obj(aim_obj);
	pr_info("deregistering application interfacing module %s\n", aim->name);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_aim);

/**
 * most_register_interface - registers an interface with core
 * @iface: pointer to the instance of the interface description.
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns a pointer to kobject or an error pointer.
 */
struct kobject *most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	char name[STRING_SIZE];
	char channel_name[STRING_SIZE];
	struct most_c_obj *c;
	struct most_inst_obj *inst;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return ERR_PTR(-EINVAL);
	}

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return ERR_PTR(id);
	}
	snprintf(name, STRING_SIZE, "mdev%d", id);

	inst = create_most_inst_obj(name);
	if (!inst) {
		pr_info("Failed to allocate interface instance\n");
		return ERR_PTR(-ENOMEM);
	}

	iface->priv = inst;
	INIT_LIST_HEAD(&inst->channel_list);
	inst->iface = iface;
	inst->dev_id = id;
	atomic_set(&inst->tainted, 0);
	list_add_tail(&inst->list, &instance_list);

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		if (!name_suffix)
			snprintf(channel_name, STRING_SIZE, "ch%d", i);
		else if (name_suffix[0] == '@')
			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
				 name_suffix);
		else
			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);

		/* this increments the reference count of this instance */
		c = create_most_c_obj(channel_name, &inst->kobj);
		if (!c)
			goto free_instance;
		inst->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->inst = inst;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->stop_task_mutex);
		list_add_tail(&c->list, &inst->channel_list);
	}
	pr_info("registered new MOST device mdev%d (%s)\n",
		inst->dev_id, iface->description);
	return &inst->kobj;

free_instance:
	pr_info("Failed to allocate channel(s)\n");
	list_del(&inst->list);
	destroy_most_inst_obj(inst);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(most_register_interface);

/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: pointer to the interface instance description.
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	struct most_inst_obj *i = iface->priv;
	struct most_c_obj *c;

	mutex_lock(&deregister_mutex);
	if (unlikely(!i)) {
		pr_info("Bad Interface\n");
		mutex_unlock(&deregister_mutex);
		return;
	}
	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
		iface->description);

	atomic_set(&i->tainted, 1);
	mutex_unlock(&deregister_mutex);

	while (modref) {
		if (iface->mod && modref)
			module_put(iface->mod);
		modref--;
	}

	list_for_each_entry(c, &i->channel_list, list) {
		if (c->aim0.refs + c->aim1.refs <= 0)
			continue;

		mutex_lock(&c->stop_task_mutex);
		if (c->hdm_enqueue_task)
			kthread_stop(c->hdm_enqueue_task);
		c->hdm_enqueue_task = NULL;
		mutex_unlock(&c->stop_task_mutex);

		if (iface->poison_channel(iface, c->channel_id))
			pr_err("Can't poison channel %d\n", c->channel_id);
	}
	ida_simple_remove(&mdev_id, i->dev_id);
	list_del(&i->list);
	destroy_most_inst_obj(i);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * is about to be overrun by the core. The core will not enqueue
 * any further packets unless the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (likely(c))
		c->enqueue_halt = true;
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return;
	c->enqueue_halt = false;

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);

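/**
 * most_init - set up the core on module load
 *
 * Registers the MOST bus and core driver, creates the "most" class
 * with its glue device, and adds the "aims" and "devices" ksets.
 * Unwinds in reverse order on any failure.
 */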
static int __init most_init(void)
{
	pr_info("init()\n");
	INIT_LIST_HEAD(&instance_list);
	INIT_LIST_HEAD(&aim_list);
	mutex_init(&deregister_mutex);
	ida_init(&mdev_id);

	if (bus_register(&most_bus)) {
		pr_info("Cannot register most bus\n");
		goto exit;
	}

	most_class = class_create(THIS_MODULE, "most");
	if (IS_ERR(most_class)) {
		pr_info("No udev support.\n");
		goto exit_bus;
	}
	if (driver_register(&mostcore)) {
		pr_info("Cannot register core driver\n");
		goto exit_class;
	}

	class_glue_dir =
		device_create(most_class, NULL, 0, NULL, "mostcore");
	if (!class_glue_dir)
		goto exit_driver;

	most_aim_kset =
		kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
	if (!most_aim_kset)
		goto exit_class_container;

	most_inst_kset =
		kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
	if (!most_inst_kset)
		goto exit_driver_kset;

	return 0;

exit_driver_kset:
	kset_unregister(most_aim_kset);
exit_class_container:
	device_destroy(most_class, 0);
exit_driver:
	driver_unregister(&mostcore);
exit_class:
	class_destroy(most_class);
exit_bus:
	bus_unregister(&most_bus);
exit:
	return -ENOMEM;
}

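/**
 * most_exit - tear down the core on module unload
 *
 * Destroys all remaining AIM and instance objects, then unregisters
 * everything most_init() set up.
 */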
static void __exit most_exit(void)
{
	struct most_inst_obj *i, *i_tmp;
	struct most_aim_obj *d, *d_tmp;

	pr_info("exit core module\n");
	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
		destroy_most_aim_obj(d);
	}

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		list_del(&i->list);
		destroy_most_inst_obj(i);
	}
	kset_unregister(most_inst_kset);
	kset_unregister(most_aim_kset);
	device_destroy(most_class, 0);
	driver_unregister(&mostcore);
	class_destroy(most_class);
	bus_unregister(&most_bus);
	ida_destroy(&mdev_id);
}

module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");