/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);

/*
 * Counts the outstanding ISOC frames submitted to the network card.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
	[OZ_APPID_USB] = {
		.init      = oz_usb_init,
		.term      = oz_usb_term,
		.start     = oz_usb_start,
		.stop      = oz_usb_stop,
		.rx        = oz_usb_rx,
		.heartbeat = oz_usb_heartbeat,
		.farewell  = oz_usb_farewell,
	},
	[OZ_APPID_SERIAL] = {
		.init      = oz_cdev_init,
		.term      = oz_cdev_term,
		.start     = oz_cdev_start,
		.stop      = oz_cdev_stop,
		.rx        = oz_cdev_rx,
	},
};

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
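/* Drops a reference taken with oz_pd_get(); when the last reference goes
 * the PD is torn down via oz_pd_destroy().
 */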
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}

/*
 * Context: softirq-serialized
 */
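/* Allocates and initialises a PD for the peer with the given MAC address.
 * The reference count starts at 2: presumably one reference for the caller
 * and one for the protocol layer's PD list, each dropped with oz_pd_put().
 */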
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);

	if (pd) {
		int i;

		atomic_set(&pd->ref_count, 2);
		for (i = 0; i < OZ_NB_APPS; i++)
			spin_lock_init(&pd->app_lock[i]);
		pd->last_rx_pkt_num = 0xffffffff;
		oz_pd_set_state(pd, OZ_PD_S_IDLE);
		pd->max_tx_size = OZ_MAX_TX_SIZE;
		ether_addr_copy(pd->mac_addr, mac_addr);
		oz_elt_buf_init(&pd->elt_buff);
		spin_lock_init(&pd->tx_frame_lock);
		INIT_LIST_HEAD(&pd->tx_queue);
		INIT_LIST_HEAD(&pd->farewell_list);
		pd->last_sent_frame = &pd->tx_queue;
		spin_lock_init(&pd->stream_lock);
		INIT_LIST_HEAD(&pd->stream_list);
		tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
							(unsigned long)pd);
		tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
							(unsigned long)pd);
		hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		pd->heartbeat.function = oz_pd_heartbeat_event;
		pd->timeout.function = oz_pd_timeout_event;
	}
	return pd;
}

/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e, *n;
	struct oz_pd *pd = container_of(work, struct oz_pd, workitem);

	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);

	/* Free streams, queued tx frames and farewells. */

	list_for_each_safe(e, n, &pd->stream_list)
		oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));

	list_for_each_safe(e, n, &pd->tx_queue) {
		struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);

		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}

	oz_elt_buf_term(&pd->elt_buff);

	list_for_each_safe(e, n, &pd->farewell_list)
		kfree(list_entry(e, struct oz_farewell, link));

	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
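/* Cancels the PD's timers and defers the actual teardown to the system
 * workqueue: oz_pd_free() calls tasklet_kill(), which may sleep, so it
 * cannot run directly in softirq context.
 */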
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
 * Context: softirq-serialized
 */
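/* Starts the application services selected by the "apps" bitmask (one bit
 * per OZ_APPID_* slot in g_app_if). Bits that start successfully are added
 * to pd->total_apps; on resume they are also cleared from pd->paused_apps.
 * Returns 0 on success or -1 if any service fails to start.
 */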
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	int i, rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].start && (apps & (1 << i))) {
			if (g_app_if[i].start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n", i);
				break;
			}
			spin_lock_bh(&g_polling_lock);
			pd->total_apps |= (1 << i);
			if (resume)
				pd->paused_apps &= ~(1 << i);
			spin_unlock_bh(&g_polling_lock);
		}
	}
	return rc;
}

/*
 * Context: softirq or process
 */
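/* Pauses (pause != 0) or fully stops (pause == 0) the services in the
 * "apps" bitmask, updating pd->paused_apps and pd->total_apps under
 * g_polling_lock before invoking each service's stop() handler.
 */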
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	int i;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].stop && (apps & (1 << i))) {
			spin_lock_bh(&g_polling_lock);
			if (pause) {
				pd->paused_apps |=  (1 << i);
			} else {
				pd->total_apps  &= ~(1 << i);
				pd->paused_apps &= ~(1 << i);
			}
			spin_unlock_bh(&g_polling_lock);
			g_app_if[i].stop(pd, pause);
		}
	}
}

/*
 * Context: softirq
 */
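/* Runs the heartbeat handler of every service in the "apps" bitmask. If no
 * service asks to be polled again, the heartbeat hrtimer is cancelled. In
 * ISOC "anytime" mode, up to eight pending ISOC frames are also flushed.
 */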
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	int i, more = 0;

	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].heartbeat && (apps & (1 << i))) {
			if (g_app_if[i].heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}

/*
 * Context: softirq or process
 */
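/* Delivers outstanding farewells, stops every running service, marks the
 * PD stopped, removes it from the protocol's PD list and drops a reference
 * (presumably the one taken for that list in oz_pd_alloc()).
 */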
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	spin_lock_bh(&g_polling_lock);
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	spin_unlock_bh(&g_polling_lock);
	oz_services_stop(pd, stop_apps, 0);
	spin_lock_bh(&g_polling_lock);
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list. */
	list_del(&pd->link);
	spin_unlock_bh(&g_polling_lock);
	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}

/*
 * Context: softirq
 */
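/* Puts the PD to sleep when it has both a keep-alive period and a session
 * ID; otherwise it is stopped outright. Returns nonzero if the PD was
 * stopped rather than put to sleep.
 */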
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		spin_unlock_bh(&g_polling_lock);
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;

	stop_apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f;

	f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
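/* Unlinks a queued ISOC frame and returns it to the frame cache. Callers
 * hold pd->tx_frame_lock.
 */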
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);

	kmem_cache_free(oz_tx_frame_cache, f);

	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	kmem_cache_free(oz_tx_frame_cache, f);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

/*
 * Context: softirq
 */
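/* Builds one TX frame from buffered elements and appends it to the TX
 * queue. With "empty" set, a frame is created even when no elements are
 * waiting. Returns 0 on success or -1 if no frame could be queued.
 */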
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
			pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}

/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_info *ei;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	list_for_each_entry(ei, &f->elt_list, link) {
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
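/* Completes a frame: runs each element's completion callback, returns the
 * element buffers to the pool and frees the frame itself.
 */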
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct oz_elt_info *ei, *n;

	list_for_each_entry_safe(ei, n, &f->elt_list, link) {
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
}

/*
 * Context: softirq-serialized
 */
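/* Transmits the frame that follows pd->last_sent_frame in the TX queue.
 * ISOC frames (f->skb != NULL) are unlinked and sent subject to the global
 * OZ_MAX_SUBMITTED_ISOC limit; ordinary frames are built from their
 * element list and stay queued until acknowledged. Returns 0 if a frame
 * was handed to the network layer, -1 otherwise.
 */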
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = list_entry(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
							OZ_MAX_SUBMITTED_ISOC) {
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		}
		kfree_skb(skb);
		oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
		return -1;
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (!skb)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;

	return 0;
}

/*
 * Context: softirq-serialized
 */
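/* Converts buffered elements into frames and transmits up to "backlog" of
 * them, adjusting the budget for the PD's ISOC mode. If there is nothing
 * to send, an empty frame is prepared and sent instead (the "out" path),
 * apparently so the peer still receives a response to its trigger.
 */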
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
	case OZ_F_ISOC_NO_ELTS:
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	case OZ_NO_ELTS_ANYTIME:
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	default:
		if (backlog <= 0)
			goto out;
		break;
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:
	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
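/* Builds and immediately transmits a single frame carrying any buffered
 * ISOC elements (used in ISOC "anytime" mode). Returns 0 when nothing was
 * pending or the frame was queued for transmission, -1 on failure.
 */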
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_info *ei;
	LIST_HEAD(list);
	int total_size = sizeof(struct oz_hdr);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
		pd->max_tx_size, &list);
	if (list_empty(&list))
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	list_for_each_entry(ei, &list, link) {
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}

/*
 * Context: softirq-serialized
 */
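/* Retires every queued frame whose packet number is at or before "lpn",
 * the last packet number acknowledged by the peer, using modulo
 * arithmetic over OZ_LAST_PN_MASK to cope with wrap-around, then resets
 * the send cursor to the head of the TX queue.
 */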
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct oz_tx_frame *f, *tmp = NULL;
	u8 diff;
	u32 pkt_num;

	LIST_HEAD(list);

	spin_lock(&pd->tx_frame_lock);
	list_for_each_entry(f, &pd->tx_queue, link) {
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		tmp = f;
		pd->nb_queued_frames--;
	}
	if (tmp)
		list_cut_position(&list, &pd->tx_queue, &tmp->link);
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);

	list_for_each_entry_safe(f, tmp, &list, link)
		oz_retire_frame(pd, f);
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	list_for_each_entry(st, &pd->stream_list, link) {
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}

/*
 * Context: softirq
 */
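/* Creates an ISOC stream for the given endpoint. If no stream exists yet,
 * the new one is linked in and the local pointer is cleared so the trailing
 * kfree() is a no-op; if one already exists, the fresh allocation is freed.
 */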
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}

/*
 * Context: softirq
 */
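/* Appends one millisecond's worth of ISOC data for "ep_num" to the
 * stream's pending skb, allocating the skb on first use. Once
 * pd->ms_per_isoc units have accumulated, the frame is finished with an
 * oz_isoc_large header and either queued on the TX queue (triggered mode,
 * bounded by pd->isoc_latency) or transmitted immediately ("anytime"
 * mode, bounded by OZ_MAX_SUBMITTED_ISOC).
 */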
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packets, set the priority to AC_VO. */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/* Queue for xmit if mode is not ANYTIME. */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

			if (nb >= pd->isoc_latency) {
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
				       nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each_entry(f, &pd->tx_queue, link) {
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode, xmit the unit immediately. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			return 0;
		}

out:
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].init)
			g_app_if[i].init();
	}
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].term)
			g_app_if[i].term();
	}
}

/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
		g_app_if[app_id].rx(pd, elt);
}

/*
 * Context: softirq or process
 */
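/* Delivers every pending farewell report to the USB service's farewell
 * handler, removing each entry from the list under g_polling_lock.
 */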
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];

	while (1) {
		spin_lock_bh(&g_polling_lock);
		if (list_empty(&pd->farewell_list)) {
			spin_unlock_bh(&g_polling_lock);
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				struct oz_farewell, link);
		list_del(&f->link);
		spin_unlock_bh(&g_polling_lock);
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}