/*
 * drivers/amlogic/media/stream_input/parser/thread_rw.c
 *
 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/amlogic/media/codec_mm/codec_mm.h>

/* #include <mach/am_regs.h> */
#include <linux/delay.h>

#include "streambuf.h"
#include "amports_priv.h"
#include "thread_rw.h"

#define BUF_NAME "fetchbuf"

#define DEFAULT_BLOCK_SIZE (64*1024)

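/*
 * One staging buffer: a DMA-able block that holds user data until the
 * background writer pushes it into the stream buffer.
 */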
struct threadrw_buf {
	void *vbuffer;
	dma_addr_t dma_handle;
	int write_off;
	int data_size;
	int buffer_size;
	int from_cma;
};

#define MAX_MM_BUFFER_NUM 16
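/*
 * Per-stream write task: staging buffers cycle between freefifo and
 * datafifo, and a delayed work item drains datafifo into the stream buffer.
 */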
struct threadrw_write_task {
	struct file *file;
	struct delayed_work write_work;
	DECLARE_KFIFO_PTR(datafifo, void *);
	DECLARE_KFIFO_PTR(freefifo, void *);
	int bufs_num;
	int max_bufs;
	int errors;
	spinlock_t lock;
	struct mutex mutex;
	struct stream_buf_s *sbuf;
	int buffered_data_size;
	int passed_data_len;
	int buffer_size;
	int def_block_size;
	int data_offset;
	int writework_on;
	unsigned long codec_mm_buffer[MAX_MM_BUFFER_NUM];
	int manual_write;
	int failed_onmore;
	wait_queue_head_t wq;
	ssize_t (*write)(struct file *,
		struct stream_buf_s *,
		const char __user *,
		size_t, int);
	struct threadrw_buf buf[1];
	/* don't add any fields after buf[] */
};

static int free_task_buffers(struct threadrw_write_task *task);

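/*
 * Lazily create the singlethread workqueue shared by all threadrw tasks.
 */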
static struct workqueue_struct *threadrw_wq_get(void)
{
	static struct workqueue_struct *threadrw_wq;

	if (!threadrw_wq)
		threadrw_wq = create_singlethread_workqueue("threadrw");
	return threadrw_wq;
}

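/*
 * Queue the write work on the threadrw workqueue (or the system queue as a
 * fallback); if it was already pending, cancel it and requeue immediately.
 */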
static int threadrw_schedule_delayed_work(
		struct threadrw_write_task *task,
		unsigned long delay)
{
	bool ret;

	if (threadrw_wq_get()) {
		ret = queue_delayed_work(threadrw_wq_get(),
			&task->write_work, delay);
	} else
		ret = schedule_delayed_work(&task->write_work, delay);
	if (!ret) {
		cancel_delayed_work(&task->write_work);
		if (threadrw_wq_get())
			ret = queue_delayed_work(threadrw_wq_get(),
					&task->write_work, 0);
		else
			ret = schedule_delayed_work(&task->write_work, 0);
	}
	return 0;
}

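/*
 * Copy at most one free buffer's worth of user data, queue it on the data
 * fifo and kick the write work.
 */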
static ssize_t threadrw_write_onece(
	struct threadrw_write_task *task,
	struct file *file,
	struct stream_buf_s *stbuf,
	const char __user *buf, size_t count)
{
	struct threadrw_buf *rwbuf = NULL;
	int ret = 0;
	int to_write;

	if (!kfifo_get(&task->freefifo, (void *)&rwbuf)) {
		if (task->errors)
			return task->errors;
		return -EAGAIN;
	}

	to_write = min_t(u32, rwbuf->buffer_size, count);
	if (copy_from_user(rwbuf->vbuffer, buf, to_write)) {
		/* return the unused buffer to the free fifo on copy failure */
		kfifo_put(&task->freefifo, (const void *)rwbuf);
		ret = -EFAULT;
		goto err;
	}
	rwbuf->data_size = to_write;
	rwbuf->write_off = 0;
	kfifo_put(&task->datafifo, (const void *)rwbuf);
	threadrw_schedule_delayed_work(task, 0);
	return to_write;
err:
	return ret;
}

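/*
 * Loop over threadrw_write_onece() until all user data is staged, waiting
 * briefly for a free buffer unless the file was opened O_NONBLOCK.
 */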
static ssize_t threadrw_write_in(
	struct threadrw_write_task *task,
	struct stream_buf_s *stbuf,
	const char __user *buf, size_t count)
{
	int ret = 0;
	int off = 0;
	/* left is size_t (not int) to avoid overflow; see OTT-5057 */
	size_t left = count;
	int wait_num = 0;
	unsigned long flags;

	while (left > 0) {
		ret = threadrw_write_onece(task,
				task->file,
				stbuf, buf + off, left);

		/* Check ret < 0 first so a negative ret (e.g. -EAGAIN) is not
		 * implicitly converted to size_t when compared with left.
		 */
		if (ret < 0) {
			if (off > 0) {
				break;	/* some data was already written */
			} else if (ret == -EAGAIN) {
				if (!(task->file->f_flags & O_NONBLOCK) &&
					(++wait_num < 10)) {
					wait_event_interruptible_timeout(
						task->wq,
						!kfifo_is_empty(
							&task->freefifo),
						HZ / 100);
					continue;	/* write again. */
				}
				ret = -EAGAIN;
				break;
			}
			break;	/* to end */
		} else if (ret >= left) {
			off = count;
			left = 0;
		} else if (ret > 0) {
			off += ret;
			left -= ret;
		}
	}

	/*end: */
	spin_lock_irqsave(&task->lock, flags);
	if (off > 0) {
		task->buffered_data_size += off;
		task->data_offset += off;
	}
	spin_unlock_irqrestore(&task->lock, flags);
	if (off > 0)
		return off;
	else
		return ret;
}

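/*
 * Drain one buffer from the data fifo into the stream buffer; returns
 * non-zero when more work remains. Called with task->mutex held.
 */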
static int do_write_work_in(struct threadrw_write_task *task)
{
	struct threadrw_buf *rwbuf = NULL;
	int ret;
	int need_re_write = 0;
	int write_len = 0;
	unsigned long flags;

	if (kfifo_is_empty(&task->datafifo))
		return 0;
	if (!kfifo_peek(&task->datafifo, (void *)&rwbuf))
		return 0;
	if (!task->manual_write &&
			rwbuf->from_cma &&
			!rwbuf->write_off)
		codec_mm_dma_flush(rwbuf->vbuffer,
						rwbuf->buffer_size,
						DMA_TO_DEVICE);
	if (task->manual_write) {
		ret = task->write(task->file, task->sbuf,
			(const char __user *)rwbuf->vbuffer + rwbuf->write_off,
			rwbuf->data_size,
			2);	/* nonblocking, virtual address */
	} else {
		ret = task->write(task->file, task->sbuf,
		(const char __user *)rwbuf->dma_handle + rwbuf->write_off,
		rwbuf->data_size,
		3);	/* nonblocking, physical address */
	}
	if (ret == -EAGAIN) {
		need_re_write = 0;
		/* retry later */
	} else if (ret >= rwbuf->data_size) {
		write_len += rwbuf->data_size;
		if (kfifo_get(&task->datafifo, (void *)&rwbuf)) {
			rwbuf->data_size = 0;
			kfifo_put(&task->freefifo, (const void *)rwbuf);
			/* wake up the writing thread */
			wake_up_interruptible(&task->wq);
		} else
			pr_err("write ok, but kfifo_get data failed!\n");
		need_re_write = 1;
	} else if (ret > 0) {
		rwbuf->data_size -= ret;	/* partial write */
		rwbuf->write_off += ret;
		write_len += ret;
		need_re_write = 1;
	} else {		/* ret <= 0 */
		pr_err("get errors ret=%d size=%d\n", ret,
			rwbuf->data_size);
		task->errors = ret;
	}
	if (write_len > 0) {
		spin_lock_irqsave(&task->lock, flags);
		task->passed_data_len += write_len;
		spin_unlock_irqrestore(&task->lock, flags);
	}
	return need_re_write;
}

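/*
 * Delayed-work handler: keep draining the data fifo, then rearm itself.
 */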
static void do_write_work(struct work_struct *work)
{
	struct threadrw_write_task *task = container_of(work,
					struct threadrw_write_task,
					write_work.work);
	int need_retry = 1;

	task->writework_on = 1;
	while (need_retry) {
		mutex_lock(&task->mutex);
		need_retry = do_write_work_in(task);
		mutex_unlock(&task->mutex);
	}
	threadrw_schedule_delayed_work(task, HZ / 10);
	task->writework_on = 0;
}

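/*
 * Allocate more staging buffers, preferring one large codec_mm allocation
 * split into blocks and falling back to per-buffer dma_alloc_coherent().
 * Called with task->mutex held.
 */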
static int alloc_task_buffers_inlock(struct threadrw_write_task *task,
		int new_buffers,
		int block_size)
{
	struct threadrw_buf *rwbuf;
	int i;
	int used_codec_mm = task->manual_write ? 0 : 1;
	int new_num = new_buffers;
	int mm_slot = -1;
	int start_idx = task->bufs_num;
	int total_mm = 0;
	unsigned long addr;

	if (codec_mm_get_total_size() < 80 ||
		codec_mm_get_free_size() < 40)
		used_codec_mm = 0;
	if (task->bufs_num + new_num > task->max_bufs)
		new_num = task->max_bufs - task->bufs_num;
	for (i = 0; i < MAX_MM_BUFFER_NUM; i++) {
		if (task->codec_mm_buffer[i] == 0) {
			mm_slot = i;
			break;
		}
	}
	if (mm_slot < 0)
		used_codec_mm = 0;
	if (block_size <= 0)
		block_size = DEFAULT_BLOCK_SIZE;

	if (used_codec_mm && (block_size * new_num) >= 128 * 1024) {
		total_mm = ALIGN(block_size * new_num, (1 << 17));
		addr = codec_mm_alloc_for_dma(BUF_NAME,
					total_mm / PAGE_SIZE, 0,
					CODEC_MM_FLAGS_DMA_CPU);
		if (addr != 0) {
			task->codec_mm_buffer[mm_slot] = addr;
			task->buffer_size += total_mm;
		} else {
			used_codec_mm = 0;
		}
	}
	for (i = 0; i < new_num; i++) {
		int bufidx = start_idx + i;

		rwbuf = &task->buf[bufidx];
		rwbuf->buffer_size = block_size;
		if (used_codec_mm) {
			unsigned long start_addr =
					task->codec_mm_buffer[mm_slot];

			if (i == new_num - 1)
				rwbuf->buffer_size = total_mm -
						block_size * i;
			rwbuf->dma_handle = (dma_addr_t) start_addr +
						block_size * i;
			rwbuf->vbuffer = codec_mm_phys_to_virt(
						rwbuf->dma_handle);
			rwbuf->from_cma = 1;
		} else {
			rwbuf->vbuffer = dma_alloc_coherent(
					amports_get_dma_device(),
					rwbuf->buffer_size,
					&rwbuf->dma_handle, GFP_KERNEL);
			if (!rwbuf->vbuffer) {
				rwbuf->buffer_size = 0;
				rwbuf->dma_handle = 0;
				task->bufs_num = bufidx;
				break;
			}
			rwbuf->from_cma = 0;
			task->buffer_size += rwbuf->buffer_size;
		}

		kfifo_put(&task->freefifo, (const void *)rwbuf);
		task->bufs_num = bufidx + 1;
	}
	if (start_idx > 0 ||	/* already had buffers */
		task->bufs_num >= 3 ||
		task->bufs_num == new_num) {
		if (!task->def_block_size)
			task->def_block_size = task->buf[0].buffer_size;
		return 0;	/* must be >= 3 for swapping buffers */
	}
	if (task->bufs_num > 0)
		free_task_buffers(task);
	return -1;
}

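/*
 * Release all staging buffers, both codec_mm blocks and coherent DMA ones.
 */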
static int free_task_buffers(struct threadrw_write_task *task)
{
	int i;

	for (i = 0; i < MAX_MM_BUFFER_NUM; i++) {
		if (task->codec_mm_buffer[i])
			codec_mm_free_for_dma(BUF_NAME,
				task->codec_mm_buffer[i]);
	}
	for (i = 0; i < task->bufs_num; i++) {
		if (task->buf[i].vbuffer && task->buf[i].from_cma == 0)
			dma_free_coherent(amports_get_dma_device(),
				task->buf[i].buffer_size,
				task->buf[i].vbuffer,
				task->buf[i].dma_handle);
	}
	return 0;
}

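/*
 * Allocate and initialize a write task, its fifos and the initial buffers.
 * Bit 0 of flags selects manual (virtual-address) writes and marks audio;
 * for other streams the buffer cap is raised to 300.
 */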
static struct threadrw_write_task *threadrw_alloc_in(int num,
		int block_size,
		ssize_t (*write)(struct file *,
			struct stream_buf_s *,
			const char __user *, size_t, int),
			int flags)
{
	int max_bufs = num;
	int task_buffer_size;
	struct threadrw_write_task *task;
	int ret;

	if (!(flags & 1))	/* not audio */
		max_bufs = 300;	/* can be larger for video buffers */
	task_buffer_size = sizeof(struct threadrw_write_task) +
				sizeof(struct threadrw_buf) * max_bufs;
	task = vmalloc(task_buffer_size);
	if (!task)
		return NULL;
	memset(task, 0, task_buffer_size);

	spin_lock_init(&task->lock);
	mutex_init(&task->mutex);
	INIT_DELAYED_WORK(&task->write_work, do_write_work);
	init_waitqueue_head(&task->wq);
	ret = kfifo_alloc(&task->datafifo, max_bufs, GFP_KERNEL);
	if (ret)
		goto err1;
	ret = kfifo_alloc(&task->freefifo, max_bufs, GFP_KERNEL);
	if (ret)
		goto err2;
	task->write = write;
	task->file = NULL;
	task->buffer_size = 0;
	task->manual_write = flags & 1;
	task->max_bufs = max_bufs;
	mutex_lock(&task->mutex);
	ret = alloc_task_buffers_inlock(task, num, block_size);
	mutex_unlock(&task->mutex);
	if (ret < 0)
		goto err3;
	threadrw_wq_get();	/* start the workqueue */
	return task;

err3:
	kfifo_free(&task->freefifo);
err2:
	kfifo_free(&task->datafifo);
err1:
	vfree(task);
	pr_err("alloc threadrw failed num:%d,block:%d\n", num, block_size);
	return NULL;
}

/*
 * Amount of data currently buffered in the fifo.
 */

void threadrw_update_buffer_level(struct stream_buf_s *stbuf,
	int parsed_size)
{
	struct threadrw_write_task *task = stbuf->write_thread;
	unsigned long flags;

	if (task) {
		spin_lock_irqsave(&task->lock, flags);
		task->buffered_data_size -= parsed_size;
		spin_unlock_irqrestore(&task->lock, flags);
	}
}
EXPORT_SYMBOL(threadrw_update_buffer_level);

int threadrw_buffer_level(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (task)
		return task->buffered_data_size;
	return 0;
}

int threadrw_buffer_size(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (task)
		return task->buffer_size;
	return 0;
}

int threadrw_datafifo_len(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (task)
		return kfifo_len(&task->datafifo);
	return 0;
}

int threadrw_freefifo_len(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (task)
		return kfifo_len(&task->freefifo);
	return 0;
}

int threadrw_support_more_buffers(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (!task)
		return 0;
	if (task->failed_onmore)
		return 0;
	return task->max_bufs - task->bufs_num;
}

/*
 * Length of data already passed out of the fifo.
 */
int threadrw_passed_len(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (task)
		return task->passed_data_len;
	return 0;
}

/*
 * Total amount of data written so far.
 */
int threadrw_dataoffset(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;
	int offset = 0;

	if (task)
		return task->data_offset;
	return offset;
}

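/*
 * Write entry point: stage user data for the background writer instead of
 * writing it into the stream buffer synchronously.
 */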
ssize_t threadrw_write(struct file *file, struct stream_buf_s *stbuf,
					   const char __user *buf, size_t count)
{
	struct threadrw_write_task *task = stbuf->write_thread;
	ssize_t size;

	if (!task->file) {
		task->file = file;
		task->sbuf = stbuf;
	}
	mutex_lock(&task->mutex);
	size = threadrw_write_in(task, stbuf, buf, count);
	mutex_unlock(&task->mutex);
	return size;
}

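/*
 * Try to push all queued data out, polling up to 20 times; returns -1 if
 * the data fifo is still not empty.
 */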
int threadrw_flush_buffers(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;
	int max_retry = 20;

	if (!task)
		return 0;
	while (!kfifo_is_empty(&task->datafifo) && max_retry-- > 0) {
		threadrw_schedule_delayed_work(task, 0);
		msleep(20);
	}
	if (!kfifo_is_empty(&task->datafifo))
		return -1;	/* data not flushed */
	return 0;
}
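
/*
 * Grow the buffer pool by roughly 'size' bytes using the default block
 * size; marks failed_onmore when no new buffers could be added.
 */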
int threadrw_alloc_more_buffer_size(
	struct stream_buf_s *stbuf,
	int size)
{
	struct threadrw_write_task *task = stbuf->write_thread;
	int block_size;
	int new_num;
	int ret = -1;
	int old_num;

	if (!task)
		return -1;
	mutex_lock(&task->mutex);
	block_size = task->def_block_size;
	if (block_size == 0)
		block_size = 32 * 1024;
	new_num = size / block_size;
	old_num = task->bufs_num;
	if (new_num == 0)
		new_num = 1;
	else if (new_num > task->max_bufs - task->bufs_num)
		new_num = task->max_bufs - task->bufs_num;
	if (new_num != 0)
		ret = alloc_task_buffers_inlock(task, new_num,
			block_size);
	mutex_unlock(&task->mutex);
	pr_info("threadrw add more buffer from %d -> %d for size %d\n",
		old_num, task->bufs_num,
		size);
	if (ret < 0 || old_num == task->bufs_num)
		task->failed_onmore = 1;
	return ret;
}

void *threadrw_alloc(int num,
		int block_size,
			ssize_t (*write)(struct file *,
				struct stream_buf_s *,
				const char __user *,
				size_t, int),
				int flags)
{
	return threadrw_alloc_in(num, block_size, write, flags);
}

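/*
 * Stop the write work and free all buffers, fifos and the task itself.
 */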
void threadrw_release(struct stream_buf_s *stbuf)
{
	struct threadrw_write_task *task = stbuf->write_thread;

	if (task) {
		wake_up_interruptible(&task->wq);
		cancel_delayed_work_sync(&task->write_work);
		mutex_lock(&task->mutex);
		free_task_buffers(task);
		mutex_unlock(&task->mutex);
		kfifo_free(&task->freefifo);
		kfifo_free(&task->datafifo);
		vfree(task);
	}
	stbuf->write_thread = NULL;
}