1 /*
2 * comedi_buf.c
3 *
4 * COMEDI - Linux Control and Measurement Device Interface
5 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
6 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/vmalloc.h>
20 #include <linux/slab.h>
21
22 #include "comedidev.h"
23 #include "comedi_internal.h"
24
25 #ifdef PAGE_KERNEL_NOCACHE
26 #define COMEDI_PAGE_PROTECTION PAGE_KERNEL_NOCACHE
27 #else
28 #define COMEDI_PAGE_PROTECTION PAGE_KERNEL
29 #endif
30
/*
 * Release callback for a buffer map's kref: frees every allocated page
 * (DMA-coherent or plain, matching how it was allocated), drops the
 * hardware device reference taken for DMA buffers, and finally frees
 * the map itself.  Runs only when the last reference — subdevice or an
 * outstanding mmap — goes away.
 */
static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		for (i = 0; i < bm->n_pages; i++) {
			buf = &bm->page_list[i];
			/* undo the PG_reserved set in __comedi_buf_alloc() */
			clear_bit(PG_reserved,
				  &(virt_to_page(buf->virt_addr)->flags));
			if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
				dma_free_coherent(bm->dma_hw_dev,
						  PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
#endif
			} else {
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}
60
/*
 * Free the subdevice's acquisition buffer: drop the contiguous vmap'ed
 * view, then detach the buffer map from the subdevice under the
 * subdevice spinlock and release our reference to it.  The pages
 * themselves are freed by comedi_buf_map_kref_release() once the last
 * reference (e.g. an outstanding user mmap) is gone.
 */
static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (async->prealloc_buf) {
		vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}

	/* detach under the lock so concurrent lookups see NULL */
	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}
80
/*
 * Allocate an n_pages buffer map for the subdevice and vmap the pages
 * into one contiguous kernel address range (async->prealloc_buf).
 * Pages are DMA-coherent when the subdevice requests a DMA direction,
 * plain zeroed pages otherwise.  On partial failure the map stays
 * attached with bm->n_pages reflecting what was allocated; the caller
 * detects failure via async->prealloc_buf remaining NULL and cleans up
 * with __comedi_buf_free().
 */
static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned i;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
	if (!bm)
		return;

	kref_init(&bm->refcount);
	/* publish the map under the lock before filling it in */
	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	bm->dma_dir = s->async_dma_dir;
	if (bm->dma_dir != DMA_NONE)
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);

	/* pages[] is only needed temporarily for vmap() below */
	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (bm->page_list)
		pages = vmalloc(sizeof(struct page *) * n_pages);

	if (!pages)
		return;

	for (i = 0; i < n_pages; i++) {
		buf = &bm->page_list[i];
		if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
			buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
							    PAGE_SIZE,
							    &buf->dma_addr,
							    GFP_KERNEL |
							    __GFP_COMP);
#else
			break;
#endif
		else
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
		if (!buf->virt_addr)
			break;

		/*
		 * Mark the page reserved; cleared again in
		 * comedi_buf_map_kref_release().
		 */
		set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));

		pages[i] = virt_to_page(buf->virt_addr);
	}
	/* record how many pages were actually allocated */
	spin_lock_irqsave(&s->spin_lock, flags);
	bm->n_pages = i;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	/* vmap the prealloc_buf if all the pages were allocated */
	if (i == n_pages)
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

	vfree(pages);
}
150
comedi_buf_map_get(struct comedi_buf_map * bm)151 void comedi_buf_map_get(struct comedi_buf_map *bm)
152 {
153 if (bm)
154 kref_get(&bm->refcount);
155 }
156
comedi_buf_map_put(struct comedi_buf_map * bm)157 int comedi_buf_map_put(struct comedi_buf_map *bm)
158 {
159 if (bm)
160 return kref_put(&bm->refcount, comedi_buf_map_kref_release);
161 return 1;
162 }
163
164 /* returns s->async->buf_map and increments its kref refcount */
165 struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice * s)166 comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
167 {
168 struct comedi_async *async = s->async;
169 struct comedi_buf_map *bm = NULL;
170 unsigned long flags;
171
172 if (!async)
173 return NULL;
174
175 spin_lock_irqsave(&s->spin_lock, flags);
176 bm = async->buf_map;
177 /* only want it if buffer pages allocated */
178 if (bm && bm->n_pages)
179 comedi_buf_map_get(bm);
180 else
181 bm = NULL;
182 spin_unlock_irqrestore(&s->spin_lock, flags);
183
184 return bm;
185 }
186
comedi_buf_is_mmapped(struct comedi_subdevice * s)187 bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
188 {
189 struct comedi_buf_map *bm = s->async->buf_map;
190
191 return bm && (atomic_read(&bm->refcount.refcount) > 1);
192 }
193
/*
 * (Re)allocate the subdevice's acquisition buffer to @new_size bytes,
 * rounded up to a whole number of pages.  A size of 0 simply frees the
 * existing buffer.  Returns 0 on success or -ENOMEM on allocation
 * failure (the old buffer is freed either way).
 */
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);

		/* __comedi_buf_alloc() signals failure via a NULL vmap */
		if (!async->prealloc_buf) {
			/* allocation failed */
			__comedi_buf_free(dev, s);
			return -ENOMEM;
		}
	}
	async->prealloc_bufsz = new_size;

	return 0;
}
225
comedi_buf_reset(struct comedi_subdevice * s)226 void comedi_buf_reset(struct comedi_subdevice *s)
227 {
228 struct comedi_async *async = s->async;
229
230 async->buf_write_alloc_count = 0;
231 async->buf_write_count = 0;
232 async->buf_read_alloc_count = 0;
233 async->buf_read_count = 0;
234
235 async->buf_write_ptr = 0;
236 async->buf_read_ptr = 0;
237
238 async->cur_chan = 0;
239 async->scan_progress = 0;
240 async->munge_chan = 0;
241 async->munge_count = 0;
242 async->munge_ptr = 0;
243
244 async->events = 0;
245 }
246
comedi_buf_write_n_available(struct comedi_subdevice * s)247 static unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
248 {
249 struct comedi_async *async = s->async;
250 unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
251
252 return free_end - async->buf_write_alloc_count;
253 }
254
__comedi_buf_write_alloc(struct comedi_subdevice * s,unsigned int nbytes,int strict)255 static unsigned int __comedi_buf_write_alloc(struct comedi_subdevice *s,
256 unsigned int nbytes,
257 int strict)
258 {
259 struct comedi_async *async = s->async;
260 unsigned int available = comedi_buf_write_n_available(s);
261
262 if (nbytes > available)
263 nbytes = strict ? 0 : available;
264
265 async->buf_write_alloc_count += nbytes;
266
267 /*
268 * ensure the async buffer 'counts' are read and updated
269 * before we write data to the write-alloc'ed buffer space
270 */
271 smp_mb();
272
273 return nbytes;
274 }
275
/*
 * Allocates a chunk for the writer from free buffer space
 * (non-strict: a short allocation is returned when less space is
 * available than requested).  Returns the number of bytes reserved.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	return __comedi_buf_write_alloc(s, nbytes, 0);
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
283
/*
 * Munging is applied to data by the core as it passes between user
 * and kernel space.  Applies the subdevice's munge callback to up to
 * @num_bytes of freshly-written data (whole samples only), advancing
 * munge_ptr/munge_chan/munge_count as it goes.  Returns the number of
 * bytes accounted as munged.
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int count = 0;
	const unsigned num_sample_bytes = bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		/* no munge callback, or raw data requested: pass through */
		async->munge_count += num_bytes;
		count = num_bytes;
	} else {
		/* don't munge partial samples */
		num_bytes -= num_bytes % num_sample_bytes;
		while (count < num_bytes) {
			int block_size = num_bytes - count;
			unsigned int buf_end;

			/* clamp each pass to the end of the circular buffer */
			buf_end = async->prealloc_bufsz - async->munge_ptr;
			if (block_size > buf_end)
				block_size = buf_end;

			s->munge(s->device, s,
				 async->prealloc_buf + async->munge_ptr,
				 block_size, async->munge_chan);

			/*
			 * ensure data is munged in buffer before the
			 * async buffer munge_count is incremented
			 */
			smp_wmb();

			async->munge_chan += block_size / num_sample_bytes;
			async->munge_chan %= async->cmd.chanlist_len;
			async->munge_count += block_size;
			async->munge_ptr += block_size;
			async->munge_ptr %= async->prealloc_bufsz;
			count += block_size;
		}
	}

	return count;
}
330
comedi_buf_write_n_allocated(struct comedi_subdevice * s)331 unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
332 {
333 struct comedi_async *async = s->async;
334
335 return async->buf_write_alloc_count - async->buf_write_count;
336 }
337
/* transfers a chunk from writer to filled buffer space */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	/* never mark more written than was write-allocated */
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	/* munge everything written so far but not yet munged */
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
357
comedi_buf_read_n_available(struct comedi_subdevice * s)358 unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
359 {
360 struct comedi_async *async = s->async;
361 unsigned num_bytes;
362
363 if (!async)
364 return 0;
365
366 num_bytes = async->munge_count - async->buf_read_count;
367
368 /*
369 * ensure the async buffer 'counts' are read before we
370 * attempt to read data from the buffer
371 */
372 smp_rmb();
373
374 return num_bytes;
375 }
376 EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
377
378 /* allocates a chunk for the reader from filled (and munged) buffer space */
comedi_buf_read_alloc(struct comedi_subdevice * s,unsigned int nbytes)379 unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
380 unsigned int nbytes)
381 {
382 struct comedi_async *async = s->async;
383 unsigned int available;
384
385 available = async->munge_count - async->buf_read_alloc_count;
386 if (nbytes > available)
387 nbytes = available;
388
389 async->buf_read_alloc_count += nbytes;
390
391 /*
392 * ensure the async buffer 'counts' are read before we
393 * attempt to read data from the read-alloc'ed buffer space
394 */
395 smp_rmb();
396
397 return nbytes;
398 }
399 EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
400
comedi_buf_read_n_allocated(struct comedi_async * async)401 static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
402 {
403 return async->buf_read_alloc_count - async->buf_read_count;
404 }
405
/* transfers control of a chunk from reader to free buffer space */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	/* never free more than was read-allocated */
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
429
comedi_buf_put(struct comedi_subdevice * s,unsigned short x)430 int comedi_buf_put(struct comedi_subdevice *s, unsigned short x)
431 {
432 struct comedi_async *async = s->async;
433 unsigned int n = __comedi_buf_write_alloc(s, sizeof(short), 1);
434
435 if (n < sizeof(short)) {
436 async->events |= COMEDI_CB_ERROR;
437 return 0;
438 }
439 *(unsigned short *)(async->prealloc_buf + async->buf_write_ptr) = x;
440 comedi_buf_write_free(s, sizeof(short));
441 return 1;
442 }
443 EXPORT_SYMBOL_GPL(comedi_buf_put);
444
comedi_buf_get(struct comedi_subdevice * s,unsigned short * x)445 int comedi_buf_get(struct comedi_subdevice *s, unsigned short *x)
446 {
447 struct comedi_async *async = s->async;
448 unsigned int n = comedi_buf_read_n_available(s);
449
450 if (n < sizeof(short))
451 return 0;
452 comedi_buf_read_alloc(s, sizeof(short));
453 *x = *(unsigned short *)(async->prealloc_buf + async->buf_read_ptr);
454 comedi_buf_read_free(s, sizeof(short));
455 return 1;
456 }
457 EXPORT_SYMBOL_GPL(comedi_buf_get);
458
comedi_buf_memcpy_to(struct comedi_subdevice * s,unsigned int offset,const void * data,unsigned int num_bytes)459 void comedi_buf_memcpy_to(struct comedi_subdevice *s, unsigned int offset,
460 const void *data, unsigned int num_bytes)
461 {
462 struct comedi_async *async = s->async;
463 unsigned int write_ptr = async->buf_write_ptr + offset;
464
465 if (write_ptr >= async->prealloc_bufsz)
466 write_ptr %= async->prealloc_bufsz;
467
468 while (num_bytes) {
469 unsigned int block_size;
470
471 if (write_ptr + num_bytes > async->prealloc_bufsz)
472 block_size = async->prealloc_bufsz - write_ptr;
473 else
474 block_size = num_bytes;
475
476 memcpy(async->prealloc_buf + write_ptr, data, block_size);
477
478 data += block_size;
479 num_bytes -= block_size;
480
481 write_ptr = 0;
482 }
483 }
484 EXPORT_SYMBOL_GPL(comedi_buf_memcpy_to);
485
comedi_buf_memcpy_from(struct comedi_subdevice * s,unsigned int offset,void * dest,unsigned int nbytes)486 void comedi_buf_memcpy_from(struct comedi_subdevice *s, unsigned int offset,
487 void *dest, unsigned int nbytes)
488 {
489 void *src;
490 struct comedi_async *async = s->async;
491 unsigned int read_ptr = async->buf_read_ptr + offset;
492
493 if (read_ptr >= async->prealloc_bufsz)
494 read_ptr %= async->prealloc_bufsz;
495
496 while (nbytes) {
497 unsigned int block_size;
498
499 src = async->prealloc_buf + read_ptr;
500
501 if (nbytes >= async->prealloc_bufsz - read_ptr)
502 block_size = async->prealloc_bufsz - read_ptr;
503 else
504 block_size = nbytes;
505
506 memcpy(dest, src, block_size);
507 nbytes -= block_size;
508 dest += block_size;
509 read_ptr = 0;
510 }
511 }
512 EXPORT_SYMBOL_GPL(comedi_buf_memcpy_from);
513
514 /**
515 * comedi_write_array_to_buffer - write data to comedi buffer
516 * @s: comedi_subdevice struct
517 * @data: destination
518 * @num_bytes: number of bytes to write
519 *
520 * Writes up to num_bytes bytes of data to the comedi buffer associated with
521 * the subdevice, marks it as written and updates the acquisition scan
522 * progress.
523 *
524 * Returns the amount of data written in bytes.
525 */
comedi_write_array_to_buffer(struct comedi_subdevice * s,const void * data,unsigned int num_bytes)526 unsigned int comedi_write_array_to_buffer(struct comedi_subdevice *s,
527 const void *data,
528 unsigned int num_bytes)
529 {
530 struct comedi_async *async = s->async;
531 unsigned int retval;
532
533 if (num_bytes == 0)
534 return 0;
535
536 retval = comedi_buf_write_alloc(s, num_bytes);
537 if (retval != num_bytes) {
538 dev_warn(s->device->class_dev, "buffer overrun\n");
539 async->events |= COMEDI_CB_OVERFLOW;
540 return 0;
541 }
542
543 comedi_buf_memcpy_to(s, 0, data, num_bytes);
544 comedi_buf_write_free(s, num_bytes);
545 comedi_inc_scan_progress(s, num_bytes);
546 async->events |= COMEDI_CB_BLOCK;
547
548 return num_bytes;
549 }
550 EXPORT_SYMBOL_GPL(comedi_write_array_to_buffer);
551
552 /**
553 * comedi_read_array_from_buffer - read data from comedi buffer
554 * @s: comedi_subdevice struct
555 * @data: destination
556 * @num_bytes: number of bytes to read
557 *
558 * Reads up to num_bytes bytes of data from the comedi buffer associated with
559 * the subdevice, marks it as read and updates the acquisition scan progress.
560 *
561 * Returns the amount of data read in bytes.
562 */
comedi_read_array_from_buffer(struct comedi_subdevice * s,void * data,unsigned int num_bytes)563 unsigned int comedi_read_array_from_buffer(struct comedi_subdevice *s,
564 void *data, unsigned int num_bytes)
565 {
566 if (num_bytes == 0)
567 return 0;
568
569 num_bytes = comedi_buf_read_alloc(s, num_bytes);
570 comedi_buf_memcpy_from(s, 0, data, num_bytes);
571 comedi_buf_read_free(s, num_bytes);
572 comedi_inc_scan_progress(s, num_bytes);
573 s->async->events |= COMEDI_CB_BLOCK;
574
575 return num_bytes;
576 }
577 EXPORT_SYMBOL_GPL(comedi_read_array_from_buffer);
578