• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * comedi_buf.c
3  *
4  * COMEDI - Linux Control and Measurement Device Interface
5  * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
6  * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  */
18 
19 #include <linux/vmalloc.h>
20 #include <linux/slab.h>
21 
22 #include "comedidev.h"
23 #include "comedi_internal.h"
24 
25 #ifdef PAGE_KERNEL_NOCACHE
26 #define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
27 #else
28 #define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
29 #endif
30 
/*
 * kref release callback: frees every allocated buffer page and then the
 * buffer map itself.  Called when the last reference to the map is dropped
 * via comedi_buf_map_put().
 */
static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		/* n_pages may be less than the list size if allocation of
		 * the pages was only partially successful */
		for (i = 0; i < bm->n_pages; i++) {
			buf = &bm->page_list[i];
			/* undo the set_bit(PG_reserved) done at alloc time */
			clear_bit(PG_reserved,
				  &(virt_to_page(buf->virt_addr)->flags));
			if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
				dma_free_coherent(bm->dma_hw_dev,
						  PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
#endif
			} else {
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	/* drop the hardware device ref taken in __comedi_buf_alloc() */
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}
60 
__comedi_buf_free(struct comedi_device * dev,struct comedi_subdevice * s)61 static void __comedi_buf_free(struct comedi_device *dev,
62 			      struct comedi_subdevice *s)
63 {
64 	struct comedi_async *async = s->async;
65 	struct comedi_buf_map *bm;
66 	unsigned long flags;
67 
68 	if (async->prealloc_buf) {
69 		vunmap(async->prealloc_buf);
70 		async->prealloc_buf = NULL;
71 		async->prealloc_bufsz = 0;
72 	}
73 
74 	spin_lock_irqsave(&s->spin_lock, flags);
75 	bm = async->buf_map;
76 	async->buf_map = NULL;
77 	spin_unlock_irqrestore(&s->spin_lock, flags);
78 	comedi_buf_map_put(bm);
79 }
80 
__comedi_buf_alloc(struct comedi_device * dev,struct comedi_subdevice * s,unsigned int n_pages)81 static void __comedi_buf_alloc(struct comedi_device *dev,
82 			       struct comedi_subdevice *s,
83 			       unsigned int n_pages)
84 {
85 	struct comedi_async *async = s->async;
86 	struct page **pages = NULL;
87 	struct comedi_buf_map *bm;
88 	struct comedi_buf_page *buf;
89 	unsigned long flags;
90 	unsigned int i;
91 
92 	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
93 		dev_err(dev->class_dev,
94 			"dma buffer allocation not supported\n");
95 		return;
96 	}
97 
98 	bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
99 	if (!bm)
100 		return;
101 
102 	kref_init(&bm->refcount);
103 	spin_lock_irqsave(&s->spin_lock, flags);
104 	async->buf_map = bm;
105 	spin_unlock_irqrestore(&s->spin_lock, flags);
106 	bm->dma_dir = s->async_dma_dir;
107 	if (bm->dma_dir != DMA_NONE)
108 		/* Need ref to hardware device to free buffer later. */
109 		bm->dma_hw_dev = get_device(dev->hw_dev);
110 
111 	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
112 	if (bm->page_list)
113 		pages = vmalloc(sizeof(struct page *) * n_pages);
114 
115 	if (!pages)
116 		return;
117 
118 	for (i = 0; i < n_pages; i++) {
119 		buf = &bm->page_list[i];
120 		if (bm->dma_dir != DMA_NONE)
121 #ifdef CONFIG_HAS_DMA
122 			buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
123 							    PAGE_SIZE,
124 							    &buf->dma_addr,
125 							    GFP_KERNEL |
126 							    __GFP_COMP);
127 #else
128 			break;
129 #endif
130 		else
131 			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
132 		if (!buf->virt_addr)
133 			break;
134 
135 		set_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags));
136 
137 		pages[i] = virt_to_page(buf->virt_addr);
138 	}
139 	spin_lock_irqsave(&s->spin_lock, flags);
140 	bm->n_pages = i;
141 	spin_unlock_irqrestore(&s->spin_lock, flags);
142 
143 	/* vmap the prealloc_buf if all the pages were allocated */
144 	if (i == n_pages)
145 		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
146 					   COMEDI_PAGE_PROTECTION);
147 
148 	vfree(pages);
149 }
150 
comedi_buf_map_get(struct comedi_buf_map * bm)151 void comedi_buf_map_get(struct comedi_buf_map *bm)
152 {
153 	if (bm)
154 		kref_get(&bm->refcount);
155 }
156 
comedi_buf_map_put(struct comedi_buf_map * bm)157 int comedi_buf_map_put(struct comedi_buf_map *bm)
158 {
159 	if (bm)
160 		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
161 	return 1;
162 }
163 
164 /* helper for "access" vm operation */
comedi_buf_map_access(struct comedi_buf_map * bm,unsigned long offset,void * buf,int len,int write)165 int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
166 			  void *buf, int len, int write)
167 {
168 	unsigned int pgoff = offset_in_page(offset);
169 	unsigned long pg = offset >> PAGE_SHIFT;
170 	int done = 0;
171 
172 	while (done < len && pg < bm->n_pages) {
173 		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
174 		void *b = bm->page_list[pg].virt_addr + pgoff;
175 
176 		if (write)
177 			memcpy(b, buf, l);
178 		else
179 			memcpy(buf, b, l);
180 		buf += l;
181 		done += l;
182 		pg++;
183 		pgoff = 0;
184 	}
185 	return done;
186 }
187 
188 /* returns s->async->buf_map and increments its kref refcount */
189 struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice * s)190 comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
191 {
192 	struct comedi_async *async = s->async;
193 	struct comedi_buf_map *bm = NULL;
194 	unsigned long flags;
195 
196 	if (!async)
197 		return NULL;
198 
199 	spin_lock_irqsave(&s->spin_lock, flags);
200 	bm = async->buf_map;
201 	/* only want it if buffer pages allocated */
202 	if (bm && bm->n_pages)
203 		comedi_buf_map_get(bm);
204 	else
205 		bm = NULL;
206 	spin_unlock_irqrestore(&s->spin_lock, flags);
207 
208 	return bm;
209 }
210 
comedi_buf_is_mmapped(struct comedi_subdevice * s)211 bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
212 {
213 	struct comedi_buf_map *bm = s->async->buf_map;
214 
215 	return bm && (kref_read(&bm->refcount) > 1);
216 }
217 
comedi_buf_alloc(struct comedi_device * dev,struct comedi_subdevice * s,unsigned long new_size)218 int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
219 		     unsigned long new_size)
220 {
221 	struct comedi_async *async = s->async;
222 
223 	/* Round up new_size to multiple of PAGE_SIZE */
224 	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
225 
226 	/* if no change is required, do nothing */
227 	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
228 		return 0;
229 
230 	/* deallocate old buffer */
231 	__comedi_buf_free(dev, s);
232 
233 	/* allocate new buffer */
234 	if (new_size) {
235 		unsigned int n_pages = new_size >> PAGE_SHIFT;
236 
237 		__comedi_buf_alloc(dev, s, n_pages);
238 
239 		if (!async->prealloc_buf) {
240 			/* allocation failed */
241 			__comedi_buf_free(dev, s);
242 			return -ENOMEM;
243 		}
244 	}
245 	async->prealloc_bufsz = new_size;
246 
247 	return 0;
248 }
249 
comedi_buf_reset(struct comedi_subdevice * s)250 void comedi_buf_reset(struct comedi_subdevice *s)
251 {
252 	struct comedi_async *async = s->async;
253 
254 	async->buf_write_alloc_count = 0;
255 	async->buf_write_count = 0;
256 	async->buf_read_alloc_count = 0;
257 	async->buf_read_count = 0;
258 
259 	async->buf_write_ptr = 0;
260 	async->buf_read_ptr = 0;
261 
262 	async->cur_chan = 0;
263 	async->scans_done = 0;
264 	async->scan_progress = 0;
265 	async->munge_chan = 0;
266 	async->munge_count = 0;
267 	async->munge_ptr = 0;
268 
269 	async->events = 0;
270 }
271 
comedi_buf_write_n_unalloc(struct comedi_subdevice * s)272 static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
273 {
274 	struct comedi_async *async = s->async;
275 	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
276 
277 	return free_end - async->buf_write_alloc_count;
278 }
279 
comedi_buf_write_n_available(struct comedi_subdevice * s)280 unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
281 {
282 	struct comedi_async *async = s->async;
283 	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
284 
285 	return free_end - async->buf_write_count;
286 }
287 
288 /**
289  * comedi_buf_write_alloc() - Reserve buffer space for writing
290  * @s: COMEDI subdevice.
291  * @nbytes: Maximum space to reserve in bytes.
292  *
293  * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
294  * data buffer associated with the subdevice.  The amount reserved is limited
295  * by the space available.
296  *
297  * Return: The amount of space reserved in bytes.
298  */
comedi_buf_write_alloc(struct comedi_subdevice * s,unsigned int nbytes)299 unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
300 				    unsigned int nbytes)
301 {
302 	struct comedi_async *async = s->async;
303 	unsigned int unalloc = comedi_buf_write_n_unalloc(s);
304 
305 	if (nbytes > unalloc)
306 		nbytes = unalloc;
307 
308 	async->buf_write_alloc_count += nbytes;
309 
310 	/*
311 	 * ensure the async buffer 'counts' are read and updated
312 	 * before we write data to the write-alloc'ed buffer space
313 	 */
314 	smp_mb();
315 
316 	return nbytes;
317 }
318 EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
319 
320 /*
321  * munging is applied to data by core as it passes between user
322  * and kernel space
323  */
comedi_buf_munge(struct comedi_subdevice * s,unsigned int num_bytes)324 static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
325 				     unsigned int num_bytes)
326 {
327 	struct comedi_async *async = s->async;
328 	unsigned int count = 0;
329 	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
330 
331 	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
332 		async->munge_count += num_bytes;
333 		count = num_bytes;
334 	} else {
335 		/* don't munge partial samples */
336 		num_bytes -= num_bytes % num_sample_bytes;
337 		while (count < num_bytes) {
338 			int block_size = num_bytes - count;
339 			unsigned int buf_end;
340 
341 			buf_end = async->prealloc_bufsz - async->munge_ptr;
342 			if (block_size > buf_end)
343 				block_size = buf_end;
344 
345 			s->munge(s->device, s,
346 				 async->prealloc_buf + async->munge_ptr,
347 				 block_size, async->munge_chan);
348 
349 			/*
350 			 * ensure data is munged in buffer before the
351 			 * async buffer munge_count is incremented
352 			 */
353 			smp_wmb();
354 
355 			async->munge_chan += block_size / num_sample_bytes;
356 			async->munge_chan %= async->cmd.chanlist_len;
357 			async->munge_count += block_size;
358 			async->munge_ptr += block_size;
359 			async->munge_ptr %= async->prealloc_bufsz;
360 			count += block_size;
361 		}
362 	}
363 
364 	return count;
365 }
366 
comedi_buf_write_n_allocated(struct comedi_subdevice * s)367 unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
368 {
369 	struct comedi_async *async = s->async;
370 
371 	return async->buf_write_alloc_count - async->buf_write_count;
372 }
373 
374 /**
375  * comedi_buf_write_free() - Free buffer space after it is written
376  * @s: COMEDI subdevice.
377  * @nbytes: Maximum space to free in bytes.
378  *
379  * Free up to @nbytes bytes of space previously reserved for writing in the
380  * COMEDI acquisition data buffer associated with the subdevice.  The amount of
381  * space freed is limited to the amount that was reserved.  The freed space is
382  * assumed to have been filled with sample data by the writer.
383  *
384  * If the samples in the freed space need to be "munged", do so here.  The
385  * freed space becomes available for allocation by the reader.
386  *
387  * Return: The amount of space freed in bytes.
388  */
comedi_buf_write_free(struct comedi_subdevice * s,unsigned int nbytes)389 unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
390 				   unsigned int nbytes)
391 {
392 	struct comedi_async *async = s->async;
393 	unsigned int allocated = comedi_buf_write_n_allocated(s);
394 
395 	if (nbytes > allocated)
396 		nbytes = allocated;
397 
398 	async->buf_write_count += nbytes;
399 	async->buf_write_ptr += nbytes;
400 	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
401 	if (async->buf_write_ptr >= async->prealloc_bufsz)
402 		async->buf_write_ptr %= async->prealloc_bufsz;
403 
404 	return nbytes;
405 }
406 EXPORT_SYMBOL_GPL(comedi_buf_write_free);
407 
408 /**
409  * comedi_buf_read_n_available() - Determine amount of readable buffer space
410  * @s: COMEDI subdevice.
411  *
412  * Determine the amount of readable buffer space in the COMEDI acquisition data
413  * buffer associated with the subdevice.  The readable buffer space is that
414  * which has been freed by the writer and "munged" to the sample data format
415  * expected by COMEDI if necessary.
416  *
417  * Return: The amount of readable buffer space.
418  */
comedi_buf_read_n_available(struct comedi_subdevice * s)419 unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
420 {
421 	struct comedi_async *async = s->async;
422 	unsigned int num_bytes;
423 
424 	if (!async)
425 		return 0;
426 
427 	num_bytes = async->munge_count - async->buf_read_count;
428 
429 	/*
430 	 * ensure the async buffer 'counts' are read before we
431 	 * attempt to read data from the buffer
432 	 */
433 	smp_rmb();
434 
435 	return num_bytes;
436 }
437 EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
438 
439 /**
440  * comedi_buf_read_alloc() - Reserve buffer space for reading
441  * @s: COMEDI subdevice.
442  * @nbytes: Maximum space to reserve in bytes.
443  *
444  * Reserve up to @nbytes bytes of previously written and "munged" buffer space
445  * for reading in the COMEDI acquisition data buffer associated with the
446  * subdevice.  The amount reserved is limited to the space available.  The
447  * reader can read from the reserved space and then free it.  A reader is also
448  * allowed to read from the space before reserving it as long as it determines
449  * the amount of readable data available, but the space needs to be marked as
450  * reserved before it can be freed.
451  *
452  * Return: The amount of space reserved in bytes.
453  */
comedi_buf_read_alloc(struct comedi_subdevice * s,unsigned int nbytes)454 unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
455 				   unsigned int nbytes)
456 {
457 	struct comedi_async *async = s->async;
458 	unsigned int available;
459 
460 	available = async->munge_count - async->buf_read_alloc_count;
461 	if (nbytes > available)
462 		nbytes = available;
463 
464 	async->buf_read_alloc_count += nbytes;
465 
466 	/*
467 	 * ensure the async buffer 'counts' are read before we
468 	 * attempt to read data from the read-alloc'ed buffer space
469 	 */
470 	smp_rmb();
471 
472 	return nbytes;
473 }
474 EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
475 
comedi_buf_read_n_allocated(struct comedi_async * async)476 static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
477 {
478 	return async->buf_read_alloc_count - async->buf_read_count;
479 }
480 
481 /**
482  * comedi_buf_read_free() - Free buffer space after it has been read
483  * @s: COMEDI subdevice.
484  * @nbytes: Maximum space to free in bytes.
485  *
486  * Free up to @nbytes bytes of buffer space previously reserved for reading in
487  * the COMEDI acquisition data buffer associated with the subdevice.  The
488  * amount of space freed is limited to the amount that was reserved.
489  *
490  * The freed space becomes available for allocation by the writer.
491  *
492  * Return: The amount of space freed in bytes.
493  */
comedi_buf_read_free(struct comedi_subdevice * s,unsigned int nbytes)494 unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
495 				  unsigned int nbytes)
496 {
497 	struct comedi_async *async = s->async;
498 	unsigned int allocated;
499 
500 	/*
501 	 * ensure data has been read out of buffer before
502 	 * the async read count is incremented
503 	 */
504 	smp_mb();
505 
506 	allocated = comedi_buf_read_n_allocated(async);
507 	if (nbytes > allocated)
508 		nbytes = allocated;
509 
510 	async->buf_read_count += nbytes;
511 	async->buf_read_ptr += nbytes;
512 	async->buf_read_ptr %= async->prealloc_bufsz;
513 	return nbytes;
514 }
515 EXPORT_SYMBOL_GPL(comedi_buf_read_free);
516 
comedi_buf_memcpy_to(struct comedi_subdevice * s,const void * data,unsigned int num_bytes)517 static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
518 				 const void *data, unsigned int num_bytes)
519 {
520 	struct comedi_async *async = s->async;
521 	unsigned int write_ptr = async->buf_write_ptr;
522 
523 	while (num_bytes) {
524 		unsigned int block_size;
525 
526 		if (write_ptr + num_bytes > async->prealloc_bufsz)
527 			block_size = async->prealloc_bufsz - write_ptr;
528 		else
529 			block_size = num_bytes;
530 
531 		memcpy(async->prealloc_buf + write_ptr, data, block_size);
532 
533 		data += block_size;
534 		num_bytes -= block_size;
535 
536 		write_ptr = 0;
537 	}
538 }
539 
comedi_buf_memcpy_from(struct comedi_subdevice * s,void * dest,unsigned int nbytes)540 static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
541 				   void *dest, unsigned int nbytes)
542 {
543 	void *src;
544 	struct comedi_async *async = s->async;
545 	unsigned int read_ptr = async->buf_read_ptr;
546 
547 	while (nbytes) {
548 		unsigned int block_size;
549 
550 		src = async->prealloc_buf + read_ptr;
551 
552 		if (nbytes >= async->prealloc_bufsz - read_ptr)
553 			block_size = async->prealloc_bufsz - read_ptr;
554 		else
555 			block_size = nbytes;
556 
557 		memcpy(dest, src, block_size);
558 		nbytes -= block_size;
559 		dest += block_size;
560 		read_ptr = 0;
561 	}
562 }
563 
564 /**
565  * comedi_buf_write_samples() - Write sample data to COMEDI buffer
566  * @s: COMEDI subdevice.
567  * @data: Pointer to source samples.
568  * @nsamples: Number of samples to write.
569  *
570  * Write up to @nsamples samples to the COMEDI acquisition data buffer
571  * associated with the subdevice, mark it as written and update the
572  * acquisition scan progress.  If there is not enough room for the specified
573  * number of samples, the number of samples written is limited to the number
574  * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
575  * acquisition to terminate with an overrun error.  Set the %COMEDI_CB_BLOCK
576  * event flag if any samples are written to cause waiting tasks to be woken
577  * when the event flags are processed.
578  *
579  * Return: The amount of data written in bytes.
580  */
comedi_buf_write_samples(struct comedi_subdevice * s,const void * data,unsigned int nsamples)581 unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
582 				      const void *data, unsigned int nsamples)
583 {
584 	unsigned int max_samples;
585 	unsigned int nbytes;
586 
587 	/*
588 	 * Make sure there is enough room in the buffer for all the samples.
589 	 * If not, clamp the nsamples to the number that will fit, flag the
590 	 * buffer overrun and add the samples that fit.
591 	 */
592 	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
593 	if (nsamples > max_samples) {
594 		dev_warn(s->device->class_dev, "buffer overrun\n");
595 		s->async->events |= COMEDI_CB_OVERFLOW;
596 		nsamples = max_samples;
597 	}
598 
599 	if (nsamples == 0)
600 		return 0;
601 
602 	nbytes = comedi_buf_write_alloc(s,
603 					comedi_samples_to_bytes(s, nsamples));
604 	comedi_buf_memcpy_to(s, data, nbytes);
605 	comedi_buf_write_free(s, nbytes);
606 	comedi_inc_scan_progress(s, nbytes);
607 	s->async->events |= COMEDI_CB_BLOCK;
608 
609 	return nbytes;
610 }
611 EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
612 
613 /**
614  * comedi_buf_read_samples() - Read sample data from COMEDI buffer
615  * @s: COMEDI subdevice.
616  * @data: Pointer to destination.
617  * @nsamples: Maximum number of samples to read.
618  *
619  * Read up to @nsamples samples from the COMEDI acquisition data buffer
620  * associated with the subdevice, mark it as read and update the acquisition
621  * scan progress.  Limit the number of samples read to the number available.
622  * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
623  * tasks to be woken when the event flags are processed.
624  *
625  * Return: The amount of data read in bytes.
626  */
comedi_buf_read_samples(struct comedi_subdevice * s,void * data,unsigned int nsamples)627 unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
628 				     void *data, unsigned int nsamples)
629 {
630 	unsigned int max_samples;
631 	unsigned int nbytes;
632 
633 	/* clamp nsamples to the number of full samples available */
634 	max_samples = comedi_bytes_to_samples(s,
635 					      comedi_buf_read_n_available(s));
636 	if (nsamples > max_samples)
637 		nsamples = max_samples;
638 
639 	if (nsamples == 0)
640 		return 0;
641 
642 	nbytes = comedi_buf_read_alloc(s,
643 				       comedi_samples_to_bytes(s, nsamples));
644 	comedi_buf_memcpy_from(s, data, nbytes);
645 	comedi_buf_read_free(s, nbytes);
646 	comedi_inc_scan_progress(s, nbytes);
647 	s->async->events |= COMEDI_CB_BLOCK;
648 
649 	return nbytes;
650 }
651 EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
652