1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/anon_inodes.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/device.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/cdev.h>
19 #include <linux/slab.h>
20 #include <linux/poll.h>
21 #include <linux/sched/signal.h>
22 
23 #include <linux/iio/iio.h>
24 #include <linux/iio/iio-opaque.h>
25 #include "iio_core.h"
26 #include "iio_core_trigger.h"
27 #include <linux/iio/sysfs.h>
28 #include <linux/iio/buffer.h>
29 #include <linux/iio/buffer_impl.h>
30 
31 static const char * const iio_endian_prefix[] = {
32 	[IIO_BE] = "be",
33 	[IIO_LE] = "le",
34 };
35 
36 static bool iio_buffer_is_active(struct iio_buffer *buf)
37 {
38 	return !list_empty(&buf->buffer_list);
39 }
40 
41 static size_t iio_buffer_data_available(struct iio_buffer *buf)
42 {
43 	return buf->access->data_available(buf);
44 }
45 
46 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
47 				   struct iio_buffer *buf, size_t required)
48 {
49 	if (!indio_dev->info->hwfifo_flush_to_buffer)
50 		return -ENODEV;
51 
52 	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
53 }
54 
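/*
 * Check whether a read of @to_wait scan elements can be satisfied, flushing
 * up to @to_flush elements from the hardware FIFO if needed. Also returns
 * true when the device has been unregistered so that waiters wake up.
 */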
55 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
56 			     size_t to_wait, int to_flush)
57 {
58 	size_t avail;
59 	int flushed = 0;
60 
61 	/* wakeup if the device was unregistered */
62 	if (!indio_dev->info)
63 		return true;
64 
65 	/* drain the buffer if it was disabled */
66 	if (!iio_buffer_is_active(buf)) {
67 		to_wait = min_t(size_t, to_wait, 1);
68 		to_flush = 0;
69 	}
70 
71 	avail = iio_buffer_data_available(buf);
72 
73 	if (avail >= to_wait) {
74 		/* force a flush for non-blocking reads */
75 		if (!to_wait && avail < to_flush)
76 			iio_buffer_flush_hwfifo(indio_dev, buf,
77 						to_flush - avail);
78 		return true;
79 	}
80 
81 	if (to_flush)
82 		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
83 						  to_wait - avail);
84 	if (flushed <= 0)
85 		return false;
86 
87 	if (avail + flushed >= to_wait)
88 		return true;
89 
90 	return false;
91 }
92 
93 /**
94  * iio_buffer_read() - chrdev read for buffer access
95  * @filp:	File structure pointer for the char device
96  * @buf:	Destination buffer for iio buffer read
97  * @n:		First n bytes to read
98  * @f_ps:	Long offset provided by the user as a seek position
99  *
100  * This function relies on all buffer implementations having an
101  * iio_buffer as their first element.
102  *
103  * Return: number of bytes read, 0 to signal end of file, or a negative
104  *	   error code
105  **/
106 static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
107 			       size_t n, loff_t *f_ps)
108 {
109 	struct iio_dev_buffer_pair *ib = filp->private_data;
110 	struct iio_buffer *rb = ib->buffer;
111 	struct iio_dev *indio_dev = ib->indio_dev;
112 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
113 	size_t datum_size;
114 	size_t to_wait;
115 	int ret = 0;
116 
117 	if (!indio_dev->info)
118 		return -ENODEV;
119 
120 	if (!rb || !rb->access->read)
121 		return -EINVAL;
122 
123 	datum_size = rb->bytes_per_datum;
124 
125 	/*
126 	 * If datum_size is 0 there will never be anything to read from the
127 	 * buffer, so signal end of file now.
128 	 */
129 	if (!datum_size)
130 		return 0;
131 
132 	if (filp->f_flags & O_NONBLOCK)
133 		to_wait = 0;
134 	else
135 		to_wait = min_t(size_t, n / datum_size, rb->watermark);
136 
137 	add_wait_queue(&rb->pollq, &wait);
138 	do {
139 		if (!indio_dev->info) {
140 			ret = -ENODEV;
141 			break;
142 		}
143 
144 		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
145 			if (signal_pending(current)) {
146 				ret = -ERESTARTSYS;
147 				break;
148 			}
149 
150 			wait_woken(&wait, TASK_INTERRUPTIBLE,
151 				   MAX_SCHEDULE_TIMEOUT);
152 			continue;
153 		}
154 
155 		ret = rb->access->read(rb, n, buf);
156 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
157 			ret = -EAGAIN;
158 	} while (ret == 0);
159 	remove_wait_queue(&rb->pollq, &wait);
160 
161 	return ret;
162 }
163 
164 /**
165  * iio_buffer_poll() - poll the buffer to find out if it has data
166  * @filp:	File structure pointer for device access
167  * @wait:	Poll table structure pointer for which the driver adds
168  *		a wait queue
169  *
170  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
171  *	   or 0 for other cases
172  */
173 static __poll_t iio_buffer_poll(struct file *filp,
174 				struct poll_table_struct *wait)
175 {
176 	struct iio_dev_buffer_pair *ib = filp->private_data;
177 	struct iio_buffer *rb = ib->buffer;
178 	struct iio_dev *indio_dev = ib->indio_dev;
179 
180 	if (!indio_dev->info || rb == NULL)
181 		return 0;
182 
183 	poll_wait(filp, &rb->pollq, wait);
184 	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
185 		return EPOLLIN | EPOLLRDNORM;
186 	return 0;
187 }
188 
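/*
 * Wrappers backing the legacy iio:deviceX character device. They bail out
 * once the buffer has been claimed through the per-buffer file descriptor
 * obtained via IIO_BUFFER_GET_FD_IOCTL.
 */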
189 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
190 				size_t n, loff_t *f_ps)
191 {
192 	struct iio_dev_buffer_pair *ib = filp->private_data;
193 	struct iio_buffer *rb = ib->buffer;
194 
195 	/* check if buffer was opened through new API */
196 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
197 		return -EBUSY;
198 
199 	return iio_buffer_read(filp, buf, n, f_ps);
200 }
201 
202 __poll_t iio_buffer_poll_wrapper(struct file *filp,
203 				 struct poll_table_struct *wait)
204 {
205 	struct iio_dev_buffer_pair *ib = filp->private_data;
206 	struct iio_buffer *rb = ib->buffer;
207 
208 	/* check if buffer was opened through new API */
209 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
210 		return 0;
211 
212 	return iio_buffer_poll(filp, wait);
213 }
214 
215 /**
216  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
217  * @indio_dev: The IIO device
218  *
219  * Wakes up the buffer waitqueues used for poll(). Should usually
220  * be called when the device is unregistered.
221  */
222 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
223 {
224 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
225 	struct iio_buffer *buffer;
226 	unsigned int i;
227 
228 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
229 		buffer = iio_dev_opaque->attached_buffers[i];
230 		wake_up(&buffer->pollq);
231 	}
232 }
233 
234 void iio_buffer_init(struct iio_buffer *buffer)
235 {
236 	INIT_LIST_HEAD(&buffer->demux_list);
237 	INIT_LIST_HEAD(&buffer->buffer_list);
238 	init_waitqueue_head(&buffer->pollq);
239 	kref_init(&buffer->ref);
240 	if (!buffer->watermark)
241 		buffer->watermark = 1;
242 }
243 EXPORT_SYMBOL(iio_buffer_init);
244 
245 void iio_device_detach_buffers(struct iio_dev *indio_dev)
246 {
247 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
248 	struct iio_buffer *buffer;
249 	unsigned int i;
250 
251 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
252 		buffer = iio_dev_opaque->attached_buffers[i];
253 		iio_buffer_put(buffer);
254 	}
255 
256 	kfree(iio_dev_opaque->attached_buffers);
257 }
258 
259 static ssize_t iio_show_scan_index(struct device *dev,
260 				   struct device_attribute *attr,
261 				   char *buf)
262 {
263 	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
264 }
265 
266 static ssize_t iio_show_fixed_type(struct device *dev,
267 				   struct device_attribute *attr,
268 				   char *buf)
269 {
270 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
271 	u8 type = this_attr->c->scan_type.endianness;
272 
273 	if (type == IIO_CPU) {
274 #ifdef __LITTLE_ENDIAN
275 		type = IIO_LE;
276 #else
277 		type = IIO_BE;
278 #endif
279 	}
280 	if (this_attr->c->scan_type.repeat > 1)
281 		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
282 		       iio_endian_prefix[type],
283 		       this_attr->c->scan_type.sign,
284 		       this_attr->c->scan_type.realbits,
285 		       this_attr->c->scan_type.storagebits,
286 		       this_attr->c->scan_type.repeat,
287 		       this_attr->c->scan_type.shift);
288 	else
289 		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
290 		       iio_endian_prefix[type],
291 		       this_attr->c->scan_type.sign,
292 		       this_attr->c->scan_type.realbits,
293 		       this_attr->c->scan_type.storagebits,
294 		       this_attr->c->scan_type.shift);
295 }
296 
297 static ssize_t iio_scan_el_show(struct device *dev,
298 				struct device_attribute *attr,
299 				char *buf)
300 {
301 	int ret;
302 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
303 
304 	/* Ensure ret is 0 or 1. */
305 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
306 		       buffer->scan_mask);
307 
308 	return sysfs_emit(buf, "%d\n", ret);
309 }
310 
311 /* Note: NULL is used as the error indicator, since a NULL mask makes no sense. */
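/*
 * With @strict only an exact match is accepted; otherwise the first
 * available mask that is a superset of @mask is returned.
 */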
312 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
313 					  unsigned int masklength,
314 					  const unsigned long *mask,
315 					  bool strict)
316 {
317 	if (bitmap_empty(mask, masklength))
318 		return NULL;
319 	while (*av_masks) {
320 		if (strict) {
321 			if (bitmap_equal(mask, av_masks, masklength))
322 				return av_masks;
323 		} else {
324 			if (bitmap_subset(mask, av_masks, masklength))
325 				return av_masks;
326 		}
327 		av_masks += BITS_TO_LONGS(masklength);
328 	}
329 	return NULL;
330 }
331 
332 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
333 	const unsigned long *mask)
334 {
335 	if (!indio_dev->setup_ops->validate_scan_mask)
336 		return true;
337 
338 	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
339 }
340 
341 /**
342  * iio_scan_mask_set() - set particular bit in the scan mask
343  * @indio_dev: the iio device
344  * @buffer: the buffer whose scan mask we are interested in
345  * @bit: the bit to be set.
346  *
347  * Note that at this point we have no way of knowing what other
348  * buffers might request, hence this code only verifies that the
349  * individual buffers request is plausible.
350  */
351 static int iio_scan_mask_set(struct iio_dev *indio_dev,
352 		      struct iio_buffer *buffer, int bit)
353 {
354 	const unsigned long *mask;
355 	unsigned long *trialmask;
356 
357 	if (!indio_dev->masklength) {
358 		WARN(1, "Trying to set scanmask prior to registering buffer\n");
359 		return -EINVAL;
360 	}
361 
362 	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
363 	if (!trialmask)
364 		return -ENOMEM;
365 	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
366 	set_bit(bit, trialmask);
367 
368 	if (!iio_validate_scan_mask(indio_dev, trialmask))
369 		goto err_invalid_mask;
370 
371 	if (indio_dev->available_scan_masks) {
372 		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
373 					   indio_dev->masklength,
374 					   trialmask, false);
375 		if (!mask)
376 			goto err_invalid_mask;
377 	}
378 	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
379 
380 	bitmap_free(trialmask);
381 
382 	return 0;
383 
384 err_invalid_mask:
385 	bitmap_free(trialmask);
386 	return -EINVAL;
387 }
388 
389 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
390 {
391 	clear_bit(bit, buffer->scan_mask);
392 	return 0;
393 }
394 
395 static int iio_scan_mask_query(struct iio_dev *indio_dev,
396 			       struct iio_buffer *buffer, int bit)
397 {
398 	if (bit > indio_dev->masklength)
399 		return -EINVAL;
400 
401 	if (!buffer->scan_mask)
402 		return 0;
403 
404 	/* Ensure return value is 0 or 1. */
405 	return !!test_bit(bit, buffer->scan_mask);
406 };
407 
408 static ssize_t iio_scan_el_store(struct device *dev,
409 				 struct device_attribute *attr,
410 				 const char *buf,
411 				 size_t len)
412 {
413 	int ret;
414 	bool state;
415 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
416 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
417 	struct iio_buffer *buffer = this_attr->buffer;
418 
419 	ret = strtobool(buf, &state);
420 	if (ret < 0)
421 		return ret;
422 	mutex_lock(&indio_dev->mlock);
423 	if (iio_buffer_is_active(buffer)) {
424 		ret = -EBUSY;
425 		goto error_ret;
426 	}
427 	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
428 	if (ret < 0)
429 		goto error_ret;
430 	if (!state && ret) {
431 		ret = iio_scan_mask_clear(buffer, this_attr->address);
432 		if (ret)
433 			goto error_ret;
434 	} else if (state && !ret) {
435 		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
436 		if (ret)
437 			goto error_ret;
438 	}
439 
440 error_ret:
441 	mutex_unlock(&indio_dev->mlock);
442 
443 	return ret < 0 ? ret : len;
444 
445 }
446 
447 static ssize_t iio_scan_el_ts_show(struct device *dev,
448 				   struct device_attribute *attr,
449 				   char *buf)
450 {
451 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
452 
453 	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
454 }
455 
456 static ssize_t iio_scan_el_ts_store(struct device *dev,
457 				    struct device_attribute *attr,
458 				    const char *buf,
459 				    size_t len)
460 {
461 	int ret;
462 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
463 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
464 	bool state;
465 
466 	ret = strtobool(buf, &state);
467 	if (ret < 0)
468 		return ret;
469 
470 	mutex_lock(&indio_dev->mlock);
471 	if (iio_buffer_is_active(buffer)) {
472 		ret = -EBUSY;
473 		goto error_ret;
474 	}
475 	buffer->scan_timestamp = state;
476 error_ret:
477 	mutex_unlock(&indio_dev->mlock);
478 
479 	return ret ? ret : len;
480 }
481 
482 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
483 					struct iio_buffer *buffer,
484 					const struct iio_chan_spec *chan)
485 {
486 	int ret, attrcount = 0;
487 
488 	ret = __iio_add_chan_devattr("index",
489 				     chan,
490 				     &iio_show_scan_index,
491 				     NULL,
492 				     0,
493 				     IIO_SEPARATE,
494 				     &indio_dev->dev,
495 				     buffer,
496 				     &buffer->buffer_attr_list);
497 	if (ret)
498 		return ret;
499 	attrcount++;
500 	ret = __iio_add_chan_devattr("type",
501 				     chan,
502 				     &iio_show_fixed_type,
503 				     NULL,
504 				     0,
505 				     0,
506 				     &indio_dev->dev,
507 				     buffer,
508 				     &buffer->buffer_attr_list);
509 	if (ret)
510 		return ret;
511 	attrcount++;
512 	if (chan->type != IIO_TIMESTAMP)
513 		ret = __iio_add_chan_devattr("en",
514 					     chan,
515 					     &iio_scan_el_show,
516 					     &iio_scan_el_store,
517 					     chan->scan_index,
518 					     0,
519 					     &indio_dev->dev,
520 					     buffer,
521 					     &buffer->buffer_attr_list);
522 	else
523 		ret = __iio_add_chan_devattr("en",
524 					     chan,
525 					     &iio_scan_el_ts_show,
526 					     &iio_scan_el_ts_store,
527 					     chan->scan_index,
528 					     0,
529 					     &indio_dev->dev,
530 					     buffer,
531 					     &buffer->buffer_attr_list);
532 	if (ret)
533 		return ret;
534 	attrcount++;
535 	ret = attrcount;
536 	return ret;
537 }
538 
539 static ssize_t iio_buffer_read_length(struct device *dev,
540 				      struct device_attribute *attr,
541 				      char *buf)
542 {
543 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
544 
545 	return sysfs_emit(buf, "%d\n", buffer->length);
546 }
547 
548 static ssize_t iio_buffer_write_length(struct device *dev,
549 				       struct device_attribute *attr,
550 				       const char *buf, size_t len)
551 {
552 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
553 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
554 	unsigned int val;
555 	int ret;
556 
557 	ret = kstrtouint(buf, 10, &val);
558 	if (ret)
559 		return ret;
560 
561 	if (val == buffer->length)
562 		return len;
563 
564 	mutex_lock(&indio_dev->mlock);
565 	if (iio_buffer_is_active(buffer)) {
566 		ret = -EBUSY;
567 	} else {
568 		buffer->access->set_length(buffer, val);
569 		ret = 0;
570 	}
571 	if (ret)
572 		goto out;
573 	if (buffer->length && buffer->length < buffer->watermark)
574 		buffer->watermark = buffer->length;
575 out:
576 	mutex_unlock(&indio_dev->mlock);
577 
578 	return ret ? ret : len;
579 }
580 
581 static ssize_t iio_buffer_show_enable(struct device *dev,
582 				      struct device_attribute *attr,
583 				      char *buf)
584 {
585 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
586 
587 	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
588 }
589 
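/* Bytes needed to store one element of the channel with the given scan index */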
590 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
591 					     unsigned int scan_index)
592 {
593 	const struct iio_chan_spec *ch;
594 	unsigned int bytes;
595 
596 	ch = iio_find_channel_from_si(indio_dev, scan_index);
597 	bytes = ch->scan_type.storagebits / 8;
598 	if (ch->scan_type.repeat > 1)
599 		bytes *= ch->scan_type.repeat;
600 	return bytes;
601 }
602 
603 static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
604 {
605 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
606 
607 	return iio_storage_bytes_for_si(indio_dev,
608 					iio_dev_opaque->scan_index_timestamp);
609 }
610 
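/*
 * Size in bytes of one demuxed scan for the given mask: each element is
 * aligned to its own size and the total is padded to the largest element.
 */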
611 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
612 				const unsigned long *mask, bool timestamp)
613 {
614 	unsigned bytes = 0;
615 	int length, i, largest = 0;
616 
617 	/* How much space will the demuxed element take? */
618 	for_each_set_bit(i, mask,
619 			 indio_dev->masklength) {
620 		length = iio_storage_bytes_for_si(indio_dev, i);
621 		bytes = ALIGN(bytes, length);
622 		bytes += length;
623 		largest = max(largest, length);
624 	}
625 
626 	if (timestamp) {
627 		length = iio_storage_bytes_for_timestamp(indio_dev);
628 		bytes = ALIGN(bytes, length);
629 		bytes += length;
630 		largest = max(largest, length);
631 	}
632 
633 	bytes = ALIGN(bytes, largest);
634 	return bytes;
635 }
636 
637 static void iio_buffer_activate(struct iio_dev *indio_dev,
638 	struct iio_buffer *buffer)
639 {
640 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
641 
642 	iio_buffer_get(buffer);
643 	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
644 }
645 
646 static void iio_buffer_deactivate(struct iio_buffer *buffer)
647 {
648 	list_del_init(&buffer->buffer_list);
649 	wake_up_interruptible(&buffer->pollq);
650 	iio_buffer_put(buffer);
651 }
652 
653 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
654 {
655 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
656 	struct iio_buffer *buffer, *_buffer;
657 
658 	list_for_each_entry_safe(buffer, _buffer,
659 			&iio_dev_opaque->buffer_list, buffer_list)
660 		iio_buffer_deactivate(buffer);
661 }
662 
663 static int iio_buffer_enable(struct iio_buffer *buffer,
664 	struct iio_dev *indio_dev)
665 {
666 	if (!buffer->access->enable)
667 		return 0;
668 	return buffer->access->enable(buffer, indio_dev);
669 }
670 
671 static int iio_buffer_disable(struct iio_buffer *buffer,
672 	struct iio_dev *indio_dev)
673 {
674 	if (!buffer->access->disable)
675 		return 0;
676 	return buffer->access->disable(buffer, indio_dev);
677 }
678 
679 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
680 	struct iio_buffer *buffer)
681 {
682 	unsigned int bytes;
683 
684 	if (!buffer->access->set_bytes_per_datum)
685 		return;
686 
687 	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
688 		buffer->scan_timestamp);
689 
690 	buffer->access->set_bytes_per_datum(buffer, bytes);
691 }
692 
693 static int iio_buffer_request_update(struct iio_dev *indio_dev,
694 	struct iio_buffer *buffer)
695 {
696 	int ret;
697 
698 	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
699 	if (buffer->access->request_update) {
700 		ret = buffer->access->request_update(buffer);
701 		if (ret) {
702 			dev_dbg(&indio_dev->dev,
703 			       "Buffer not started: buffer parameter update failed (%d)\n",
704 				ret);
705 			return ret;
706 		}
707 	}
708 
709 	return 0;
710 }
711 
712 static void iio_free_scan_mask(struct iio_dev *indio_dev,
713 	const unsigned long *mask)
714 {
715 	/* If the mask is dynamically allocated free it, otherwise do nothing */
716 	if (!indio_dev->available_scan_masks)
717 		bitmap_free(mask);
718 }
719 
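/* Buffer configuration computed by iio_verify_update() and applied by iio_enable_buffers() */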
720 struct iio_device_config {
721 	unsigned int mode;
722 	unsigned int watermark;
723 	const unsigned long *scan_mask;
724 	unsigned int scan_bytes;
725 	bool scan_timestamp;
726 };
727 
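/*
 * Check that inserting/removing the given buffers yields a supportable
 * configuration and fill @config with the resulting mode, watermark and
 * combined scan mask.
 */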
728 static int iio_verify_update(struct iio_dev *indio_dev,
729 	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
730 	struct iio_device_config *config)
731 {
732 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
733 	unsigned long *compound_mask;
734 	const unsigned long *scan_mask;
735 	bool strict_scanmask = false;
736 	struct iio_buffer *buffer;
737 	bool scan_timestamp;
738 	unsigned int modes;
739 
740 	if (insert_buffer &&
741 	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
742 		dev_dbg(&indio_dev->dev,
743 			"At least one scan element must be enabled first\n");
744 		return -EINVAL;
745 	}
746 
747 	memset(config, 0, sizeof(*config));
748 	config->watermark = ~0;
749 
750 	/*
751 	 * If there is just one buffer and we are removing it there is nothing
752 	 * to verify.
753 	 */
754 	if (remove_buffer && !insert_buffer &&
755 		list_is_singular(&iio_dev_opaque->buffer_list))
756 			return 0;
757 
758 	modes = indio_dev->modes;
759 
760 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
761 		if (buffer == remove_buffer)
762 			continue;
763 		modes &= buffer->access->modes;
764 		config->watermark = min(config->watermark, buffer->watermark);
765 	}
766 
767 	if (insert_buffer) {
768 		modes &= insert_buffer->access->modes;
769 		config->watermark = min(config->watermark,
770 			insert_buffer->watermark);
771 	}
772 
773 	/* Definitely possible for devices to support both of these. */
774 	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
775 		config->mode = INDIO_BUFFER_TRIGGERED;
776 	} else if (modes & INDIO_BUFFER_HARDWARE) {
777 		/*
778 		 * Keep things simple for now and only allow a single buffer to
779 		 * be connected in hardware mode.
780 		 */
781 		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
782 			return -EINVAL;
783 		config->mode = INDIO_BUFFER_HARDWARE;
784 		strict_scanmask = true;
785 	} else if (modes & INDIO_BUFFER_SOFTWARE) {
786 		config->mode = INDIO_BUFFER_SOFTWARE;
787 	} else {
788 		/* Can only occur on first buffer */
789 		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
790 			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
791 		return -EINVAL;
792 	}
793 
794 	/* What scan mask do we actually have? */
795 	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
796 	if (compound_mask == NULL)
797 		return -ENOMEM;
798 
799 	scan_timestamp = false;
800 
801 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
802 		if (buffer == remove_buffer)
803 			continue;
804 		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
805 			  indio_dev->masklength);
806 		scan_timestamp |= buffer->scan_timestamp;
807 	}
808 
809 	if (insert_buffer) {
810 		bitmap_or(compound_mask, compound_mask,
811 			  insert_buffer->scan_mask, indio_dev->masklength);
812 		scan_timestamp |= insert_buffer->scan_timestamp;
813 	}
814 
815 	if (indio_dev->available_scan_masks) {
816 		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
817 				    indio_dev->masklength,
818 				    compound_mask,
819 				    strict_scanmask);
820 		bitmap_free(compound_mask);
821 		if (scan_mask == NULL)
822 			return -EINVAL;
823 	} else {
824 		scan_mask = compound_mask;
825 	}
826 
827 	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
828 				    scan_mask, scan_timestamp);
829 	config->scan_mask = scan_mask;
830 	config->scan_timestamp = scan_timestamp;
831 
832 	return 0;
833 }
834 
835 /**
836  * struct iio_demux_table - table describing demux memcpy ops
837  * @from:	index to copy from
838  * @to:		index to copy to
839  * @length:	how many bytes to copy
840  * @l:		list head used for management
841  */
842 struct iio_demux_table {
843 	unsigned from;
844 	unsigned to;
845 	unsigned length;
846 	struct list_head l;
847 };
848 
849 static void iio_buffer_demux_free(struct iio_buffer *buffer)
850 {
851 	struct iio_demux_table *p, *q;
852 	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
853 		list_del(&p->l);
854 		kfree(p);
855 	}
856 }
857 
858 static int iio_buffer_add_demux(struct iio_buffer *buffer,
859 	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
860 	unsigned int length)
861 {
862 
863 	if (*p && (*p)->from + (*p)->length == in_loc &&
864 		(*p)->to + (*p)->length == out_loc) {
865 		(*p)->length += length;
866 	} else {
867 		*p = kmalloc(sizeof(**p), GFP_KERNEL);
868 		if (*p == NULL)
869 			return -ENOMEM;
870 		(*p)->from = in_loc;
871 		(*p)->to = out_loc;
872 		(*p)->length = length;
873 		list_add_tail(&(*p)->l, &buffer->demux_list);
874 	}
875 
876 	return 0;
877 }
878 
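/*
 * Rebuild the demux table that repacks samples captured with the device's
 * active_scan_mask into this buffer's own scan_mask layout, and allocate
 * the bounce buffer used by iio_demux().
 */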
879 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
880 				   struct iio_buffer *buffer)
881 {
882 	int ret, in_ind = -1, out_ind, length;
883 	unsigned in_loc = 0, out_loc = 0;
884 	struct iio_demux_table *p = NULL;
885 
886 	/* Clear out any old demux */
887 	iio_buffer_demux_free(buffer);
888 	kfree(buffer->demux_bounce);
889 	buffer->demux_bounce = NULL;
890 
891 	/* First work out which scan mode we will actually have */
892 	if (bitmap_equal(indio_dev->active_scan_mask,
893 			 buffer->scan_mask,
894 			 indio_dev->masklength))
895 		return 0;
896 
897 	/* Now we have the two masks, work from the least significant bit and build up sizes */
898 	for_each_set_bit(out_ind,
899 			 buffer->scan_mask,
900 			 indio_dev->masklength) {
901 		in_ind = find_next_bit(indio_dev->active_scan_mask,
902 				       indio_dev->masklength,
903 				       in_ind + 1);
904 		while (in_ind != out_ind) {
905 			length = iio_storage_bytes_for_si(indio_dev, in_ind);
906 			/* Make sure we are aligned */
907 			in_loc = roundup(in_loc, length) + length;
908 			in_ind = find_next_bit(indio_dev->active_scan_mask,
909 					       indio_dev->masklength,
910 					       in_ind + 1);
911 		}
912 		length = iio_storage_bytes_for_si(indio_dev, in_ind);
913 		out_loc = roundup(out_loc, length);
914 		in_loc = roundup(in_loc, length);
915 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
916 		if (ret)
917 			goto error_clear_mux_table;
918 		out_loc += length;
919 		in_loc += length;
920 	}
921 	/* Relies on scan_timestamp being last */
922 	if (buffer->scan_timestamp) {
923 		length = iio_storage_bytes_for_timestamp(indio_dev);
924 		out_loc = roundup(out_loc, length);
925 		in_loc = roundup(in_loc, length);
926 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
927 		if (ret)
928 			goto error_clear_mux_table;
929 		out_loc += length;
930 	}
931 	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
932 	if (buffer->demux_bounce == NULL) {
933 		ret = -ENOMEM;
934 		goto error_clear_mux_table;
935 	}
936 	return 0;
937 
938 error_clear_mux_table:
939 	iio_buffer_demux_free(buffer);
940 
941 	return ret;
942 }
943 
944 static int iio_update_demux(struct iio_dev *indio_dev)
945 {
946 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
947 	struct iio_buffer *buffer;
948 	int ret;
949 
950 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
951 		ret = iio_buffer_update_demux(indio_dev, buffer);
952 		if (ret < 0)
953 			goto error_clear_mux_table;
954 	}
955 	return 0;
956 
957 error_clear_mux_table:
958 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
959 		iio_buffer_demux_free(buffer);
960 
961 	return ret;
962 }
963 
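/*
 * Apply @config and bring up all buffers on the active list: preenable,
 * update_scan_mode, per-buffer enable, trigger attach and postenable,
 * unwinding on any failure.
 */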
964 static int iio_enable_buffers(struct iio_dev *indio_dev,
965 	struct iio_device_config *config)
966 {
967 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
968 	struct iio_buffer *buffer;
969 	int ret;
970 
971 	indio_dev->active_scan_mask = config->scan_mask;
972 	indio_dev->scan_timestamp = config->scan_timestamp;
973 	indio_dev->scan_bytes = config->scan_bytes;
974 	indio_dev->currentmode = config->mode;
975 
976 	iio_update_demux(indio_dev);
977 
978 	/* Wind up again */
979 	if (indio_dev->setup_ops->preenable) {
980 		ret = indio_dev->setup_ops->preenable(indio_dev);
981 		if (ret) {
982 			dev_dbg(&indio_dev->dev,
983 			       "Buffer not started: buffer preenable failed (%d)\n", ret);
984 			goto err_undo_config;
985 		}
986 	}
987 
988 	if (indio_dev->info->update_scan_mode) {
989 		ret = indio_dev->info
990 			->update_scan_mode(indio_dev,
991 					   indio_dev->active_scan_mask);
992 		if (ret < 0) {
993 			dev_dbg(&indio_dev->dev,
994 				"Buffer not started: update scan mode failed (%d)\n",
995 				ret);
996 			goto err_run_postdisable;
997 		}
998 	}
999 
1000 	if (indio_dev->info->hwfifo_set_watermark)
1001 		indio_dev->info->hwfifo_set_watermark(indio_dev,
1002 			config->watermark);
1003 
1004 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1005 		ret = iio_buffer_enable(buffer, indio_dev);
1006 		if (ret)
1007 			goto err_disable_buffers;
1008 	}
1009 
1010 	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
1011 		ret = iio_trigger_attach_poll_func(indio_dev->trig,
1012 						   indio_dev->pollfunc);
1013 		if (ret)
1014 			goto err_disable_buffers;
1015 	}
1016 
1017 	if (indio_dev->setup_ops->postenable) {
1018 		ret = indio_dev->setup_ops->postenable(indio_dev);
1019 		if (ret) {
1020 			dev_dbg(&indio_dev->dev,
1021 			       "Buffer not started: postenable failed (%d)\n", ret);
1022 			goto err_detach_pollfunc;
1023 		}
1024 	}
1025 
1026 	return 0;
1027 
1028 err_detach_pollfunc:
1029 	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
1030 		iio_trigger_detach_poll_func(indio_dev->trig,
1031 					     indio_dev->pollfunc);
1032 	}
1033 err_disable_buffers:
1034 	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
1035 					     buffer_list)
1036 		iio_buffer_disable(buffer, indio_dev);
1037 err_run_postdisable:
1038 	if (indio_dev->setup_ops->postdisable)
1039 		indio_dev->setup_ops->postdisable(indio_dev);
1040 err_undo_config:
1041 	indio_dev->currentmode = INDIO_DIRECT_MODE;
1042 	indio_dev->active_scan_mask = NULL;
1043 
1044 	return ret;
1045 }
1046 
1047 static int iio_disable_buffers(struct iio_dev *indio_dev)
1048 {
1049 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1050 	struct iio_buffer *buffer;
1051 	int ret = 0;
1052 	int ret2;
1053 
1054 	/* Wind down existing buffers - iff there are any */
1055 	if (list_empty(&iio_dev_opaque->buffer_list))
1056 		return 0;
1057 
1058 	/*
1059 	 * If things go wrong at some step in disable we still need to continue
1060 	 * to perform the other steps, otherwise we leave the device in an
1061 	 * inconsistent state. We return the error code for the first error we
1062 	 * encountered.
1063 	 */
1064 
1065 	if (indio_dev->setup_ops->predisable) {
1066 		ret2 = indio_dev->setup_ops->predisable(indio_dev);
1067 		if (ret2 && !ret)
1068 			ret = ret2;
1069 	}
1070 
1071 	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
1072 		iio_trigger_detach_poll_func(indio_dev->trig,
1073 					     indio_dev->pollfunc);
1074 	}
1075 
1076 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1077 		ret2 = iio_buffer_disable(buffer, indio_dev);
1078 		if (ret2 && !ret)
1079 			ret = ret2;
1080 	}
1081 
1082 	if (indio_dev->setup_ops->postdisable) {
1083 		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1084 		if (ret2 && !ret)
1085 			ret = ret2;
1086 	}
1087 
1088 	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1089 	indio_dev->active_scan_mask = NULL;
1090 	indio_dev->currentmode = INDIO_DIRECT_MODE;
1091 
1092 	return ret;
1093 }
1094 
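/*
 * Tear down the currently enabled buffers, apply the requested insertion/
 * removal and re-enable if any buffers remain active. Called with
 * indio_dev->mlock held.
 */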
1095 static int __iio_update_buffers(struct iio_dev *indio_dev,
1096 		       struct iio_buffer *insert_buffer,
1097 		       struct iio_buffer *remove_buffer)
1098 {
1099 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1100 	struct iio_device_config new_config;
1101 	int ret;
1102 
1103 	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1104 		&new_config);
1105 	if (ret)
1106 		return ret;
1107 
1108 	if (insert_buffer) {
1109 		ret = iio_buffer_request_update(indio_dev, insert_buffer);
1110 		if (ret)
1111 			goto err_free_config;
1112 	}
1113 
1114 	ret = iio_disable_buffers(indio_dev);
1115 	if (ret)
1116 		goto err_deactivate_all;
1117 
1118 	if (remove_buffer)
1119 		iio_buffer_deactivate(remove_buffer);
1120 	if (insert_buffer)
1121 		iio_buffer_activate(indio_dev, insert_buffer);
1122 
1123 	/* If no buffers in list, we are done */
1124 	if (list_empty(&iio_dev_opaque->buffer_list))
1125 		return 0;
1126 
1127 	ret = iio_enable_buffers(indio_dev, &new_config);
1128 	if (ret)
1129 		goto err_deactivate_all;
1130 
1131 	return 0;
1132 
1133 err_deactivate_all:
1134 	/*
1135 	 * We've already verified that the config is valid earlier. If things go
1136 	 * wrong in either enable or disable the most likely reason is an IO
1137 	 * error from the device. In this case there is no good recovery
1138 	 * strategy. Just make sure to disable everything and leave the device
1139 	 * in a sane state.  With a bit of luck the device might come back to
1140 	 * life again later and userspace can try again.
1141 	 */
1142 	iio_buffer_deactivate_all(indio_dev);
1143 
1144 err_free_config:
1145 	iio_free_scan_mask(indio_dev, new_config.scan_mask);
1146 	return ret;
1147 }
1148 
1149 int iio_update_buffers(struct iio_dev *indio_dev,
1150 		       struct iio_buffer *insert_buffer,
1151 		       struct iio_buffer *remove_buffer)
1152 {
1153 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1154 	int ret;
1155 
1156 	if (insert_buffer == remove_buffer)
1157 		return 0;
1158 
1159 	mutex_lock(&iio_dev_opaque->info_exist_lock);
1160 	mutex_lock(&indio_dev->mlock);
1161 
1162 	if (insert_buffer && iio_buffer_is_active(insert_buffer))
1163 		insert_buffer = NULL;
1164 
1165 	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1166 		remove_buffer = NULL;
1167 
1168 	if (!insert_buffer && !remove_buffer) {
1169 		ret = 0;
1170 		goto out_unlock;
1171 	}
1172 
1173 	if (indio_dev->info == NULL) {
1174 		ret = -ENODEV;
1175 		goto out_unlock;
1176 	}
1177 
1178 	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1179 
1180 out_unlock:
1181 	mutex_unlock(&indio_dev->mlock);
1182 	mutex_unlock(&iio_dev_opaque->info_exist_lock);
1183 
1184 	return ret;
1185 }
1186 EXPORT_SYMBOL_GPL(iio_update_buffers);
1187 
1188 void iio_disable_all_buffers(struct iio_dev *indio_dev)
1189 {
1190 	iio_disable_buffers(indio_dev);
1191 	iio_buffer_deactivate_all(indio_dev);
1192 }
1193 
1194 static ssize_t iio_buffer_store_enable(struct device *dev,
1195 				       struct device_attribute *attr,
1196 				       const char *buf,
1197 				       size_t len)
1198 {
1199 	int ret;
1200 	bool requested_state;
1201 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1202 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1203 	bool inlist;
1204 
1205 	ret = strtobool(buf, &requested_state);
1206 	if (ret < 0)
1207 		return ret;
1208 
1209 	mutex_lock(&indio_dev->mlock);
1210 
1211 	/* Find out if it is in the list */
1212 	inlist = iio_buffer_is_active(buffer);
1213 	/* Already in desired state */
1214 	if (inlist == requested_state)
1215 		goto done;
1216 
1217 	if (requested_state)
1218 		ret = __iio_update_buffers(indio_dev, buffer, NULL);
1219 	else
1220 		ret = __iio_update_buffers(indio_dev, NULL, buffer);
1221 
1222 done:
1223 	mutex_unlock(&indio_dev->mlock);
1224 	return (ret < 0) ? ret : len;
1225 }
1226 
1227 static ssize_t iio_buffer_show_watermark(struct device *dev,
1228 					 struct device_attribute *attr,
1229 					 char *buf)
1230 {
1231 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1232 
1233 	return sysfs_emit(buf, "%u\n", buffer->watermark);
1234 }
1235 
1236 static ssize_t iio_buffer_store_watermark(struct device *dev,
1237 					  struct device_attribute *attr,
1238 					  const char *buf,
1239 					  size_t len)
1240 {
1241 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1242 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1243 	unsigned int val;
1244 	int ret;
1245 
1246 	ret = kstrtouint(buf, 10, &val);
1247 	if (ret)
1248 		return ret;
1249 	if (!val)
1250 		return -EINVAL;
1251 
1252 	mutex_lock(&indio_dev->mlock);
1253 
1254 	if (val > buffer->length) {
1255 		ret = -EINVAL;
1256 		goto out;
1257 	}
1258 
1259 	if (iio_buffer_is_active(buffer)) {
1260 		ret = -EBUSY;
1261 		goto out;
1262 	}
1263 
1264 	buffer->watermark = val;
1265 out:
1266 	mutex_unlock(&indio_dev->mlock);
1267 
1268 	return ret ? ret : len;
1269 }
1270 
1271 static ssize_t iio_dma_show_data_available(struct device *dev,
1272 						struct device_attribute *attr,
1273 						char *buf)
1274 {
1275 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1276 
1277 	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1278 }
1279 
1280 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
1281 		   iio_buffer_write_length);
1282 static struct device_attribute dev_attr_length_ro = __ATTR(length,
1283 	S_IRUGO, iio_buffer_read_length, NULL);
1284 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
1285 		   iio_buffer_show_enable, iio_buffer_store_enable);
1286 static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
1287 		   iio_buffer_show_watermark, iio_buffer_store_watermark);
1288 static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
1289 	S_IRUGO, iio_buffer_show_watermark, NULL);
1290 static DEVICE_ATTR(data_available, S_IRUGO,
1291 		iio_dma_show_data_available, NULL);
1292 
1293 static struct attribute *iio_buffer_attrs[] = {
1294 	&dev_attr_length.attr,
1295 	&dev_attr_enable.attr,
1296 	&dev_attr_watermark.attr,
1297 	&dev_attr_data_available.attr,
1298 };
1299 
1300 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1301 
1302 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1303 					      struct attribute *attr)
1304 {
1305 	struct device_attribute *dattr = to_dev_attr(attr);
1306 	struct iio_dev_attr *iio_attr;
1307 
1308 	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1309 	if (!iio_attr)
1310 		return NULL;
1311 
1312 	iio_attr->buffer = buffer;
1313 	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1314 	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1315 	if (!iio_attr->dev_attr.attr.name) {
1316 		kfree(iio_attr);
1317 		return NULL;
1318 	}
1319 
1320 	sysfs_attr_init(&iio_attr->dev_attr.attr);
1321 
1322 	list_add(&iio_attr->l, &buffer->buffer_attr_list);
1323 
1324 	return &iio_attr->dev_attr.attr;
1325 }
1326 
1327 static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1328 						   struct attribute **buffer_attrs,
1329 						   int buffer_attrcount,
1330 						   int scan_el_attrcount)
1331 {
1332 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1333 	struct attribute_group *group;
1334 	struct attribute **attrs;
1335 	int ret;
1336 
1337 	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1338 	if (!attrs)
1339 		return -ENOMEM;
1340 
1341 	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1342 
1343 	group = &iio_dev_opaque->legacy_buffer_group;
1344 	group->attrs = attrs;
1345 	group->name = "buffer";
1346 
1347 	ret = iio_device_register_sysfs_group(indio_dev, group);
1348 	if (ret)
1349 		goto error_free_buffer_attrs;
1350 
1351 	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1352 	if (!attrs) {
1353 		ret = -ENOMEM;
1354 		goto error_free_buffer_attrs;
1355 	}
1356 
1357 	memcpy(attrs, &buffer_attrs[buffer_attrcount],
1358 	       scan_el_attrcount * sizeof(*attrs));
1359 
1360 	group = &iio_dev_opaque->legacy_scan_el_group;
1361 	group->attrs = attrs;
1362 	group->name = "scan_elements";
1363 
1364 	ret = iio_device_register_sysfs_group(indio_dev, group);
1365 	if (ret)
1366 		goto error_free_scan_el_attrs;
1367 
1368 	return 0;
1369 
1370 error_free_scan_el_attrs:
1371 	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1372 error_free_buffer_attrs:
1373 	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1374 
1375 	return ret;
1376 }
1377 
1378 static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1379 {
1380 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1381 
1382 	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1383 	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1384 }
1385 
1386 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1387 {
1388 	struct iio_dev_buffer_pair *ib = filep->private_data;
1389 	struct iio_dev *indio_dev = ib->indio_dev;
1390 	struct iio_buffer *buffer = ib->buffer;
1391 
1392 	wake_up(&buffer->pollq);
1393 
1394 	kfree(ib);
1395 	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1396 	iio_device_put(indio_dev);
1397 
1398 	return 0;
1399 }
1400 
1401 static const struct file_operations iio_buffer_chrdev_fileops = {
1402 	.owner = THIS_MODULE,
1403 	.llseek = noop_llseek,
1404 	.read = iio_buffer_read,
1405 	.poll = iio_buffer_poll,
1406 	.release = iio_buffer_chrdev_release,
1407 };
1408 
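/*
 * IIO_BUFFER_GET_FD_IOCTL handler: return an anon-inode file descriptor
 * bound to the attached buffer at the index supplied by userspace. Each
 * buffer can only be opened this way once at a time.
 */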
1409 static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
1410 {
1411 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1412 	int __user *ival = (int __user *)arg;
1413 	struct iio_dev_buffer_pair *ib;
1414 	struct iio_buffer *buffer;
1415 	int fd, idx, ret;
1416 
1417 	if (copy_from_user(&idx, ival, sizeof(idx)))
1418 		return -EFAULT;
1419 
1420 	if (idx >= iio_dev_opaque->attached_buffers_cnt)
1421 		return -ENODEV;
1422 
1423 	iio_device_get(indio_dev);
1424 
1425 	buffer = iio_dev_opaque->attached_buffers[idx];
1426 
1427 	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
1428 		ret = -EBUSY;
1429 		goto error_iio_dev_put;
1430 	}
1431 
1432 	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
1433 	if (!ib) {
1434 		ret = -ENOMEM;
1435 		goto error_clear_busy_bit;
1436 	}
1437 
1438 	ib->indio_dev = indio_dev;
1439 	ib->buffer = buffer;
1440 
1441 	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
1442 			      ib, O_RDWR | O_CLOEXEC);
1443 	if (fd < 0) {
1444 		ret = fd;
1445 		goto error_free_ib;
1446 	}
1447 
1448 	if (copy_to_user(ival, &fd, sizeof(fd))) {
1449 		/*
1450 		 * "Leak" the fd, as there's not much we can do about this
1451 		 * anyway. 'fd' might have been closed already, as
1452 		 * anon_inode_getfd() called fd_install() on it, which made
1453 		 * it reachable by userland.
1454 		 *
1455 		 * Instead of allowing a malicious user to play tricks with
1456 		 * us, rely on the process exit path to do any necessary
1457 		 * cleanup, as in releasing the file, if still needed.
1458 		 */
1459 		return -EFAULT;
1460 	}
1461 
1462 	return 0;
1463 
1464 error_free_ib:
1465 	kfree(ib);
1466 error_clear_busy_bit:
1467 	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1468 error_iio_dev_put:
1469 	iio_device_put(indio_dev);
1470 	return ret;
1471 }
1472 
1473 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1474 				    unsigned int cmd, unsigned long arg)
1475 {
1476 	switch (cmd) {
1477 	case IIO_BUFFER_GET_FD_IOCTL:
1478 		return iio_device_buffer_getfd(indio_dev, arg);
1479 	default:
1480 		return IIO_IOCTL_UNHANDLED;
1481 	}
1482 }
1483 
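/*
 * Build the bufferN sysfs group for one attached buffer: per-channel scan
 * element attributes, wrapped core buffer attributes and the buffer's scan
 * mask. The first buffer also gets the legacy buffer/ and scan_elements/
 * groups.
 */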
1484 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
1485 					     struct iio_dev *indio_dev,
1486 					     int index)
1487 {
1488 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1489 	struct iio_dev_attr *p;
1490 	struct attribute **attr;
1491 	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
1492 	const struct iio_chan_spec *channels;
1493 
1494 	buffer_attrcount = 0;
1495 	if (buffer->attrs) {
1496 		while (buffer->attrs[buffer_attrcount] != NULL)
1497 			buffer_attrcount++;
1498 	}
1499 
1500 	scan_el_attrcount = 0;
1501 	INIT_LIST_HEAD(&buffer->buffer_attr_list);
1502 	channels = indio_dev->channels;
1503 	if (channels) {
1504 		/* create the per-channel scan element attributes */
1505 		for (i = 0; i < indio_dev->num_channels; i++) {
1506 			if (channels[i].scan_index < 0)
1507 				continue;
1508 
1509 			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
1510 							 &channels[i]);
1511 			if (ret < 0)
1512 				goto error_cleanup_dynamic;
1513 			scan_el_attrcount += ret;
1514 			if (channels[i].type == IIO_TIMESTAMP)
1515 				iio_dev_opaque->scan_index_timestamp =
1516 					channels[i].scan_index;
1517 		}
1518 		if (indio_dev->masklength && buffer->scan_mask == NULL) {
1519 			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1520 							  GFP_KERNEL);
1521 			if (buffer->scan_mask == NULL) {
1522 				ret = -ENOMEM;
1523 				goto error_cleanup_dynamic;
1524 			}
1525 		}
1526 	}
1527 
1528 	attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
1529 	attr = kcalloc(attrn + 1, sizeof(* attr), GFP_KERNEL);
1530 	if (!attr) {
1531 		ret = -ENOMEM;
1532 		goto error_free_scan_mask;
1533 	}
1534 
1535 	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1536 	if (!buffer->access->set_length)
1537 		attr[0] = &dev_attr_length_ro.attr;
1538 
1539 	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1540 		attr[2] = &dev_attr_watermark_ro.attr;
1541 
1542 	if (buffer->attrs)
1543 		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
1544 		       sizeof(struct attribute *) * buffer_attrcount);
1545 
1546 	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
1547 	buffer->buffer_group.attrs = attr;
1548 
1549 	for (i = 0; i < buffer_attrcount; i++) {
1550 		struct attribute *wrapped;
1551 
1552 		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
1553 		if (!wrapped) {
1554 			ret = -ENOMEM;
1555 			goto error_free_buffer_attrs;
1556 		}
1557 		attr[i] = wrapped;
1558 	}
1559 
1560 	attrn = 0;
1561 	list_for_each_entry(p, &buffer->buffer_attr_list, l)
1562 		attr[attrn++] = &p->dev_attr.attr;
1563 
1564 	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
1565 	if (!buffer->buffer_group.name) {
1566 		ret = -ENOMEM;
1567 		goto error_free_buffer_attrs;
1568 	}
1569 
1570 	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
1571 	if (ret)
1572 		goto error_free_buffer_attr_group_name;
1573 
1574 	/* we only need to register the legacy groups for the first buffer */
1575 	if (index > 0)
1576 		return 0;
1577 
1578 	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
1579 						      buffer_attrcount,
1580 						      scan_el_attrcount);
1581 	if (ret)
1582 		goto error_free_buffer_attr_group_name;
1583 
1584 	return 0;
1585 
1586 error_free_buffer_attr_group_name:
1587 	kfree(buffer->buffer_group.name);
1588 error_free_buffer_attrs:
1589 	kfree(buffer->buffer_group.attrs);
1590 error_free_scan_mask:
1591 	bitmap_free(buffer->scan_mask);
1592 error_cleanup_dynamic:
1593 	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1594 
1595 	return ret;
1596 }
1597 
1598 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
1599 					     struct iio_dev *indio_dev,
1600 					     int index)
1601 {
1602 	if (index == 0)
1603 		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
1604 	bitmap_free(buffer->scan_mask);
1605 	kfree(buffer->buffer_group.name);
1606 	kfree(buffer->buffer_group.attrs);
1607 	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1608 }
1609 
1610 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1611 {
1612 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1613 	const struct iio_chan_spec *channels;
1614 	struct iio_buffer *buffer;
1615 	int unwind_idx;
1616 	int ret, i;
1617 	size_t sz;
1618 
1619 	channels = indio_dev->channels;
1620 	if (channels) {
1621 		int ml = indio_dev->masklength;
1622 
1623 		for (i = 0; i < indio_dev->num_channels; i++)
1624 			ml = max(ml, channels[i].scan_index + 1);
1625 		indio_dev->masklength = ml;
1626 	}
1627 
1628 	if (!iio_dev_opaque->attached_buffers_cnt)
1629 		return 0;
1630 
1631 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
1632 		buffer = iio_dev_opaque->attached_buffers[i];
1633 		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
1634 		if (ret) {
1635 			unwind_idx = i - 1;
1636 			goto error_unwind_sysfs_and_mask;
1637 		}
1638 	}
1639 	unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;
1640 
1641 	sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
1642 	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
1643 	if (!iio_dev_opaque->buffer_ioctl_handler) {
1644 		ret = -ENOMEM;
1645 		goto error_unwind_sysfs_and_mask;
1646 	}
1647 
1648 	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
1649 	iio_device_ioctl_handler_register(indio_dev,
1650 					  iio_dev_opaque->buffer_ioctl_handler);
1651 
1652 	return 0;
1653 
1654 error_unwind_sysfs_and_mask:
1655 	for (; unwind_idx >= 0; unwind_idx--) {
1656 		buffer = iio_dev_opaque->attached_buffers[unwind_idx];
1657 		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, unwind_idx);
1658 	}
1659 	return ret;
1660 }
1661 
1662 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
1663 {
1664 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1665 	struct iio_buffer *buffer;
1666 	int i;
1667 
1668 	if (!iio_dev_opaque->attached_buffers_cnt)
1669 		return;
1670 
1671 	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
1672 	kfree(iio_dev_opaque->buffer_ioctl_handler);
1673 
1674 	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
1675 		buffer = iio_dev_opaque->attached_buffers[i];
1676 		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
1677 	}
1678 }
1679 
1680 /**
1681  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1682  * @indio_dev: the iio device
1683  * @mask: scan mask to be checked
1684  *
1685  * Return true if exactly one bit is set in the scan mask, false otherwise. It
1686  * can be used for devices where only one channel can be active for sampling at
1687  * a time.
1688  */
1689 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1690 	const unsigned long *mask)
1691 {
1692 	return bitmap_weight(mask, indio_dev->masklength) == 1;
1693 }
1694 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
1695 
1696 static const void *iio_demux(struct iio_buffer *buffer,
1697 				 const void *datain)
1698 {
1699 	struct iio_demux_table *t;
1700 
1701 	if (list_empty(&buffer->demux_list))
1702 		return datain;
1703 	list_for_each_entry(t, &buffer->demux_list, l)
1704 		memcpy(buffer->demux_bounce + t->to,
1705 		       datain + t->from, t->length);
1706 
1707 	return buffer->demux_bounce;
1708 }
1709 
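/* Demux one scan into this buffer's layout, store it and wake up any pollers */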
1710 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1711 {
1712 	const void *dataout = iio_demux(buffer, data);
1713 	int ret;
1714 
1715 	ret = buffer->access->store_to(buffer, dataout);
1716 	if (ret)
1717 		return ret;
1718 
1719 	/*
1720 	 * We can't just test for watermark to decide if we wake the poll queue
1721 	 * because read may request less samples than the watermark.
1722 	 */
1723 	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1724 	return 0;
1725 }
1726 
1727 /**
1728  * iio_push_to_buffers() - push to a registered buffer.
1729  * @indio_dev:		iio_dev structure for device.
1730  * @data:		Full scan.
1731  */
1732 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1733 {
1734 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1735 	int ret;
1736 	struct iio_buffer *buf;
1737 
1738 	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
1739 		ret = iio_push_to_buffer(buf, data);
1740 		if (ret < 0)
1741 			return ret;
1742 	}
1743 
1744 	return 0;
1745 }
1746 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
1747 
1748 /**
1749  * iio_buffer_release() - Free a buffer's resources
1750  * @ref: Pointer to the kref embedded in the iio_buffer struct
1751  *
1752  * This function is called when the last reference to the buffer has been
1753  * dropped. It will typically free all resources allocated by the buffer. Do not
1754  * call this function manually; always use iio_buffer_put() when done using a
1755  * buffer.
1756  */
1757 static void iio_buffer_release(struct kref *ref)
1758 {
1759 	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1760 
1761 	buffer->access->release(buffer);
1762 }
1763 
1764 /**
1765  * iio_buffer_get() - Grab a reference to the buffer
1766  * @buffer: The buffer to grab a reference for, may be NULL
1767  *
1768  * Returns the pointer to the buffer that was passed into the function.
1769  */
1770 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1771 {
1772 	if (buffer)
1773 		kref_get(&buffer->ref);
1774 
1775 	return buffer;
1776 }
1777 EXPORT_SYMBOL_GPL(iio_buffer_get);
1778 
1779 /**
1780  * iio_buffer_put() - Release the reference to the buffer
1781  * @buffer: The buffer to release the reference for, may be NULL
1782  */
1783 void iio_buffer_put(struct iio_buffer *buffer)
1784 {
1785 	if (buffer)
1786 		kref_put(&buffer->ref, iio_buffer_release);
1787 }
1788 EXPORT_SYMBOL_GPL(iio_buffer_put);
1789 
1790 /**
1791  * iio_device_attach_buffer - Attach a buffer to an IIO device
1792  * @indio_dev: The device the buffer should be attached to
1793  * @buffer: The buffer to attach to the device
1794  *
1795  * Return: 0 on success, a negative error code otherwise.
1796  *
1797  * This function attaches a buffer to an IIO device. The buffer stays attached to
1798  * the device until the device is freed. For legacy reasons, the first attached
1799  * buffer will also be assigned to 'indio_dev->buffer'.
1800  * The array allocated here will be freed via the iio_device_detach_buffers()
1801  * call, which is handled by iio_device_free().
1802  */
1803 int iio_device_attach_buffer(struct iio_dev *indio_dev,
1804 			     struct iio_buffer *buffer)
1805 {
1806 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1807 	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
1808 	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
1809 
1810 	cnt++;
1811 
1812 	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
1813 	if (!new)
1814 		return -ENOMEM;
1815 	iio_dev_opaque->attached_buffers = new;
1816 
1817 	buffer = iio_buffer_get(buffer);
1818 
1819 	/* first buffer is legacy; attach it to the IIO device directly */
1820 	if (!indio_dev->buffer)
1821 		indio_dev->buffer = buffer;
1822 
1823 	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
1824 	iio_dev_opaque->attached_buffers_cnt = cnt;
1825 
1826 	return 0;
1827 }
1828 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
1829