/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

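/*
 * Flush up to @required samples from the device's hardware FIFO into the
 * software buffer. Returns the number of samples flushed, a negative error
 * code, or -ENODEV if the driver provides no flush callback.
 */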
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

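/*
 * iio_buffer_ready() - check if there is enough data to satisfy a read
 * @indio_dev:	the IIO device being read from or polled
 * @buf:	the buffer being waited on
 * @to_wait:	number of datums to wait for
 * @to_flush:	number of datums to flush from the hardware FIFO for
 *		non-blocking reads
 *
 * Returns true if the buffer already holds, or the hardware FIFO can be
 * flushed to provide, at least @to_wait datums. Also returns true when the
 * device has been unregistered, so that any waiter is woken up.
 */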
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		Maximum number of bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: a negative error code, or the number of bytes read. A non-zero
 * return value ends the read.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

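/**
 * iio_buffer_init() - Initialize the buffer structure
 * @buffer: buffer to be initialized
 *
 * This function has to be called before any other operation is performed
 * on the buffer. It initializes the list heads and the wait queue, takes
 * the initial reference and makes sure the watermark is at least one datum.
 */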
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note: NULL is used as the error indicator here, since a NULL match would
 * make no sense.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

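/*
 * Number of bytes one scan element with scan index @scan_index occupies in
 * the buffer: its storage size, multiplied by the repeat count for
 * repeated elements.
 */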
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

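/*
 * Compute the size in bytes of one scan for the given scan mask. Each
 * enabled element is naturally aligned to its own storage size; the
 * timestamp, if enabled, goes last.
 */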
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}

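/**
 * struct iio_device_config - candidate IIO device configuration
 * @mode:		buffer mode the device will operate in
 * @watermark:		minimum watermark over all attached buffers
 * @scan_mask:		combined scan mask of all attached buffers
 * @scan_bytes:		size of one scan in bytes
 * @scan_timestamp:	true if any attached buffer captures the timestamp
 */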
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

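/*
 * Work out the configuration the device would end up with after
 * @insert_buffer is attached and @remove_buffer is detached, and check that
 * it is valid. On success the result is stored in @config; the caller owns
 * config->scan_mask and must release it with iio_free_scan_mask().
 */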
static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&indio_dev->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		kfree(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

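/*
 * Apply a verified configuration and start up all attached buffers: update
 * the demux tables, run the preenable callback, program the scan mode and
 * hardware FIFO watermark, enable each buffer and finally run the
 * postenable callback. Unwinds what it has already done on failure.
 */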
static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_disable_buffers;
		}
	}

	return 0;

err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}

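/*
 * Swap @remove_buffer for @insert_buffer on @indio_dev. Must be called with
 * indio_dev->mlock held. The currently running configuration is torn down
 * before the new one is brought up.
 */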
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things
	 * go wrong in either enable or disable the most likely reason is an
	 * IO error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

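/*
 * Apply the buffer's demux table to one scan. If no demux is required the
 * incoming data is returned untouched; otherwise the selected fields are
 * copied into the bounce buffer, which is returned instead.
 */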
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request less samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

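/*
 * Push one complete scan to every buffer attached to the device, demuxing
 * it as required. Stops and returns on the first error.
 */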
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

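/*
 * Extend the demux table with a copy of @length bytes from @in_loc to
 * @out_loc. If the new region is contiguous with the previous entry, the
 * two are merged into a single memcpy.
 */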
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

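/*
 * Rebuild the demux table that maps the device's active scan layout onto
 * the subset of channels this buffer has requested, allocating a bounce
 * buffer to demux into when the two layouts differ.
 */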
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Account for each element we skip over */
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do
 * not call this function manually, always use iio_buffer_put() when done
 * using a buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);