/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
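
/*
 * Example (illustrative, not part of this file): this entry point is
 * reached via an ordinary read() on the buffer's character device. The
 * device path below is an assumption for a first registered device.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char data[512];
 *	ssize_t n = read(fd, data, sizeof(data));
 *	close(fd);
 */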

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
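
/*
 * Example (illustrative sketch): as noted above, buffer implementations
 * must embed a struct iio_buffer as their first element and initialise
 * it here at allocation time. The names my_ring and my_ring_allocate
 * are assumptions, not part of the IIO API.
 *
 *	struct my_ring {
 *		struct iio_buffer buffer;	// must come first
 *		u8 *data;
 *	};
 *
 *	static struct iio_buffer *my_ring_allocate(void)
 *	{
 *		struct my_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buffer);
 *		return &ring->buffer;
 *	}
 */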

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
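
/*
 * For example (illustrative values), a signed little-endian channel with
 * 12 valid bits stored in 16 bits and shifted right by 4 reads back from
 * sysfs as "le:s12/16>>4".
 */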

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build scan element sysfs attributes for each channel */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
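
/*
 * Example (illustrative sketch): a driver typically calls this from its
 * probe routine once indio_dev->buffer has been allocated and
 * configured. my_channels is an assumed name for the driver's channel
 * array.
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_buffer;
 */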

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* NULL is used as the error indicator since an empty mask can never be
 * a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
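
/*
 * Example (illustrative): available_scan_masks is laid out as
 * consecutive bitmaps of BITS_TO_LONGS(masklength) longs each,
 * terminated by an empty mask. For masklength <= BITS_PER_LONG a driver
 * might supply something like the following (my_scan_masks is an
 * assumed name):
 *
 *	static const unsigned long my_scan_masks[] = {
 *		0x3,	// channels 0 and 1 together
 *		0xc,	// channels 2 and 3 together
 *		0,	// terminator
 *	};
 *
 * iio_scan_mask_match() then returns the first entry of which the
 * requested mask is a subset.
 */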

static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
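
/*
 * Worked example (illustrative): with a 16-bit channel and a 32-bit
 * channel enabled plus a 64-bit timestamp, the loop above yields
 * bytes = 0 + 2 = 2, then ALIGN(2, 4) + 4 = 8, and the timestamp pads
 * to ALIGN(8, 8) + 8 = 16 bytes per scan.
 */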

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed\n");
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed\n");
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "update scan mode failed\n");
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed\n");
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
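
/*
 * Example (illustrative sketch): an in-kernel consumer attaches an
 * already configured buffer (my_buffer is an assumed name) with
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *
 * and detaches it again with
 *
 *	ret = iio_update_buffers(indio_dev, NULL, my_buffer);
 *
 * As in iio_buffer_store_enable() below, the caller is expected to hold
 * indio_dev->mlock across the call.
 */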

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
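
/*
 * Example (illustrative): a driver whose hardware can only sample one
 * channel at a time can plug this helper straight into its setup ops
 * (my_setup_ops is an assumed name):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 */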

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
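
/*
 * Example (illustrative sketch): a pollfunc bottom half typically fills
 * a scan_bytes-sized bounce buffer and pushes it to all attached
 * buffers. Everything here except iio_push_to_buffers() and
 * iio_trigger_notify_done() is an assumed name.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 data[MY_MAX_SCAN_BYTES];
 *
 *		my_read_scan(indio_dev, data);
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */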

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		/* Skip over source channels this buffer has not enabled */
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
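
/*
 * Worked example (illustrative): with three 16-bit channels active on
 * the device (bits 0, 1 and 2 of active_scan_mask) and a buffer that
 * enabled only bits 0 and 2, the table built above holds two entries,
 * { .from = 0, .to = 0, .length = 2 } and { .from = 4, .to = 2,
 * .length = 2 }, so the middle channel is dropped during demux.
 */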

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);