1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3 *
4 * Copyright (c) 2008 Jonathan Cameron
5 *
6 * Based on elements of hwmon and input subsystems.
7 */
8
9 #define pr_fmt(fmt) "iio-core: " fmt
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/idr.h>
14 #include <linux/kdev_t.h>
15 #include <linux/err.h>
16 #include <linux/device.h>
17 #include <linux/fs.h>
18 #include <linux/poll.h>
19 #include <linux/property.h>
20 #include <linux/sched.h>
21 #include <linux/wait.h>
22 #include <linux/cdev.h>
23 #include <linux/slab.h>
24 #include <linux/anon_inodes.h>
25 #include <linux/debugfs.h>
26 #include <linux/mutex.h>
27 #include <linux/iio/iio.h>
28 #include <linux/iio/iio-opaque.h>
29 #include "iio_core.h"
30 #include "iio_core_trigger.h"
31 #include <linux/iio/sysfs.h>
32 #include <linux/iio/events.h>
33 #include <linux/iio/buffer.h>
34 #include <linux/iio/buffer_impl.h>
35
36 /* IDA to assign each registered device a unique id */
37 static DEFINE_IDA(iio_ida);
38
39 static dev_t iio_devt;
40
41 #define IIO_DEV_MAX 256
42 struct bus_type iio_bus_type = {
43 .name = "iio",
44 };
45 EXPORT_SYMBOL(iio_bus_type);
46
47 static struct dentry *iio_debugfs_dentry;
48
49 static const char * const iio_direction[] = {
50 [0] = "in",
51 [1] = "out",
52 };
53
54 static const char * const iio_chan_type_name_spec[] = {
55 [IIO_VOLTAGE] = "voltage",
56 [IIO_CURRENT] = "current",
57 [IIO_POWER] = "power",
58 [IIO_ACCEL] = "accel",
59 [IIO_ANGL_VEL] = "anglvel",
60 [IIO_MAGN] = "magn",
61 [IIO_LIGHT] = "illuminance",
62 [IIO_INTENSITY] = "intensity",
63 [IIO_PROXIMITY] = "proximity",
64 [IIO_TEMP] = "temp",
65 [IIO_INCLI] = "incli",
66 [IIO_ROT] = "rot",
67 [IIO_ANGL] = "angl",
68 [IIO_TIMESTAMP] = "timestamp",
69 [IIO_CAPACITANCE] = "capacitance",
70 [IIO_ALTVOLTAGE] = "altvoltage",
71 [IIO_CCT] = "cct",
72 [IIO_PRESSURE] = "pressure",
73 [IIO_HUMIDITYRELATIVE] = "humidityrelative",
74 [IIO_ACTIVITY] = "activity",
75 [IIO_STEPS] = "steps",
76 [IIO_ENERGY] = "energy",
77 [IIO_DISTANCE] = "distance",
78 [IIO_VELOCITY] = "velocity",
79 [IIO_CONCENTRATION] = "concentration",
80 [IIO_RESISTANCE] = "resistance",
81 [IIO_PH] = "ph",
82 [IIO_UVINDEX] = "uvindex",
83 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
84 [IIO_COUNT] = "count",
85 [IIO_INDEX] = "index",
86 [IIO_GRAVITY] = "gravity",
87 [IIO_POSITIONRELATIVE] = "positionrelative",
88 [IIO_PHASE] = "phase",
89 [IIO_MASSCONCENTRATION] = "massconcentration",
90 };
91
92 static const char * const iio_modifier_names[] = {
93 [IIO_MOD_X] = "x",
94 [IIO_MOD_Y] = "y",
95 [IIO_MOD_Z] = "z",
96 [IIO_MOD_X_AND_Y] = "x&y",
97 [IIO_MOD_X_AND_Z] = "x&z",
98 [IIO_MOD_Y_AND_Z] = "y&z",
99 [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
100 [IIO_MOD_X_OR_Y] = "x|y",
101 [IIO_MOD_X_OR_Z] = "x|z",
102 [IIO_MOD_Y_OR_Z] = "y|z",
103 [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
104 [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
105 [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
106 [IIO_MOD_LIGHT_BOTH] = "both",
107 [IIO_MOD_LIGHT_IR] = "ir",
108 [IIO_MOD_LIGHT_CLEAR] = "clear",
109 [IIO_MOD_LIGHT_RED] = "red",
110 [IIO_MOD_LIGHT_GREEN] = "green",
111 [IIO_MOD_LIGHT_BLUE] = "blue",
112 [IIO_MOD_LIGHT_UV] = "uv",
113 [IIO_MOD_LIGHT_DUV] = "duv",
114 [IIO_MOD_QUATERNION] = "quaternion",
115 [IIO_MOD_TEMP_AMBIENT] = "ambient",
116 [IIO_MOD_TEMP_OBJECT] = "object",
117 [IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
118 [IIO_MOD_NORTH_TRUE] = "from_north_true",
119 [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
120 [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
121 [IIO_MOD_RUNNING] = "running",
122 [IIO_MOD_JOGGING] = "jogging",
123 [IIO_MOD_WALKING] = "walking",
124 [IIO_MOD_STILL] = "still",
125 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
126 [IIO_MOD_I] = "i",
127 [IIO_MOD_Q] = "q",
128 [IIO_MOD_CO2] = "co2",
129 [IIO_MOD_VOC] = "voc",
130 [IIO_MOD_PM1] = "pm1",
131 [IIO_MOD_PM2P5] = "pm2p5",
132 [IIO_MOD_PM4] = "pm4",
133 [IIO_MOD_PM10] = "pm10",
134 [IIO_MOD_ETHANOL] = "ethanol",
135 [IIO_MOD_H2] = "h2",
136 [IIO_MOD_O2] = "o2",
137 };
138
139 /* relies on pairs of these shared then separate */
140 static const char * const iio_chan_info_postfix[] = {
141 [IIO_CHAN_INFO_RAW] = "raw",
142 [IIO_CHAN_INFO_PROCESSED] = "input",
143 [IIO_CHAN_INFO_SCALE] = "scale",
144 [IIO_CHAN_INFO_OFFSET] = "offset",
145 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
146 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
147 [IIO_CHAN_INFO_PEAK] = "peak_raw",
148 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
149 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
150 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
151 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
152 = "filter_low_pass_3db_frequency",
153 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
154 = "filter_high_pass_3db_frequency",
155 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
156 [IIO_CHAN_INFO_FREQUENCY] = "frequency",
157 [IIO_CHAN_INFO_PHASE] = "phase",
158 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
159 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
160 [IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
161 [IIO_CHAN_INFO_INT_TIME] = "integration_time",
162 [IIO_CHAN_INFO_ENABLE] = "en",
163 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
164 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
165 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
166 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
167 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
168 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
169 [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
170 [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
171 };
172 /**
173 * iio_device_id() - query the unique ID for the device
174 * @indio_dev: Device structure whose ID is being queried
175 *
176 * The IIO device ID is a unique index used for example for the naming
177 * of the character device /dev/iio\:device[ID]
178 */
int iio_device_id(struct iio_dev *indio_dev)
180 {
181 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
182
183 return iio_dev_opaque->id;
184 }
185 EXPORT_SYMBOL_GPL(iio_device_id);
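/*
 * Example (illustrative sketch, not part of this file): a driver may use the
 * ID purely for logging or for naming auxiliary resources tied to the
 * /dev/iio:deviceX node. The "indio_dev" pointer is assumed to come from the
 * caller's context.
 *
 *	dev_dbg(indio_dev->dev.parent, "registered as iio:device%d\n",
 *		iio_device_id(indio_dev));
 */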
186
187 /**
188 * iio_buffer_enabled() - helper function to test if the buffer is enabled
189 * @indio_dev: IIO device structure for device
190 */
bool iio_buffer_enabled(struct iio_dev *indio_dev)
192 {
193 return indio_dev->currentmode
194 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
195 INDIO_BUFFER_SOFTWARE);
196 }
197 EXPORT_SYMBOL_GPL(iio_buffer_enabled);
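/*
 * Example (illustrative sketch, not part of this file): drivers typically use
 * this helper to refuse configuration changes that cannot be applied while
 * buffered capture is running. Many drivers instead rely on
 * iio_device_claim_direct_mode(), which performs the equivalent check under
 * the device lock. The callback names below are hypothetical.
 *
 *	static int foo_set_odr(struct iio_dev *indio_dev, int odr)
 *	{
 *		if (iio_buffer_enabled(indio_dev))
 *			return -EBUSY;
 *
 *		return foo_write_odr_reg(indio_dev, odr);
 *	}
 */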
198
199 /**
200 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
201 * @array: array of strings
202 * @n: number of strings in the array
203 * @str: string to match with
204 *
205 * Returns index of @str in the @array or -EINVAL, similar to match_string().
206 * Uses sysfs_streq instead of strcmp for matching.
207 *
208 * This routine will look for a string in an array of strings.
209 * The search will continue until the element is found or the n-th element
210 * is reached, regardless of any NULL elements in the array.
211 */
static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
					    const char *str)
214 {
215 const char *item;
216 int index;
217
218 for (index = 0; index < n; index++) {
219 item = array[index];
220 if (!item)
221 continue;
222 if (sysfs_streq(item, str))
223 return index;
224 }
225
226 return -EINVAL;
227 }
228
229 #if defined(CONFIG_DEBUG_FS)
230 /*
231 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
232 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
233 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
235 {
236 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
237 return iio_dev_opaque->debugfs_dentry;
238 }
239 EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
240 #endif
241
242 /**
243 * iio_find_channel_from_si() - get channel from its scan index
244 * @indio_dev: device
245 * @si: scan index to match
246 */
247 const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
249 {
250 int i;
251
252 for (i = 0; i < indio_dev->num_channels; i++)
253 if (indio_dev->channels[i].scan_index == si)
254 return &indio_dev->channels[i];
255 return NULL;
256 }
257
258 /* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
262 {
263 return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
264 }
265 EXPORT_SYMBOL(iio_read_const_attr);
266
267 /**
268 * iio_device_set_clock() - Set current timestamping clock for the device
269 * @indio_dev: IIO device structure containing the device
270 * @clock_id: timestamping clock posix identifier to set.
271 */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
273 {
274 int ret;
275 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
276 const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
277
278 ret = mutex_lock_interruptible(&indio_dev->mlock);
279 if (ret)
280 return ret;
281 if ((ev_int && iio_event_enabled(ev_int)) ||
282 iio_buffer_enabled(indio_dev)) {
283 mutex_unlock(&indio_dev->mlock);
284 return -EBUSY;
285 }
286 iio_dev_opaque->clock_id = clock_id;
287 mutex_unlock(&indio_dev->mlock);
288
289 return 0;
290 }
291 EXPORT_SYMBOL(iio_device_set_clock);
292
293 /**
294 * iio_device_get_clock() - Retrieve current timestamping clock for the device
295 * @indio_dev: IIO device structure containing the device
296 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
298 {
299 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
300
301 return iio_dev_opaque->clock_id;
302 }
303 EXPORT_SYMBOL(iio_device_get_clock);
304
305 /**
306 * iio_get_time_ns() - utility function to get a time stamp for events etc
307 * @indio_dev: device
308 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
310 {
311 struct timespec64 tp;
312
313 switch (iio_device_get_clock(indio_dev)) {
314 case CLOCK_REALTIME:
315 return ktime_get_real_ns();
316 case CLOCK_MONOTONIC:
317 return ktime_get_ns();
318 case CLOCK_MONOTONIC_RAW:
319 return ktime_get_raw_ns();
320 case CLOCK_REALTIME_COARSE:
321 return ktime_to_ns(ktime_get_coarse_real());
322 case CLOCK_MONOTONIC_COARSE:
323 ktime_get_coarse_ts64(&tp);
324 return timespec64_to_ns(&tp);
325 case CLOCK_BOOTTIME:
326 return ktime_get_boottime_ns();
327 case CLOCK_TAI:
328 return ktime_get_clocktai_ns();
329 default:
330 BUG();
331 }
332 }
333 EXPORT_SYMBOL(iio_get_time_ns);
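/*
 * Example (illustrative sketch, not part of this file): a trigger handler
 * usually captures the timestamp with this helper so that it honours the
 * clock selected through current_timestamp_clock. "struct foo_state",
 * "foo_read_fifo" and the scan layout are hypothetical.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		foo_read_fifo(st);
 *		iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
 *						   iio_get_time_ns(indio_dev));
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */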
334
335 /**
 * iio_get_time_res() - utility function to get the timestamp clock resolution
 * in nanoseconds.
338 * @indio_dev: device
339 */
unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
341 {
342 switch (iio_device_get_clock(indio_dev)) {
343 case CLOCK_REALTIME:
344 case CLOCK_MONOTONIC:
345 case CLOCK_MONOTONIC_RAW:
346 case CLOCK_BOOTTIME:
347 case CLOCK_TAI:
348 return hrtimer_resolution;
349 case CLOCK_REALTIME_COARSE:
350 case CLOCK_MONOTONIC_COARSE:
351 return LOW_RES_NSEC;
352 default:
353 BUG();
354 }
355 }
356 EXPORT_SYMBOL(iio_get_time_res);
357
static int __init iio_init(void)
359 {
360 int ret;
361
362 /* Register sysfs bus */
363 ret = bus_register(&iio_bus_type);
364 if (ret < 0) {
365 pr_err("could not register bus type\n");
366 goto error_nothing;
367 }
368
369 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
370 if (ret < 0) {
371 pr_err("failed to allocate char dev region\n");
372 goto error_unregister_bus_type;
373 }
374
375 iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
376
377 return 0;
378
379 error_unregister_bus_type:
380 bus_unregister(&iio_bus_type);
381 error_nothing:
382 return ret;
383 }
384
static void __exit iio_exit(void)
386 {
387 if (iio_devt)
388 unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
389 bus_unregister(&iio_bus_type);
390 debugfs_remove(iio_debugfs_dentry);
391 }
392
393 #if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
396 {
397 struct iio_dev *indio_dev = file->private_data;
398 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
399 unsigned val = 0;
400 int ret;
401
402 if (*ppos > 0)
403 return simple_read_from_buffer(userbuf, count, ppos,
404 iio_dev_opaque->read_buf,
405 iio_dev_opaque->read_buf_len);
406
407 ret = indio_dev->info->debugfs_reg_access(indio_dev,
408 iio_dev_opaque->cached_reg_addr,
409 0, &val);
410 if (ret) {
411 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
412 return ret;
413 }
414
415 iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
416 sizeof(iio_dev_opaque->read_buf),
417 "0x%X\n", val);
418
419 return simple_read_from_buffer(userbuf, count, ppos,
420 iio_dev_opaque->read_buf,
421 iio_dev_opaque->read_buf_len);
422 }
423
static ssize_t iio_debugfs_write_reg(struct file *file,
	 const char __user *userbuf, size_t count, loff_t *ppos)
426 {
427 struct iio_dev *indio_dev = file->private_data;
428 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
429 unsigned reg, val;
430 char buf[80];
431 int ret;
432
433 count = min_t(size_t, count, (sizeof(buf)-1));
434 if (copy_from_user(buf, userbuf, count))
435 return -EFAULT;
436
437 buf[count] = 0;
438
	ret = sscanf(buf, "%i %i", &reg, &val);
440
441 switch (ret) {
442 case 1:
443 iio_dev_opaque->cached_reg_addr = reg;
444 break;
445 case 2:
446 iio_dev_opaque->cached_reg_addr = reg;
447 ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
448 val, NULL);
449 if (ret) {
450 dev_err(indio_dev->dev.parent, "%s: write failed\n",
451 __func__);
452 return ret;
453 }
454 break;
455 default:
456 return -EINVAL;
457 }
458
459 return count;
460 }
461
462 static const struct file_operations iio_debugfs_reg_fops = {
463 .open = simple_open,
464 .read = iio_debugfs_read_reg,
465 .write = iio_debugfs_write_reg,
466 };
467
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
469 {
470 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
471 debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
472 }
473
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
475 {
476 struct iio_dev_opaque *iio_dev_opaque;
477
478 if (indio_dev->info->debugfs_reg_access == NULL)
479 return;
480
481 if (!iio_debugfs_dentry)
482 return;
483
484 iio_dev_opaque = to_iio_dev_opaque(indio_dev);
485
486 iio_dev_opaque->debugfs_dentry =
487 debugfs_create_dir(dev_name(&indio_dev->dev),
488 iio_debugfs_dentry);
489
490 debugfs_create_file("direct_reg_access", 0644,
491 iio_dev_opaque->debugfs_dentry, indio_dev,
492 &iio_debugfs_reg_fops);
493 }
494 #else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
496 {
497 }
498
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
500 {
501 }
502 #endif /* CONFIG_DEBUG_FS */
503
static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
507 {
508 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
509 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
510 const struct iio_chan_spec_ext_info *ext_info;
511
512 ext_info = &this_attr->c->ext_info[this_attr->address];
513
514 return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
515 }
516
static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
521 {
522 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
523 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
524 const struct iio_chan_spec_ext_info *ext_info;
525
526 ext_info = &this_attr->c->ext_info[this_attr->address];
527
528 return ext_info->write(indio_dev, ext_info->private,
529 this_attr->c, buf, len);
530 }
531
ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
534 {
535 const struct iio_enum *e = (const struct iio_enum *)priv;
536 unsigned int i;
537 size_t len = 0;
538
539 if (!e->num_items)
540 return 0;
541
542 for (i = 0; i < e->num_items; ++i) {
543 if (!e->items[i])
544 continue;
545 len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
546 }
547
548 /* replace last space with a newline */
549 buf[len - 1] = '\n';
550
551 return len;
552 }
553 EXPORT_SYMBOL_GPL(iio_enum_available_read);
554
ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
557 {
558 const struct iio_enum *e = (const struct iio_enum *)priv;
559 int i;
560
561 if (!e->get)
562 return -EINVAL;
563
564 i = e->get(indio_dev, chan);
565 if (i < 0)
566 return i;
567 else if (i >= e->num_items || !e->items[i])
568 return -EINVAL;
569
570 return sysfs_emit(buf, "%s\n", e->items[i]);
571 }
572 EXPORT_SYMBOL_GPL(iio_enum_read);
573
ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
577 {
578 const struct iio_enum *e = (const struct iio_enum *)priv;
579 int ret;
580
581 if (!e->set)
582 return -EINVAL;
583
584 ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
585 if (ret < 0)
586 return ret;
587
588 ret = e->set(indio_dev, chan, ret);
589 return ret ? ret : len;
590 }
591 EXPORT_SYMBOL_GPL(iio_enum_write);
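/*
 * Example (illustrative sketch, not part of this file): these helpers are not
 * normally called directly; a driver wires them up through struct iio_enum
 * and the IIO_ENUM() ext_info helper from <linux/iio/sysfs.h>. The names
 * below (foo_modes, foo_get_mode, foo_set_mode) are hypothetical.
 *
 *	static const char * const foo_modes[] = { "normal", "low_power" };
 *
 *	static const struct iio_enum foo_mode_enum = {
 *		.items = foo_modes,
 *		.num_items = ARRAY_SIZE(foo_modes),
 *		.get = foo_get_mode,
 *		.set = foo_set_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("mode", IIO_SHARED_BY_TYPE, &foo_mode_enum),
 *		{ }
 *	};
 */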
592
593 static const struct iio_mount_matrix iio_mount_idmatrix = {
594 .rotation = {
595 "1", "0", "0",
596 "0", "1", "0",
597 "0", "0", "1"
598 }
599 };
600
static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
603 {
604 *matrix = iio_mount_idmatrix;
605 dev_info(dev, "mounting matrix not found: using identity...\n");
606 return 0;
607 }
608
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
611 {
612 const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
613 priv)(indio_dev, chan);
614
615 if (IS_ERR(mtx))
616 return PTR_ERR(mtx);
617
618 if (!mtx)
619 mtx = &iio_mount_idmatrix;
620
621 return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
622 mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
623 mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
624 mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
625 }
626 EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
627
628 /**
629 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
630 * device "mount-matrix" property
631 * @dev: device the mounting matrix property is assigned to
632 * @matrix: where to store retrieved matrix
633 *
634 * If device is assigned no mounting matrix property, a default 3x3 identity
635 * matrix will be filled in.
636 *
637 * Return: 0 if success, or a negative error code on failure.
638 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
640 {
641 size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
642 int err;
643
644 err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
645 if (err == len)
646 return 0;
647
648 if (err >= 0)
649 /* Invalid number of matrix entries. */
650 return -EINVAL;
651
652 if (err != -EINVAL)
653 /* Invalid matrix declaration format. */
654 return err;
655
656 /* Matrix was not declared at all: fallback to identity. */
657 return iio_setup_mount_idmatrix(dev, matrix);
658 }
659 EXPORT_SYMBOL(iio_read_mount_matrix);
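/*
 * Example (illustrative sketch, not part of this file): a sensor driver
 * typically reads the matrix once at probe time and exposes it through an
 * ext_info attribute backed by iio_show_mount_matrix(), here via the
 * IIO_MOUNT_MATRIX() helper from <linux/iio/iio.h>. The driver state and
 * callback below are hypothetical.
 *
 *	static const struct iio_mount_matrix *
 *	foo_get_mount_matrix(const struct iio_dev *indio_dev,
 *			     const struct iio_chan_spec *chan)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return &st->orientation;
 *	}
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
 *		{ }
 *	};
 *
 * and, in probe():
 *
 *	ret = iio_read_mount_matrix(dev, &st->orientation);
 *	if (ret)
 *		return ret;
 */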
660
static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
663 {
664 int tmp0, tmp1;
665 s64 tmp2;
666 bool scale_db = false;
667
668 switch (type) {
669 case IIO_VAL_INT:
670 return sysfs_emit_at(buf, offset, "%d", vals[0]);
671 case IIO_VAL_INT_PLUS_MICRO_DB:
672 scale_db = true;
673 fallthrough;
674 case IIO_VAL_INT_PLUS_MICRO:
675 if (vals[1] < 0)
676 return sysfs_emit_at(buf, offset, "-%d.%06u%s",
677 abs(vals[0]), -vals[1],
678 scale_db ? " dB" : "");
679 else
680 return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
681 vals[1], scale_db ? " dB" : "");
682 case IIO_VAL_INT_PLUS_NANO:
683 if (vals[1] < 0)
684 return sysfs_emit_at(buf, offset, "-%d.%09u",
685 abs(vals[0]), -vals[1]);
686 else
687 return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
688 vals[1]);
689 case IIO_VAL_FRACTIONAL:
690 tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
691 tmp1 = vals[1];
692 tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
693 if ((tmp2 < 0) && (tmp0 == 0))
694 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
695 else
696 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
697 abs(tmp1));
698 case IIO_VAL_FRACTIONAL_LOG2:
699 tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
700 tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
701 if (tmp0 == 0 && tmp2 < 0)
702 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
703 else
704 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
705 abs(tmp1));
706 case IIO_VAL_INT_MULTIPLE:
707 {
708 int i;
709 int l = 0;
710
711 for (i = 0; i < size; ++i)
712 l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
713 return l;
714 }
715 case IIO_VAL_CHAR:
716 return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
717 default:
718 return 0;
719 }
720 }
721
722 /**
 * iio_format_value() - Formats an IIO value into its string representation
724 * @buf: The buffer to which the formatted value gets written
725 * which is assumed to be big enough (i.e. PAGE_SIZE).
726 * @type: One of the IIO_VAL_* constants. This decides how the val
727 * and val2 parameters are formatted.
728 * @size: Number of IIO value entries contained in vals
729 * @vals: Pointer to the values, exact meaning depends on the
730 * type parameter.
731 *
732 * Return: 0 by default, a negative number on failure or the
733 * total number of characters written for a type that belongs
734 * to the IIO_VAL_* constant.
735 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
737 {
738 ssize_t len;
739
740 len = __iio_format_value(buf, 0, type, size, vals);
741 if (len >= PAGE_SIZE - 1)
742 return -EFBIG;
743
744 return len + sysfs_emit_at(buf, len, "\n");
745 }
746 EXPORT_SYMBOL_GPL(iio_format_value);
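/*
 * Example (illustrative sketch, not part of this file): formatting a scale of
 * 0.000598 with IIO_VAL_INT_PLUS_MICRO emits "0.000598\n"; a raw reading of
 * 42 with IIO_VAL_INT emits "42\n". A driver normally never calls this
 * directly; the core does it with the value returned from read_raw(). The
 * call would look like:
 *
 *	int vals[2] = { 0, 598 };
 *
 *	len = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
 */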
747
static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
751 {
752 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
753 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
754
755 if (indio_dev->info->read_label)
756 return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
757
758 if (this_attr->c->extend_name)
759 return sprintf(buf, "%s\n", this_attr->c->extend_name);
760
761 return -EINVAL;
762 }
763
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
767 {
768 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
769 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
770 int vals[INDIO_MAX_RAW_ELEMENTS];
771 int ret;
772 int val_len = 2;
773
774 if (indio_dev->info->read_raw_multi)
775 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
776 INDIO_MAX_RAW_ELEMENTS,
777 vals, &val_len,
778 this_attr->address);
779 else
780 ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
781 &vals[0], &vals[1], this_attr->address);
782
783 if (ret < 0)
784 return ret;
785
786 return iio_format_value(buf, ret, val_len, vals);
787 }
788
static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
			       const char *prefix, const char *suffix)
791 {
792 ssize_t len;
793 int stride;
794 int i;
795
796 switch (type) {
797 case IIO_VAL_INT:
798 stride = 1;
799 break;
800 default:
801 stride = 2;
802 break;
803 }
804
805 len = sysfs_emit(buf, prefix);
806
807 for (i = 0; i <= length - stride; i += stride) {
808 if (i != 0) {
809 len += sysfs_emit_at(buf, len, " ");
810 if (len >= PAGE_SIZE)
811 return -EFBIG;
812 }
813
814 len += __iio_format_value(buf, len, type, stride, &vals[i]);
815 if (len >= PAGE_SIZE)
816 return -EFBIG;
817 }
818
819 len += sysfs_emit_at(buf, len, "%s\n", suffix);
820
821 return len;
822 }
823
static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
826 {
827
828 return iio_format_list(buf, vals, type, length, "", "");
829 }
830
static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
832 {
833 int length;
834
835 /*
	 * length refers to the array size, not the number of elements.
	 * The purpose is to print the range [min, step, max], so length should
	 * be 3 in case of int, and 6 for other types.
839 */
840 switch (type) {
841 case IIO_VAL_INT:
842 length = 3;
843 break;
844 default:
845 length = 6;
846 break;
847 }
848
849 return iio_format_list(buf, vals, type, length, "[", "]");
850 }
851
static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
855 {
856 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
857 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
858 const int *vals;
859 int ret;
860 int length;
861 int type;
862
863 ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
864 &vals, &type, &length,
865 this_attr->address);
866
867 if (ret < 0)
868 return ret;
869 switch (ret) {
870 case IIO_AVAIL_LIST:
871 return iio_format_avail_list(buf, vals, type, length);
872 case IIO_AVAIL_RANGE:
873 return iio_format_avail_range(buf, vals, type);
874 default:
875 return -EINVAL;
876 }
877 }
878
879 /**
880 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
881 * @str: The string to parse
882 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
883 * @integer: The integer part of the number
884 * @fract: The fractional part of the number
885 * @scale_db: True if this should parse as dB
886 *
887 * Returns 0 on success, or a negative error code if the string could not be
888 * parsed.
889 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
892 {
893 int i = 0, f = 0;
894 bool integer_part = true, negative = false;
895
896 if (fract_mult == 0) {
897 *fract = 0;
898
899 return kstrtoint(str, 0, integer);
900 }
901
902 if (str[0] == '-') {
903 negative = true;
904 str++;
905 } else if (str[0] == '+') {
906 str++;
907 }
908
909 while (*str) {
910 if ('0' <= *str && *str <= '9') {
911 if (integer_part) {
912 i = i * 10 + *str - '0';
913 } else {
914 f += fract_mult * (*str - '0');
915 fract_mult /= 10;
916 }
917 } else if (*str == '\n') {
918 if (*(str + 1) == '\0')
919 break;
920 else
921 return -EINVAL;
922 } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
923 /* Ignore the dB suffix */
924 str += sizeof(" dB") - 1;
925 continue;
926 } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
927 /* Ignore the dB suffix */
928 str += sizeof("dB") - 1;
929 continue;
930 } else if (*str == '.' && integer_part) {
931 integer_part = false;
932 } else {
933 return -EINVAL;
934 }
935 str++;
936 }
937
938 if (negative) {
939 if (i)
940 i = -i;
941 else
942 f = -f;
943 }
944
945 *integer = i;
946 *fract = f;
947
948 return 0;
949 }
950
951 /**
952 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
953 * @str: The string to parse
954 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
955 * @integer: The integer part of the number
956 * @fract: The fractional part of the number
957 *
958 * Returns 0 on success, or a negative error code if the string could not be
959 * parsed.
960 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
963 {
964 return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
965 }
966 EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
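/*
 * Example (illustrative sketch, not part of this file): parsing a sysfs write
 * such as "-1.5\n" with a fract_mult of 100000 (micro precision) yields
 * integer = -1 and fract = 500000, matching what write_raw() expects for
 * IIO_VAL_INT_PLUS_MICRO values.
 *
 *	int integer, fract;
 *
 *	ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
 *	if (ret)
 *		return ret;
 */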
967
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
972 {
973 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
974 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
975 int ret, fract_mult = 100000;
976 int integer, fract = 0;
977 bool is_char = false;
978 bool scale_db = false;
979
980 /* Assumes decimal - precision based on number of digits */
981 if (!indio_dev->info->write_raw)
982 return -EINVAL;
983
984 if (indio_dev->info->write_raw_get_fmt)
985 switch (indio_dev->info->write_raw_get_fmt(indio_dev,
986 this_attr->c, this_attr->address)) {
987 case IIO_VAL_INT:
988 fract_mult = 0;
989 break;
990 case IIO_VAL_INT_PLUS_MICRO_DB:
991 scale_db = true;
992 fallthrough;
993 case IIO_VAL_INT_PLUS_MICRO:
994 fract_mult = 100000;
995 break;
996 case IIO_VAL_INT_PLUS_NANO:
997 fract_mult = 100000000;
998 break;
999 case IIO_VAL_CHAR:
1000 is_char = true;
1001 break;
1002 default:
1003 return -EINVAL;
1004 }
1005
1006 if (is_char) {
1007 char ch;
1008
1009 if (sscanf(buf, "%c", &ch) != 1)
1010 return -EINVAL;
1011 integer = ch;
1012 } else {
1013 ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
1014 scale_db);
1015 if (ret)
1016 return ret;
1017 }
1018
1019 ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
1020 integer, fract, this_attr->address);
1021 if (ret)
1022 return ret;
1023
1024 return len;
1025 }
1026
1027 static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
1039 {
1040 int ret = 0;
1041 char *name = NULL;
1042 char *full_postfix;
1043 sysfs_attr_init(&dev_attr->attr);
1044
1045 /* Build up postfix of <extend_name>_<modifier>_postfix */
1046 if (chan->modified && (shared_by == IIO_SEPARATE)) {
1047 if (chan->extend_name)
1048 full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
1049 iio_modifier_names[chan
1050 ->channel2],
1051 chan->extend_name,
1052 postfix);
1053 else
1054 full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
1055 iio_modifier_names[chan
1056 ->channel2],
1057 postfix);
1058 } else {
1059 if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
1060 full_postfix = kstrdup(postfix, GFP_KERNEL);
1061 else
1062 full_postfix = kasprintf(GFP_KERNEL,
1063 "%s_%s",
1064 chan->extend_name,
1065 postfix);
1066 }
1067 if (full_postfix == NULL)
1068 return -ENOMEM;
1069
	if (chan->differential) { /* Differential cannot have a modifier */
1071 switch (shared_by) {
1072 case IIO_SHARED_BY_ALL:
1073 name = kasprintf(GFP_KERNEL, "%s", full_postfix);
1074 break;
1075 case IIO_SHARED_BY_DIR:
1076 name = kasprintf(GFP_KERNEL, "%s_%s",
1077 iio_direction[chan->output],
1078 full_postfix);
1079 break;
1080 case IIO_SHARED_BY_TYPE:
1081 name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
1082 iio_direction[chan->output],
1083 iio_chan_type_name_spec[chan->type],
1084 iio_chan_type_name_spec[chan->type],
1085 full_postfix);
1086 break;
1087 case IIO_SEPARATE:
1088 if (!chan->indexed) {
1089 WARN(1, "Differential channels must be indexed\n");
1090 ret = -EINVAL;
1091 goto error_free_full_postfix;
1092 }
1093 name = kasprintf(GFP_KERNEL,
1094 "%s_%s%d-%s%d_%s",
1095 iio_direction[chan->output],
1096 iio_chan_type_name_spec[chan->type],
1097 chan->channel,
1098 iio_chan_type_name_spec[chan->type],
1099 chan->channel2,
1100 full_postfix);
1101 break;
1102 }
1103 } else { /* Single ended */
1104 switch (shared_by) {
1105 case IIO_SHARED_BY_ALL:
1106 name = kasprintf(GFP_KERNEL, "%s", full_postfix);
1107 break;
1108 case IIO_SHARED_BY_DIR:
1109 name = kasprintf(GFP_KERNEL, "%s_%s",
1110 iio_direction[chan->output],
1111 full_postfix);
1112 break;
1113 case IIO_SHARED_BY_TYPE:
1114 name = kasprintf(GFP_KERNEL, "%s_%s_%s",
1115 iio_direction[chan->output],
1116 iio_chan_type_name_spec[chan->type],
1117 full_postfix);
1118 break;
1119
1120 case IIO_SEPARATE:
1121 if (chan->indexed)
1122 name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
1123 iio_direction[chan->output],
1124 iio_chan_type_name_spec[chan->type],
1125 chan->channel,
1126 full_postfix);
1127 else
1128 name = kasprintf(GFP_KERNEL, "%s_%s_%s",
1129 iio_direction[chan->output],
1130 iio_chan_type_name_spec[chan->type],
1131 full_postfix);
1132 break;
1133 }
1134 }
1135 if (name == NULL) {
1136 ret = -ENOMEM;
1137 goto error_free_full_postfix;
1138 }
1139 dev_attr->attr.name = name;
1140
1141 if (readfunc) {
1142 dev_attr->attr.mode |= S_IRUGO;
1143 dev_attr->show = readfunc;
1144 }
1145
1146 if (writefunc) {
1147 dev_attr->attr.mode |= S_IWUSR;
1148 dev_attr->store = writefunc;
1149 }
1150
1151 error_free_full_postfix:
1152 kfree(full_postfix);
1153
1154 return ret;
1155 }
1156
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
1158 {
1159 kfree(dev_attr->attr.name);
1160 }
1161
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
1176 {
1177 int ret;
1178 struct iio_dev_attr *iio_attr, *t;
1179
1180 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1181 if (iio_attr == NULL)
1182 return -ENOMEM;
1183 ret = __iio_device_attr_init(&iio_attr->dev_attr,
1184 postfix, chan,
1185 readfunc, writefunc, shared_by);
1186 if (ret)
1187 goto error_iio_dev_attr_free;
1188 iio_attr->c = chan;
1189 iio_attr->address = mask;
1190 iio_attr->buffer = buffer;
1191 list_for_each_entry(t, attr_list, l)
1192 if (strcmp(t->dev_attr.attr.name,
1193 iio_attr->dev_attr.attr.name) == 0) {
1194 if (shared_by == IIO_SEPARATE)
1195 dev_err(dev, "tried to double register : %s\n",
1196 t->dev_attr.attr.name);
1197 ret = -EBUSY;
1198 goto error_device_attr_deinit;
1199 }
1200 list_add(&iio_attr->l, attr_list);
1201
1202 return 0;
1203
1204 error_device_attr_deinit:
1205 __iio_device_attr_deinit(&iio_attr->dev_attr);
1206 error_iio_dev_attr_free:
1207 kfree(iio_attr);
1208 return ret;
1209 }
1210
static int iio_device_add_channel_label(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
1213 {
1214 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1215 int ret;
1216
1217 if (!indio_dev->info->read_label && !chan->extend_name)
1218 return 0;
1219
1220 ret = __iio_add_chan_devattr("label",
1221 chan,
1222 &iio_read_channel_label,
1223 NULL,
1224 0,
1225 IIO_SEPARATE,
1226 &indio_dev->dev,
1227 NULL,
1228 &iio_dev_opaque->channel_attr_list);
1229 if (ret < 0)
1230 return ret;
1231
1232 return 1;
1233 }
1234
static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
1239 {
1240 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1241 int i, ret, attrcount = 0;
1242
1243 for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
1244 if (i >= ARRAY_SIZE(iio_chan_info_postfix))
1245 return -EINVAL;
1246 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
1247 chan,
1248 &iio_read_channel_info,
1249 &iio_write_channel_info,
1250 i,
1251 shared_by,
1252 &indio_dev->dev,
1253 NULL,
1254 &iio_dev_opaque->channel_attr_list);
1255 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1256 continue;
1257 else if (ret < 0)
1258 return ret;
1259 attrcount++;
1260 }
1261
1262 return attrcount;
1263 }
1264
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
1269 {
1270 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1271 int i, ret, attrcount = 0;
1272 char *avail_postfix;
1273
1274 for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
1275 if (i >= ARRAY_SIZE(iio_chan_info_postfix))
1276 return -EINVAL;
1277 avail_postfix = kasprintf(GFP_KERNEL,
1278 "%s_available",
1279 iio_chan_info_postfix[i]);
1280 if (!avail_postfix)
1281 return -ENOMEM;
1282
1283 ret = __iio_add_chan_devattr(avail_postfix,
1284 chan,
1285 &iio_read_channel_info_avail,
1286 NULL,
1287 i,
1288 shared_by,
1289 &indio_dev->dev,
1290 NULL,
1291 &iio_dev_opaque->channel_attr_list);
1292 kfree(avail_postfix);
1293 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1294 continue;
1295 else if (ret < 0)
1296 return ret;
1297 attrcount++;
1298 }
1299
1300 return attrcount;
1301 }
1302
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
1305 {
1306 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1307 int ret, attrcount = 0;
1308 const struct iio_chan_spec_ext_info *ext_info;
1309
1310 if (chan->channel < 0)
1311 return 0;
1312 ret = iio_device_add_info_mask_type(indio_dev, chan,
1313 IIO_SEPARATE,
1314 &chan->info_mask_separate);
1315 if (ret < 0)
1316 return ret;
1317 attrcount += ret;
1318
1319 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1320 IIO_SEPARATE,
1321 &chan->
1322 info_mask_separate_available);
1323 if (ret < 0)
1324 return ret;
1325 attrcount += ret;
1326
1327 ret = iio_device_add_info_mask_type(indio_dev, chan,
1328 IIO_SHARED_BY_TYPE,
1329 &chan->info_mask_shared_by_type);
1330 if (ret < 0)
1331 return ret;
1332 attrcount += ret;
1333
1334 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1335 IIO_SHARED_BY_TYPE,
1336 &chan->
1337 info_mask_shared_by_type_available);
1338 if (ret < 0)
1339 return ret;
1340 attrcount += ret;
1341
1342 ret = iio_device_add_info_mask_type(indio_dev, chan,
1343 IIO_SHARED_BY_DIR,
1344 &chan->info_mask_shared_by_dir);
1345 if (ret < 0)
1346 return ret;
1347 attrcount += ret;
1348
1349 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1350 IIO_SHARED_BY_DIR,
1351 &chan->info_mask_shared_by_dir_available);
1352 if (ret < 0)
1353 return ret;
1354 attrcount += ret;
1355
1356 ret = iio_device_add_info_mask_type(indio_dev, chan,
1357 IIO_SHARED_BY_ALL,
1358 &chan->info_mask_shared_by_all);
1359 if (ret < 0)
1360 return ret;
1361 attrcount += ret;
1362
1363 ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1364 IIO_SHARED_BY_ALL,
1365 &chan->info_mask_shared_by_all_available);
1366 if (ret < 0)
1367 return ret;
1368 attrcount += ret;
1369
1370 ret = iio_device_add_channel_label(indio_dev, chan);
1371 if (ret < 0)
1372 return ret;
1373 attrcount += ret;
1374
1375 if (chan->ext_info) {
1376 unsigned int i = 0;
1377 for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
1378 ret = __iio_add_chan_devattr(ext_info->name,
1379 chan,
1380 ext_info->read ?
1381 &iio_read_channel_ext_info : NULL,
1382 ext_info->write ?
1383 &iio_write_channel_ext_info : NULL,
1384 i,
1385 ext_info->shared,
1386 &indio_dev->dev,
1387 NULL,
1388 &iio_dev_opaque->channel_attr_list);
1389 i++;
1390 if (ret == -EBUSY && ext_info->shared)
1391 continue;
1392
1393 if (ret)
1394 return ret;
1395
1396 attrcount++;
1397 }
1398 }
1399
1400 return attrcount;
1401 }
1402
1403 /**
1404 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
1405 * @attr_list: List of IIO device attributes
1406 *
1407 * This function frees the memory allocated for each of the IIO device
1408 * attributes in the list.
1409 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
1411 {
1412 struct iio_dev_attr *p, *n;
1413
1414 list_for_each_entry_safe(p, n, attr_list, l) {
1415 kfree_const(p->dev_attr.attr.name);
1416 list_del(&p->l);
1417 kfree(p);
1418 }
1419 }
1420
static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
1424 {
1425 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1426 return sysfs_emit(buf, "%s\n", indio_dev->name);
1427 }
1428
1429 static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
1430
static ssize_t iio_show_dev_label(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
1434 {
1435 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1436 return sysfs_emit(buf, "%s\n", indio_dev->label);
1437 }
1438
1439 static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);
1440
static ssize_t iio_show_timestamp_clock(struct device *dev,
					struct device_attribute *attr,
					char *buf)
1444 {
1445 const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1446 const clockid_t clk = iio_device_get_clock(indio_dev);
1447 const char *name;
1448 ssize_t sz;
1449
1450 switch (clk) {
1451 case CLOCK_REALTIME:
1452 name = "realtime\n";
1453 sz = sizeof("realtime\n");
1454 break;
1455 case CLOCK_MONOTONIC:
1456 name = "monotonic\n";
1457 sz = sizeof("monotonic\n");
1458 break;
1459 case CLOCK_MONOTONIC_RAW:
1460 name = "monotonic_raw\n";
1461 sz = sizeof("monotonic_raw\n");
1462 break;
1463 case CLOCK_REALTIME_COARSE:
1464 name = "realtime_coarse\n";
1465 sz = sizeof("realtime_coarse\n");
1466 break;
1467 case CLOCK_MONOTONIC_COARSE:
1468 name = "monotonic_coarse\n";
1469 sz = sizeof("monotonic_coarse\n");
1470 break;
1471 case CLOCK_BOOTTIME:
1472 name = "boottime\n";
1473 sz = sizeof("boottime\n");
1474 break;
1475 case CLOCK_TAI:
1476 name = "tai\n";
1477 sz = sizeof("tai\n");
1478 break;
1479 default:
1480 BUG();
1481 }
1482
1483 memcpy(buf, name, sz);
1484 return sz;
1485 }
1486
static ssize_t iio_store_timestamp_clock(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
1490 {
1491 clockid_t clk;
1492 int ret;
1493
1494 if (sysfs_streq(buf, "realtime"))
1495 clk = CLOCK_REALTIME;
1496 else if (sysfs_streq(buf, "monotonic"))
1497 clk = CLOCK_MONOTONIC;
1498 else if (sysfs_streq(buf, "monotonic_raw"))
1499 clk = CLOCK_MONOTONIC_RAW;
1500 else if (sysfs_streq(buf, "realtime_coarse"))
1501 clk = CLOCK_REALTIME_COARSE;
1502 else if (sysfs_streq(buf, "monotonic_coarse"))
1503 clk = CLOCK_MONOTONIC_COARSE;
1504 else if (sysfs_streq(buf, "boottime"))
1505 clk = CLOCK_BOOTTIME;
1506 else if (sysfs_streq(buf, "tai"))
1507 clk = CLOCK_TAI;
1508 else
1509 return -EINVAL;
1510
1511 ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
1512 if (ret)
1513 return ret;
1514
1515 return len;
1516 }
1517
int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
1520 {
1521 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1522 const struct attribute_group **new, **old = iio_dev_opaque->groups;
1523 unsigned int cnt = iio_dev_opaque->groupcounter;
1524
1525 new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
1526 if (!new)
1527 return -ENOMEM;
1528
1529 new[iio_dev_opaque->groupcounter++] = group;
1530 new[iio_dev_opaque->groupcounter] = NULL;
1531
1532 iio_dev_opaque->groups = new;
1533
1534 return 0;
1535 }
1536
1537 static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
1538 iio_show_timestamp_clock, iio_store_timestamp_clock);
1539
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
1541 {
1542 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1543 int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
1544 struct iio_dev_attr *p;
1545 struct attribute **attr, *clk = NULL;
1546
1547 /* First count elements in any existing group */
1548 if (indio_dev->info->attrs) {
1549 attr = indio_dev->info->attrs->attrs;
1550 while (*attr++ != NULL)
1551 attrcount_orig++;
1552 }
1553 attrcount = attrcount_orig;
1554 /*
1555 * New channel registration method - relies on the fact a group does
1556 * not need to be initialized if its name is NULL.
1557 */
1558 if (indio_dev->channels)
1559 for (i = 0; i < indio_dev->num_channels; i++) {
1560 const struct iio_chan_spec *chan =
1561 &indio_dev->channels[i];
1562
1563 if (chan->type == IIO_TIMESTAMP)
1564 clk = &dev_attr_current_timestamp_clock.attr;
1565
1566 ret = iio_device_add_channel_sysfs(indio_dev, chan);
1567 if (ret < 0)
1568 goto error_clear_attrs;
1569 attrcount += ret;
1570 }
1571
1572 if (iio_dev_opaque->event_interface)
1573 clk = &dev_attr_current_timestamp_clock.attr;
1574
1575 if (indio_dev->name)
1576 attrcount++;
1577 if (indio_dev->label)
1578 attrcount++;
1579 if (clk)
1580 attrcount++;
1581
1582 iio_dev_opaque->chan_attr_group.attrs =
1583 kcalloc(attrcount + 1,
1584 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
1585 GFP_KERNEL);
1586 if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
1587 ret = -ENOMEM;
1588 goto error_clear_attrs;
1589 }
1590 /* Copy across original attributes */
1591 if (indio_dev->info->attrs) {
1592 memcpy(iio_dev_opaque->chan_attr_group.attrs,
1593 indio_dev->info->attrs->attrs,
1594 sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
1595 *attrcount_orig);
1596 iio_dev_opaque->chan_attr_group.is_visible =
1597 indio_dev->info->attrs->is_visible;
1598 }
1599 attrn = attrcount_orig;
1600 /* Add all elements from the list. */
1601 list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
1602 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
1603 if (indio_dev->name)
1604 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
1605 if (indio_dev->label)
1606 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
1607 if (clk)
1608 iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;
1609
1610 ret = iio_device_register_sysfs_group(indio_dev,
1611 &iio_dev_opaque->chan_attr_group);
1612 if (ret)
1613 goto error_free_chan_attrs;
1614
1615 return 0;
1616
1617 error_free_chan_attrs:
1618 kfree(iio_dev_opaque->chan_attr_group.attrs);
1619 iio_dev_opaque->chan_attr_group.attrs = NULL;
1620 error_clear_attrs:
1621 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
1622
1623 return ret;
1624 }
1625
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
1627 {
1628 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1629
1630 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
1631 kfree(iio_dev_opaque->chan_attr_group.attrs);
1632 iio_dev_opaque->chan_attr_group.attrs = NULL;
1633 kfree(iio_dev_opaque->groups);
1634 iio_dev_opaque->groups = NULL;
1635 }
1636
static void iio_dev_release(struct device *device)
1638 {
1639 struct iio_dev *indio_dev = dev_to_iio_dev(device);
1640 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1641
1642 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
1643 iio_device_unregister_trigger_consumer(indio_dev);
1644 iio_device_unregister_eventset(indio_dev);
1645 iio_device_unregister_sysfs(indio_dev);
1646
1647 iio_device_detach_buffers(indio_dev);
1648
1649 ida_simple_remove(&iio_ida, iio_dev_opaque->id);
1650 kfree(iio_dev_opaque);
1651 }
1652
1653 struct device_type iio_device_type = {
1654 .name = "iio_device",
1655 .release = iio_dev_release,
1656 };
1657
1658 /**
1659 * iio_device_alloc() - allocate an iio_dev from a driver
1660 * @parent: Parent device.
1661 * @sizeof_priv: Space to allocate for private structure.
1662 **/
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
1664 {
1665 struct iio_dev_opaque *iio_dev_opaque;
1666 struct iio_dev *indio_dev;
1667 size_t alloc_size;
1668
1669 alloc_size = sizeof(struct iio_dev_opaque);
1670 if (sizeof_priv) {
1671 alloc_size = ALIGN(alloc_size, IIO_ALIGN);
1672 alloc_size += sizeof_priv;
1673 }
1674
1675 iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
1676 if (!iio_dev_opaque)
1677 return NULL;
1678
1679 indio_dev = &iio_dev_opaque->indio_dev;
1680 indio_dev->priv = (char *)iio_dev_opaque +
1681 ALIGN(sizeof(struct iio_dev_opaque), IIO_ALIGN);
1682
1683 indio_dev->dev.parent = parent;
1684 indio_dev->dev.type = &iio_device_type;
1685 indio_dev->dev.bus = &iio_bus_type;
1686 device_initialize(&indio_dev->dev);
1687 iio_device_set_drvdata(indio_dev, (void *)indio_dev);
1688 mutex_init(&indio_dev->mlock);
1689 mutex_init(&iio_dev_opaque->info_exist_lock);
1690 INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
1691
1692 iio_dev_opaque->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
1693 if (iio_dev_opaque->id < 0) {
1694 /* cannot use a dev_err as the name isn't available */
1695 pr_err("failed to get device id\n");
1696 kfree(iio_dev_opaque);
1697 return NULL;
1698 }
1699
1700 if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
1701 ida_simple_remove(&iio_ida, iio_dev_opaque->id);
1702 kfree(iio_dev_opaque);
1703 return NULL;
1704 }
1705
1706 INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
1707 INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
1708
1709 return indio_dev;
1710 }
1711 EXPORT_SYMBOL(iio_device_alloc);
1712
1713 /**
1714 * iio_device_free() - free an iio_dev from a driver
1715 * @dev: the iio_dev associated with the device
1716 **/
void iio_device_free(struct iio_dev *dev)
1718 {
1719 if (dev)
1720 put_device(&dev->dev);
1721 }
1722 EXPORT_SYMBOL(iio_device_free);
1723
static void devm_iio_device_release(void *iio_dev)
1725 {
1726 iio_device_free(iio_dev);
1727 }
1728
1729 /**
1730 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
1731 * @parent: Device to allocate iio_dev for, and parent for this IIO device
1732 * @sizeof_priv: Space to allocate for private structure.
1733 *
1734 * Managed iio_device_alloc. iio_dev allocated with this function is
1735 * automatically freed on driver detach.
1736 *
1737 * RETURNS:
1738 * Pointer to allocated iio_dev on success, NULL on failure.
1739 */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
1741 {
1742 struct iio_dev *iio_dev;
1743 int ret;
1744
1745 iio_dev = iio_device_alloc(parent, sizeof_priv);
1746 if (!iio_dev)
1747 return NULL;
1748
1749 ret = devm_add_action_or_reset(parent, devm_iio_device_release,
1750 iio_dev);
1751 if (ret)
1752 return NULL;
1753
1754 return iio_dev;
1755 }
1756 EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
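/*
 * Example (illustrative sketch, not part of this file): the usual probe-time
 * pattern pairs this allocator with devm_iio_device_register(). The struct,
 * callbacks and channel array referenced below are hypothetical.
 *
 *	static int foo_probe(struct i2c_client *client)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		st->client = client;
 *
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *
 *		return devm_iio_device_register(&client->dev, indio_dev);
 *	}
 */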
1757
1758 /**
1759 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
1760 * @inode: Inode structure for identifying the device in the file system
1761 * @filp: File structure for iio device used to keep and later access
1762 * private data
1763 *
1764 * Return: 0 on success or -EBUSY if the device is already opened
1765 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
1767 {
1768 struct iio_dev_opaque *iio_dev_opaque =
1769 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1770 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1771 struct iio_dev_buffer_pair *ib;
1772
1773 if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
1774 return -EBUSY;
1775
1776 iio_device_get(indio_dev);
1777
1778 ib = kmalloc(sizeof(*ib), GFP_KERNEL);
1779 if (!ib) {
1780 iio_device_put(indio_dev);
1781 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
1782 return -ENOMEM;
1783 }
1784
1785 ib->indio_dev = indio_dev;
1786 ib->buffer = indio_dev->buffer;
1787
1788 filp->private_data = ib;
1789
1790 return 0;
1791 }
1792
1793 /**
1794 * iio_chrdev_release() - chrdev file close buffer access and ioctls
1795 * @inode: Inode structure pointer for the char device
1796 * @filp: File structure pointer for the char device
1797 *
1798 * Return: 0 for successful release
1799 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
1801 {
1802 struct iio_dev_buffer_pair *ib = filp->private_data;
1803 struct iio_dev_opaque *iio_dev_opaque =
1804 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1805 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1806 kfree(ib);
1807 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
1808 iio_device_put(indio_dev);
1809
1810 return 0;
1811 }
1812
void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
1815 {
1816 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1817
1818 list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
1819 }
1820
iio_device_ioctl_handler_unregister(struct iio_ioctl_handler * h)1821 void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
1822 {
1823 list_del(&h->entry);
1824 }
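
/*
 * Usage sketch (not compiled here): this is how the core's sub-components
 * (the event and buffer code) hook into the shared ioctl dispatcher below.
 * A handler returns IIO_IOCTL_UNHANDLED for commands it does not recognise
 * so the next registered handler can be tried. The names "my_part_ioctl",
 * "my_part_handler", "my_part_handle_cmd" and "MY_PART_IOCTL_CMD" are
 * hypothetical.
 *
 *      static long my_part_ioctl(struct iio_dev *indio_dev, struct file *filp,
 *                                unsigned int cmd, unsigned long arg)
 *      {
 *              if (cmd != MY_PART_IOCTL_CMD)
 *                      return IIO_IOCTL_UNHANDLED;
 *
 *              return my_part_handle_cmd(indio_dev, arg);
 *      }
 *
 *      static struct iio_ioctl_handler my_part_handler = {
 *              .ioctl = my_part_ioctl,
 *      };
 *
 *      iio_device_ioctl_handler_register(indio_dev, &my_part_handler);
 */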

static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct iio_dev_buffer_pair *ib = filp->private_data;
        struct iio_dev *indio_dev = ib->indio_dev;
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        struct iio_ioctl_handler *h;
        int ret = -ENODEV;

        mutex_lock(&iio_dev_opaque->info_exist_lock);

        /*
         * The NULL check here is required to prevent crashing when a device
         * is being removed while userspace still has open file handles that
         * try to access it.
         */
        if (!indio_dev->info)
                goto out_unlock;

        list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
                ret = h->ioctl(indio_dev, filp, cmd, arg);
                if (ret != IIO_IOCTL_UNHANDLED)
                        break;
        }

        if (ret == IIO_IOCTL_UNHANDLED)
                ret = -ENODEV;

out_unlock:
        mutex_unlock(&iio_dev_opaque->info_exist_lock);

        return ret;
}

static const struct file_operations iio_buffer_fileops = {
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
        .read = iio_buffer_read_outer_addr,
        .poll = iio_buffer_poll_addr,
        .unlocked_ioctl = iio_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .open = iio_chrdev_open,
        .release = iio_chrdev_release,
};

static const struct file_operations iio_event_fileops = {
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
        .unlocked_ioctl = iio_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .open = iio_chrdev_open,
        .release = iio_chrdev_release,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
        int i, j;
        const struct iio_chan_spec *channels = indio_dev->channels;

        if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
                return 0;

        for (i = 0; i < indio_dev->num_channels - 1; i++) {
                if (channels[i].scan_index < 0)
                        continue;
                for (j = i + 1; j < indio_dev->num_channels; j++)
                        if (channels[i].scan_index == channels[j].scan_index) {
                                dev_err(&indio_dev->dev,
                                        "Duplicate scan index %d\n",
                                        channels[i].scan_index);
                                return -EINVAL;
                        }
        }

        return 0;
}

static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
        unsigned int i;

        if (!indio_dev->info->read_label)
                return 0;

        for (i = 0; i < indio_dev->num_channels; i++) {
                if (indio_dev->channels[i].extend_name) {
                        dev_err(&indio_dev->dev,
                                "Cannot use labels and extend_name at the same time\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
        const char *label;
        int ret;

        if (!indio_dev->info)
                return -EINVAL;

        iio_dev_opaque->driver_module = this_mod;
        /* If the calling driver did not initialize of_node, do it here */
        if (!indio_dev->dev.of_node && indio_dev->dev.parent)
                indio_dev->dev.of_node = indio_dev->dev.parent->of_node;

        label = of_get_property(indio_dev->dev.of_node, "label", NULL);
        if (label)
                indio_dev->label = label;

        ret = iio_check_unique_scan_index(indio_dev);
        if (ret < 0)
                return ret;

        ret = iio_check_extended_name(indio_dev);
        if (ret < 0)
                return ret;

        iio_device_register_debugfs(indio_dev);

        ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
        if (ret) {
                dev_err(indio_dev->dev.parent,
                        "Failed to create buffer sysfs interfaces\n");
                goto error_unreg_debugfs;
        }

        ret = iio_device_register_sysfs(indio_dev);
        if (ret) {
                dev_err(indio_dev->dev.parent,
                        "Failed to register sysfs interfaces\n");
                goto error_buffer_free_sysfs;
        }

        ret = iio_device_register_eventset(indio_dev);
        if (ret) {
                dev_err(indio_dev->dev.parent,
                        "Failed to register event set\n");
                goto error_free_sysfs;
        }

        if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
                iio_device_register_trigger_consumer(indio_dev);

        if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
            indio_dev->setup_ops == NULL)
                indio_dev->setup_ops = &noop_ring_setup_ops;

        if (iio_dev_opaque->attached_buffers_cnt)
                cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
        else if (iio_dev_opaque->event_interface)
                cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

        if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
                indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
                iio_dev_opaque->chrdev.owner = this_mod;
        }

        /* assign device groups now; they should all be registered by now */
        indio_dev->dev.groups = iio_dev_opaque->groups;

        ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
        if (ret < 0)
                goto error_unreg_eventset;

        return 0;

error_unreg_eventset:
        iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
        iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
        iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
        iio_device_unregister_debugfs(indio_dev);
        return ret;
}
EXPORT_SYMBOL(__iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev: Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

        cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

        mutex_lock(&iio_dev_opaque->info_exist_lock);

        iio_device_unregister_debugfs(indio_dev);

        iio_disable_all_buffers(indio_dev);

        indio_dev->info = NULL;

        iio_device_wakeup_eventset(indio_dev);
        iio_buffer_wakeup_poll(indio_dev);

        mutex_unlock(&iio_dev_opaque->info_exist_lock);

        iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(void *indio_dev)
{
        iio_device_unregister(indio_dev);
}

int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
                               struct module *this_mod)
{
        int ret;

        ret = __iio_device_register(indio_dev, this_mod);
        if (ret)
                return ret;

        return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
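
/*
 * Usage sketch (not compiled here): drivers do not call
 * __devm_iio_device_register() directly; they use the devm_iio_device_register()
 * wrapper macro from <linux/iio/iio.h>, which supplies THIS_MODULE so the core
 * can record the owning driver module (used, for example, as the chrdev owner
 * above). Continuing the hypothetical probe() sketch earlier in this file:
 *
 *      return devm_iio_device_register(&pdev->dev, indio_dev);
 */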

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
        mutex_lock(&indio_dev->mlock);

        if (iio_buffer_enabled(indio_dev)) {
                mutex_unlock(&indio_dev->mlock);
                return -EBUSY;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
        mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
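
/*
 * Usage sketch (not compiled here): the typical claim/release pairing in a
 * driver's read_raw() callback. "my_adc_read_raw" and "my_adc_read_channel"
 * are hypothetical; the helper touches the hardware and therefore must not
 * race with buffered capture.
 *
 *      static int my_adc_read_raw(struct iio_dev *indio_dev,
 *                                 struct iio_chan_spec const *chan,
 *                                 int *val, int *val2, long mask)
 *      {
 *              int ret;
 *
 *              switch (mask) {
 *              case IIO_CHAN_INFO_RAW:
 *                      ret = iio_device_claim_direct_mode(indio_dev);
 *                      if (ret)
 *                              return ret;
 *                      ret = my_adc_read_channel(indio_dev, chan, val);
 *                      iio_device_release_direct_mode(indio_dev);
 *                      return ret ? ret : IIO_VAL_INT;
 *              default:
 *                      return -EINVAL;
 *              }
 *      }
 */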

/**
 * iio_device_claim_buffer_mode - Keep device in buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in buffer mode it is guaranteed to stay
 * that way until iio_device_release_buffer_mode() is called.
 *
 * Use with iio_device_release_buffer_mode().
 *
 * Returns: 0 on success, -EBUSY on failure.
 */
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
{
        mutex_lock(&indio_dev->mlock);

        if (iio_buffer_enabled(indio_dev))
                return 0;

        mutex_unlock(&indio_dev->mlock);
        return -EBUSY;
}
EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);

/**
 * iio_device_release_buffer_mode - releases claim on buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in buffer mode.
 *
 * Use with iio_device_claim_buffer_mode().
 */
void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
{
        mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
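
/*
 * Usage sketch (not compiled here): the mirror image of the direct-mode pair
 * above, for a driver operation that is only valid while the buffer is
 * streaming. "my_dev_flush_fifo" is a hypothetical helper.
 *
 *      ret = iio_device_claim_buffer_mode(indio_dev);
 *      if (ret)
 *              return ret;
 *
 *      ret = my_dev_flush_fifo(indio_dev);
 *      iio_device_release_buffer_mode(indio_dev);
 *      return ret;
 */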

/**
 * iio_device_get_current_mode() - helper function providing read-only access to
 *                                 the @currentmode variable
 * @indio_dev: IIO device structure for device
 */
int iio_device_get_current_mode(struct iio_dev *indio_dev)
{
        return indio_dev->currentmode;
}
EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
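
/*
 * Usage sketch (not compiled here): a driver callback branching on the
 * current operating mode, e.g. in a buffer postenable/predisable hook.
 * "my_dev_enable_trigger_path" is a hypothetical helper.
 *
 *      if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
 *              return my_dev_enable_trigger_path(indio_dev);
 */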

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");