1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2022 ROHM Semiconductors
4  *
5  * ROHM/KIONIX KX022A accelerometer driver
6  */
7 
8 #include <linux/delay.h>
9 #include <linux/device.h>
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/mutex.h>
14 #include <linux/property.h>
15 #include <linux/regmap.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/slab.h>
18 #include <linux/string_helpers.h>
19 #include <linux/units.h>
20 
21 #include <linux/iio/iio.h>
22 #include <linux/iio/sysfs.h>
23 #include <linux/iio/trigger.h>
24 #include <linux/iio/trigger_consumer.h>
25 #include <linux/iio/triggered_buffer.h>
26 
27 #include "kionix-kx022a.h"
28 
29 /*
30  * The KX022A has a FIFO which can store 43 samples of HiRes data from 3
31  * channels. This equals 43 (samples) * 3 (channels) * 2 (bytes/sample), i.e.
32  * 258 bytes of sample data. The quirk to know is that the number of bytes in
33  * the FIFO is advertised via an 8-bit register (max value 255), so a full
34  * FIFO of 258 bytes is indicated using the max value 255.
35  */
36 #define KX022A_FIFO_LENGTH			43
37 #define KX022A_FIFO_FULL_VALUE			255
38 #define KX022A_SOFT_RESET_WAIT_TIME_US		(5 * USEC_PER_MSEC)
39 #define KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US	(500 * USEC_PER_MSEC)
40 
41 /* 3 axis, 2 bytes of data for each of the axis */
42 #define KX022A_FIFO_SAMPLES_SIZE_BYTES		6
43 #define KX022A_FIFO_MAX_BYTES					\
44 	(KX022A_FIFO_LENGTH * KX022A_FIFO_SAMPLES_SIZE_BYTES)
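/*
 * Note that a completely full FIFO holds 43 * 6 = 258 bytes, which cannot be
 * represented in the 8-bit BUF_STATUS_1 count; __kx022a_fifo_flush() maps the
 * max value 255 back to KX022A_FIFO_MAX_BYTES.
 */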
45 
46 enum {
47 	KX022A_STATE_SAMPLE,
48 	KX022A_STATE_FIFO,
49 };
50 
51 /* Regmap configs */
52 static const struct regmap_range kx022a_volatile_ranges[] = {
53 	{
54 		.range_min = KX022A_REG_XHP_L,
55 		.range_max = KX022A_REG_COTR,
56 	}, {
57 		.range_min = KX022A_REG_TSCP,
58 		.range_max = KX022A_REG_INT_REL,
59 	}, {
60 		/* The reset bit will be cleared by sensor */
61 		.range_min = KX022A_REG_CNTL2,
62 		.range_max = KX022A_REG_CNTL2,
63 	}, {
64 		.range_min = KX022A_REG_BUF_STATUS_1,
65 		.range_max = KX022A_REG_BUF_READ,
66 	},
67 };
68 
69 static const struct regmap_access_table kx022a_volatile_regs = {
70 	.yes_ranges = &kx022a_volatile_ranges[0],
71 	.n_yes_ranges = ARRAY_SIZE(kx022a_volatile_ranges),
72 };
73 
74 static const struct regmap_range kx022a_precious_ranges[] = {
75 	{
76 		.range_min = KX022A_REG_INT_REL,
77 		.range_max = KX022A_REG_INT_REL,
78 	},
79 };
80 
81 static const struct regmap_access_table kx022a_precious_regs = {
82 	.yes_ranges = &kx022a_precious_ranges[0],
83 	.n_yes_ranges = ARRAY_SIZE(kx022a_precious_ranges),
84 };
85 
86 /*
87  * The HW does not set WHO_AM_I reg as read-only but we don't want to write it
88  * so we still include it in the read-only ranges.
89  */
90 static const struct regmap_range kx022a_read_only_ranges[] = {
91 	{
92 		.range_min = KX022A_REG_XHP_L,
93 		.range_max = KX022A_REG_INT_REL,
94 	}, {
95 		.range_min = KX022A_REG_BUF_STATUS_1,
96 		.range_max = KX022A_REG_BUF_STATUS_2,
97 	}, {
98 		.range_min = KX022A_REG_BUF_READ,
99 		.range_max = KX022A_REG_BUF_READ,
100 	},
101 };
102 
103 static const struct regmap_access_table kx022a_ro_regs = {
104 	.no_ranges = &kx022a_read_only_ranges[0],
105 	.n_no_ranges = ARRAY_SIZE(kx022a_read_only_ranges),
106 };
107 
108 static const struct regmap_range kx022a_write_only_ranges[] = {
109 	{
110 		.range_min = KX022A_REG_BTS_WUF_TH,
111 		.range_max = KX022A_REG_BTS_WUF_TH,
112 	}, {
113 		.range_min = KX022A_REG_MAN_WAKE,
114 		.range_max = KX022A_REG_MAN_WAKE,
115 	}, {
116 		.range_min = KX022A_REG_SELF_TEST,
117 		.range_max = KX022A_REG_SELF_TEST,
118 	}, {
119 		.range_min = KX022A_REG_BUF_CLEAR,
120 		.range_max = KX022A_REG_BUF_CLEAR,
121 	},
122 };
123 
124 static const struct regmap_access_table kx022a_wo_regs = {
125 	.no_ranges = &kx022a_write_only_ranges[0],
126 	.n_no_ranges = ARRAY_SIZE(kx022a_write_only_ranges),
127 };
128 
129 static const struct regmap_range kx022a_noinc_read_ranges[] = {
130 	{
131 		.range_min = KX022A_REG_BUF_READ,
132 		.range_max = KX022A_REG_BUF_READ,
133 	},
134 };
135 
136 static const struct regmap_access_table kx022a_nir_regs = {
137 	.yes_ranges = &kx022a_noinc_read_ranges[0],
138 	.n_yes_ranges = ARRAY_SIZE(kx022a_noinc_read_ranges),
139 };
140 
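/*
 * Note how the access tables are wired up below: the read-only ranges are
 * handed to the regmap as .wr_table no_ranges (registers that must not be
 * written) and the write-only ranges as .rd_table no_ranges (registers that
 * must not be read).
 */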
141 const struct regmap_config kx022a_regmap = {
142 	.reg_bits = 8,
143 	.val_bits = 8,
144 	.volatile_table = &kx022a_volatile_regs,
145 	.rd_table = &kx022a_wo_regs,
146 	.wr_table = &kx022a_ro_regs,
147 	.rd_noinc_table = &kx022a_nir_regs,
148 	.precious_table = &kx022a_precious_regs,
149 	.max_register = KX022A_MAX_REGISTER,
150 	.cache_type = REGCACHE_RBTREE,
151 };
152 EXPORT_SYMBOL_NS_GPL(kx022a_regmap, IIO_KX022A);
153 
154 struct kx022a_data {
155 	struct regmap *regmap;
156 	struct iio_trigger *trig;
157 	struct device *dev;
158 	struct iio_mount_matrix orientation;
159 	int64_t timestamp, old_timestamp;
160 
161 	int irq;
162 	int inc_reg;
163 	int ien_reg;
164 
165 	unsigned int state;
166 	unsigned int odr_ns;
167 
168 	bool trigger_enabled;
169 	/*
170 	 * Prevent toggling the sensor stby/active state (PC1 bit) in the
171 	 * middle of a configuration, or when the fifo is enabled. Also,
172 	 * protect the data stored/retrieved from this structure from
173 	 * concurrent accesses.
174 	 */
175 	struct mutex mutex;
176 	u8 watermark;
177 
178 	/* 3 x 16bit accel data + timestamp */
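	/*
	 * buffer[] has 8 entries (16 bytes): 6 bytes of axis data, 2 bytes of
	 * padding and 8 naturally aligned bytes for the timestamp which
	 * iio_push_to_buffers_with_timestamp() stores at the end.
	 */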
179 	__le16 buffer[8] __aligned(IIO_DMA_MINALIGN);
180 	struct {
181 		__le16 channels[3];
182 		s64 ts __aligned(8);
183 	} scan;
184 };
185 
186 static const struct iio_mount_matrix *
187 kx022a_get_mount_matrix(const struct iio_dev *idev,
188 			const struct iio_chan_spec *chan)
189 {
190 	struct kx022a_data *data = iio_priv(idev);
191 
192 	return &data->orientation;
193 }
194 
195 enum {
196 	AXIS_X,
197 	AXIS_Y,
198 	AXIS_Z,
199 	AXIS_MAX
200 };
201 
202 static const unsigned long kx022a_scan_masks[] = {
203 	BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z), 0
204 };
205 
206 static const struct iio_chan_spec_ext_info kx022a_ext_info[] = {
207 	IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kx022a_get_mount_matrix),
208 	{ }
209 };
210 
211 #define KX022A_ACCEL_CHAN(axis, index)				\
212 {								\
213 	.type = IIO_ACCEL,					\
214 	.modified = 1,						\
215 	.channel2 = IIO_MOD_##axis,				\
216 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
217 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |	\
218 				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
219 	.info_mask_shared_by_type_available =			\
220 				BIT(IIO_CHAN_INFO_SCALE) |	\
221 				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
222 	.ext_info = kx022a_ext_info,				\
223 	.address = KX022A_REG_##axis##OUT_L,			\
224 	.scan_index = index,					\
225 	.scan_type = {                                          \
226 		.sign = 's',					\
227 		.realbits = 16,					\
228 		.storagebits = 16,				\
229 		.endianness = IIO_LE,				\
230 	},							\
231 }
232 
233 static const struct iio_chan_spec kx022a_channels[] = {
234 	KX022A_ACCEL_CHAN(X, 0),
235 	KX022A_ACCEL_CHAN(Y, 1),
236 	KX022A_ACCEL_CHAN(Z, 2),
237 	IIO_CHAN_SOFT_TIMESTAMP(3),
238 };
239 
240 /*
241  * The sensor HW can support ODR up to 1600 Hz, which is beyond what most
242  * Linux CPUs can handle without dropping samples. Also, the low power mode is
243  * not available for higher sample rates. Thus, the driver only supports 200 Hz
244  * and slower ODRs. The slowest is 0.78 Hz.
245  */
246 static const int kx022a_accel_samp_freq_table[][2] = {
247 	{ 0, 780000 },
248 	{ 1, 563000 },
249 	{ 3, 125000 },
250 	{ 6, 250000 },
251 	{ 12, 500000 },
252 	{ 25, 0 },
253 	{ 50, 0 },
254 	{ 100, 0 },
255 	{ 200, 0 },
256 };
257 
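/*
 * Sample periods in nanoseconds corresponding to the frequencies above
 * (NSEC_PER_SEC / ODR); for example 1e9 / 0.78 Hz ~= 1282051282 ns. These are
 * used as the fallback sample period when interpolating FIFO timestamps.
 */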
258 static const unsigned int kx022a_odrs[] = {
259 	1282051282,
260 	639795266,
261 	320 * MEGA,
262 	160 * MEGA,
263 	80 * MEGA,
264 	40 * MEGA,
265 	20 * MEGA,
266 	10 * MEGA,
267 	5 * MEGA,
268 };
269 
270 /*
271  * The range (+/-2G/4G/8G/16G) is distributed over the number of bits.
272  * The scale table can be calculated using
273  *	(range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
274  *	=> KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed
275  *	in low-power mode(?) )
276  *	=> +/-2G  => 4 / 2^16 * 9.80665
277  *	=> +/-2G  - 0.000598550415
278  *	   +/-4G  - 0.00119710083
279  *	   +/-8G  - 0.00239420166
280  *	   +/-16G - 0.00478840332
281  */
282 static const int kx022a_scale_table[][2] = {
283 	{ 0, 598550 },
284 	{ 0, 1197101 },
285 	{ 0, 2394202 },
286 	{ 0, 4788403 },
287 };
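/*
 * The entries above are { integer, nano } pairs (IIO_VAL_INT_PLUS_NANO),
 * indexed by the GSEL field value; see kx022a_reg2scale().
 */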
288 
289 static int kx022a_read_avail(struct iio_dev *indio_dev,
290 			     struct iio_chan_spec const *chan,
291 			     const int **vals, int *type, int *length,
292 			     long mask)
293 {
294 	switch (mask) {
295 	case IIO_CHAN_INFO_SAMP_FREQ:
296 		*vals = (const int *)kx022a_accel_samp_freq_table;
297 		*length = ARRAY_SIZE(kx022a_accel_samp_freq_table) *
298 			  ARRAY_SIZE(kx022a_accel_samp_freq_table[0]);
299 		*type = IIO_VAL_INT_PLUS_MICRO;
300 		return IIO_AVAIL_LIST;
301 	case IIO_CHAN_INFO_SCALE:
302 		*vals = (const int *)kx022a_scale_table;
303 		*length = ARRAY_SIZE(kx022a_scale_table) *
304 			  ARRAY_SIZE(kx022a_scale_table[0]);
305 		*type = IIO_VAL_INT_PLUS_NANO;
306 		return IIO_AVAIL_LIST;
307 	default:
308 		return -EINVAL;
309 	}
310 }
311 
312 #define KX022A_DEFAULT_PERIOD_NS (20 * NSEC_PER_MSEC)
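/*
 * 20 ms is the period of a 50 Hz ODR; this is used to initialize
 * data->odr_ns in probe before the user has configured a sampling frequency.
 */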
313 
314 static void kx022a_reg2freq(unsigned int val,  int *val1, int *val2)
315 {
316 	*val1 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][0];
317 	*val2 = kx022a_accel_samp_freq_table[val & KX022A_MASK_ODR][1];
318 }
319 
320 static void kx022a_reg2scale(unsigned int val, unsigned int *val1,
321 			     unsigned int *val2)
322 {
323 	val &= KX022A_MASK_GSEL;
324 	val >>= KX022A_GSEL_SHIFT;
325 
326 	*val1 = kx022a_scale_table[val][0];
327 	*val2 = kx022a_scale_table[val][1];
328 }
329 
330 static int kx022a_turn_on_off_unlocked(struct kx022a_data *data, bool on)
331 {
332 	int ret;
333 
334 	if (on)
335 		ret = regmap_set_bits(data->regmap, KX022A_REG_CNTL,
336 				      KX022A_MASK_PC1);
337 	else
338 		ret = regmap_clear_bits(data->regmap, KX022A_REG_CNTL,
339 					KX022A_MASK_PC1);
340 	if (ret)
341 		dev_err(data->dev, "Turn %s fail %d\n", str_on_off(on), ret);
342 
343 	return ret;
344 
345 }
346 
347 static int kx022a_turn_off_lock(struct kx022a_data *data)
348 {
349 	int ret;
350 
351 	mutex_lock(&data->mutex);
352 	ret = kx022a_turn_on_off_unlocked(data, false);
353 	if (ret)
354 		mutex_unlock(&data->mutex);
355 
356 	return ret;
357 }
358 
359 static int kx022a_turn_on_unlock(struct kx022a_data *data)
360 {
361 	int ret;
362 
363 	ret = kx022a_turn_on_off_unlocked(data, true);
364 	mutex_unlock(&data->mutex);
365 
366 	return ret;
367 }
368 
369 static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
370 				    struct iio_chan_spec const *chan,
371 				    long mask)
372 {
373 	switch (mask) {
374 	case IIO_CHAN_INFO_SCALE:
375 		return IIO_VAL_INT_PLUS_NANO;
376 	case IIO_CHAN_INFO_SAMP_FREQ:
377 		return IIO_VAL_INT_PLUS_MICRO;
378 	default:
379 		return -EINVAL;
380 	}
381 }
382 
383 static int kx022a_write_raw(struct iio_dev *idev,
384 			    struct iio_chan_spec const *chan,
385 			    int val, int val2, long mask)
386 {
387 	struct kx022a_data *data = iio_priv(idev);
388 	int ret, n;
389 
390 	/*
391 	 * We should not allow changing scale or frequency when FIFO is running
392 	 * as it will mess the timestamp/scale for samples existing in the
393 	 * buffer. If this turns out to be an issue we can later change logic
394 	 * to internally flush the fifo before reconfiguring so the samples in
395 	 * fifo keep matching the freq/scale settings. (Such setup could cause
396 	 * issues if users trust the watermark to be reached within known
397 	 * time-limit).
398 	 */
399 	ret = iio_device_claim_direct_mode(idev);
400 	if (ret)
401 		return ret;
402 
403 	switch (mask) {
404 	case IIO_CHAN_INFO_SAMP_FREQ:
405 		n = ARRAY_SIZE(kx022a_accel_samp_freq_table);
406 
407 		while (n--)
408 			if (val == kx022a_accel_samp_freq_table[n][0] &&
409 			    val2 == kx022a_accel_samp_freq_table[n][1])
410 				break;
411 		if (n < 0) {
412 			ret = -EINVAL;
413 			goto unlock_out;
414 		}
415 		ret = kx022a_turn_off_lock(data);
416 		if (ret)
417 			break;
418 
419 		ret = regmap_update_bits(data->regmap,
420 					 KX022A_REG_ODCNTL,
421 					 KX022A_MASK_ODR, n);
422 		data->odr_ns = kx022a_odrs[n];
423 		kx022a_turn_on_unlock(data);
424 		break;
425 	case IIO_CHAN_INFO_SCALE:
426 		n = ARRAY_SIZE(kx022a_scale_table);
427 
428 		while (n-- > 0)
429 			if (val == kx022a_scale_table[n][0] &&
430 			    val2 == kx022a_scale_table[n][1])
431 				break;
432 		if (n < 0) {
433 			ret = -EINVAL;
434 			goto unlock_out;
435 		}
436 
437 		ret = kx022a_turn_off_lock(data);
438 		if (ret)
439 			break;
440 
441 		ret = regmap_update_bits(data->regmap, KX022A_REG_CNTL,
442 					 KX022A_MASK_GSEL,
443 					 n << KX022A_GSEL_SHIFT);
444 		kx022a_turn_on_unlock(data);
445 		break;
446 	default:
447 		ret = -EINVAL;
448 		break;
449 	}
450 
451 unlock_out:
452 	iio_device_release_direct_mode(idev);
453 
454 	return ret;
455 }
456 
457 static int kx022a_fifo_set_wmi(struct kx022a_data *data)
458 {
459 	u8 threshold;
460 
461 	threshold = data->watermark;
462 
463 	return regmap_update_bits(data->regmap, KX022A_REG_BUF_CNTL1,
464 				  KX022A_MASK_WM_TH, threshold);
465 }
466 
467 static int kx022a_get_axis(struct kx022a_data *data,
468 			   struct iio_chan_spec const *chan,
469 			   int *val)
470 {
471 	int ret;
472 
473 	ret = regmap_bulk_read(data->regmap, chan->address, &data->buffer[0],
474 			       sizeof(__le16));
475 	if (ret)
476 		return ret;
477 
478 	*val = le16_to_cpu(data->buffer[0]);
479 
480 	return IIO_VAL_INT;
481 }
482 
483 static int kx022a_read_raw(struct iio_dev *idev,
484 			   struct iio_chan_spec const *chan,
485 			   int *val, int *val2, long mask)
486 {
487 	struct kx022a_data *data = iio_priv(idev);
488 	unsigned int regval;
489 	int ret;
490 
491 	switch (mask) {
492 	case IIO_CHAN_INFO_RAW:
493 		ret = iio_device_claim_direct_mode(idev);
494 		if (ret)
495 			return ret;
496 
497 		mutex_lock(&data->mutex);
498 		ret = kx022a_get_axis(data, chan, val);
499 		mutex_unlock(&data->mutex);
500 
501 		iio_device_release_direct_mode(idev);
502 
503 		return ret;
504 
505 	case IIO_CHAN_INFO_SAMP_FREQ:
506 		ret = regmap_read(data->regmap, KX022A_REG_ODCNTL, &regval);
507 		if (ret)
508 			return ret;
509 
510 		if ((regval & KX022A_MASK_ODR) >=
511 		    ARRAY_SIZE(kx022a_accel_samp_freq_table)) {
512 			dev_err(data->dev, "Invalid ODR\n");
513 			return -EINVAL;
514 		}
515 
516 		kx022a_reg2freq(regval, val, val2);
517 
518 		return IIO_VAL_INT_PLUS_MICRO;
519 
520 	case IIO_CHAN_INFO_SCALE:
521 		ret = regmap_read(data->regmap, KX022A_REG_CNTL, &regval);
522 		if (ret < 0)
523 			return ret;
524 
525 		kx022a_reg2scale(regval, val, val2);
526 
527 		return IIO_VAL_INT_PLUS_NANO;
528 	}
529 
530 	return -EINVAL;
531 };
532 
533 static int kx022a_set_watermark(struct iio_dev *idev, unsigned int val)
534 {
535 	struct kx022a_data *data = iio_priv(idev);
536 
537 	if (val > KX022A_FIFO_LENGTH)
538 		val = KX022A_FIFO_LENGTH;
539 
540 	mutex_lock(&data->mutex);
541 	data->watermark = val;
542 	mutex_unlock(&data->mutex);
543 
544 	return 0;
545 }
546 
547 static ssize_t hwfifo_enabled_show(struct device *dev,
548 				   struct device_attribute *attr,
549 				   char *buf)
550 {
551 	struct iio_dev *idev = dev_to_iio_dev(dev);
552 	struct kx022a_data *data = iio_priv(idev);
553 	bool state;
554 
555 	mutex_lock(&data->mutex);
556 	state = data->state;
557 	mutex_unlock(&data->mutex);
558 
559 	return sysfs_emit(buf, "%d\n", state);
560 }
561 
562 static ssize_t hwfifo_watermark_show(struct device *dev,
563 				     struct device_attribute *attr,
564 				     char *buf)
565 {
566 	struct iio_dev *idev = dev_to_iio_dev(dev);
567 	struct kx022a_data *data = iio_priv(idev);
568 	int wm;
569 
570 	mutex_lock(&data->mutex);
571 	wm = data->watermark;
572 	mutex_unlock(&data->mutex);
573 
574 	return sysfs_emit(buf, "%d\n", wm);
575 }
576 
577 static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0);
578 static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0);
579 
580 static const struct iio_dev_attr *kx022a_fifo_attributes[] = {
581 	&iio_dev_attr_hwfifo_watermark,
582 	&iio_dev_attr_hwfifo_enabled,
583 	NULL
584 };
585 
586 static int kx022a_drop_fifo_contents(struct kx022a_data *data)
587 {
588 	/*
589 	 * We must clear the old time-stamp to avoid computing the timestamps
590 	 * based on samples acquired when buffer was last enabled.
591 	 *
592 	 * We don't need to protect the timestamp as long as we are only
593 	 * called from fifo-disable where we can guarantee the sensor is not
594 	 * triggering interrupts and where the mutex is locked to prevent the
595 	 * user-space access.
596 	 */
597 	data->timestamp = 0;
598 
599 	return regmap_write(data->regmap, KX022A_REG_BUF_CLEAR, 0x0);
600 }
601 
602 static int __kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples,
603 			       bool irq)
604 {
605 	struct kx022a_data *data = iio_priv(idev);
606 	struct device *dev = regmap_get_device(data->regmap);
607 	__le16 buffer[KX022A_FIFO_LENGTH * 3];
608 	uint64_t sample_period;
609 	int count, fifo_bytes;
610 	bool renable = false;
611 	int64_t tstamp;
612 	int ret, i;
613 
614 	ret = regmap_read(data->regmap, KX022A_REG_BUF_STATUS_1, &fifo_bytes);
615 	if (ret) {
616 		dev_err(dev, "Error reading buffer status\n");
617 		return ret;
618 	}
619 
620 	/* Let's not overflow if we for some reason get a bogus value from I2C */
621 	if (fifo_bytes == KX022A_FIFO_FULL_VALUE)
622 		fifo_bytes = KX022A_FIFO_MAX_BYTES;
623 
624 	if (fifo_bytes % KX022A_FIFO_SAMPLES_SIZE_BYTES)
625 		dev_warn(data->dev, "Bad FIFO alignment. Data may be corrupt\n");
626 
627 	count = fifo_bytes / KX022A_FIFO_SAMPLES_SIZE_BYTES;
628 	if (!count)
629 		return 0;
630 
631 	/*
632 	 * If we are being called from IRQ handler we know the stored timestamp
633 	 * is fairly accurate for the last stored sample. Otherwise, if we are
634 	 * called as a result of a read operation from userspace and hence
635 	 * before the watermark interrupt was triggered, take a timestamp
636 	 * now. We can fall anywhere in between two samples so the error in this
637 	 * case is at most one sample period.
638 	 */
639 	if (!irq) {
640 		/*
641 		 * We need to have the IRQ disabled or we risk messing up
642 		 * the timestamps. If we are run from the IRQ, then
643 		 * IRQF_ONESHOT has us covered - but if we are run from a
644 		 * user-space read we need to disable the IRQ to be on the
645 		 * safe side. We do this using synchronous disable so that if
646 		 * the IRQ thread is running on another CPU we wait for it to
647 		 * finish.
648 		 */
649 		disable_irq(data->irq);
650 		renable = true;
651 
652 		data->old_timestamp = data->timestamp;
653 		data->timestamp = iio_get_time_ns(idev);
654 	}
655 
656 	/*
657 	 * Approximate timestamps for each of the sample based on the sampling
658 	 * frequency, timestamp for last sample and number of samples.
659 	 *
660 	 * We'd better not use the current bandwidth settings to compute the
661 	 * sample period. The real sample rate varies with the device and
662 	 * small variation adds when we store a large number of samples.
663 	 *
664 	 * To avoid this issue we compute the actual sample period ourselves
665 	 * based on the timestamp delta between the last two flush operations.
666 	 */
667 	if (data->old_timestamp) {
668 		sample_period = data->timestamp - data->old_timestamp;
669 		do_div(sample_period, count);
670 	} else {
671 		sample_period = data->odr_ns;
672 	}
673 	tstamp = data->timestamp - (count - 1) * sample_period;
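	/*
	 * For example, flushing 10 samples with a 20 ms sample period and the
	 * last sample stamped at time T yields timestamps T - 180 ms,
	 * T - 160 ms, ..., T, as tstamp is advanced by sample_period for each
	 * sample pushed below.
	 */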
674 
675 	if (samples && count > samples) {
676 		/*
677 		 * Here we leave some old samples to the buffer. We need to
678 		 * adjust the timestamp to match the first sample in the buffer
679 		 * or we will miscalculate the sample_period at next round.
680 		 */
681 		data->timestamp -= (count - samples) * sample_period;
682 		count = samples;
683 	}
684 
685 	fifo_bytes = count * KX022A_FIFO_SAMPLES_SIZE_BYTES;
686 	ret = regmap_noinc_read(data->regmap, KX022A_REG_BUF_READ,
687 				&buffer[0], fifo_bytes);
688 	if (ret)
689 		goto renable_out;
690 
691 	for (i = 0; i < count; i++) {
692 		__le16 *sam = &buffer[i * 3];
693 		__le16 *chs;
694 		int bit;
695 
696 		chs = &data->scan.channels[0];
697 		for_each_set_bit(bit, idev->active_scan_mask, AXIS_MAX)
698 			chs[bit] = sam[bit];
699 
700 		iio_push_to_buffers_with_timestamp(idev, &data->scan, tstamp);
701 
702 		tstamp += sample_period;
703 	}
704 
705 	ret = count;
706 
707 renable_out:
708 	if (renable)
709 		enable_irq(data->irq);
710 
711 	return ret;
712 }
713 
714 static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
715 {
716 	struct kx022a_data *data = iio_priv(idev);
717 	int ret;
718 
719 	mutex_lock(&data->mutex);
720 	ret = __kx022a_fifo_flush(idev, samples, false);
721 	mutex_unlock(&data->mutex);
722 
723 	return ret;
724 }
725 
726 static const struct iio_info kx022a_info = {
727 	.read_raw = &kx022a_read_raw,
728 	.write_raw = &kx022a_write_raw,
729 	.write_raw_get_fmt = &kx022a_write_raw_get_fmt,
730 	.read_avail = &kx022a_read_avail,
731 
732 	.validate_trigger	= iio_validate_own_trigger,
733 	.hwfifo_set_watermark	= kx022a_set_watermark,
734 	.hwfifo_flush_to_buffer	= kx022a_fifo_flush,
735 };
736 
737 static int kx022a_set_drdy_irq(struct kx022a_data *data, bool en)
738 {
739 	if (en)
740 		return regmap_set_bits(data->regmap, KX022A_REG_CNTL,
741 				       KX022A_MASK_DRDY);
742 
743 	return regmap_clear_bits(data->regmap, KX022A_REG_CNTL,
744 				 KX022A_MASK_DRDY);
745 }
746 
747 static int kx022a_prepare_irq_pin(struct kx022a_data *data)
748 {
749 	/* Enable IRQ1 pin. Set polarity to active low */
750 	int mask = KX022A_MASK_IEN | KX022A_MASK_IPOL |
751 		   KX022A_MASK_ITYP;
752 	int val = KX022A_MASK_IEN | KX022A_IPOL_LOW |
753 		  KX022A_ITYP_LEVEL;
754 	int ret;
755 
756 	ret = regmap_update_bits(data->regmap, data->inc_reg, mask, val);
757 	if (ret)
758 		return ret;
759 
760 	/* We enable WMI to IRQ pin only at buffer_enable */
761 	mask = KX022A_MASK_INS2_DRDY;
762 
763 	return regmap_set_bits(data->regmap, data->ien_reg, mask);
764 }
765 
766 static int kx022a_fifo_disable(struct kx022a_data *data)
767 {
768 	int ret = 0;
769 
770 	ret = kx022a_turn_off_lock(data);
771 	if (ret)
772 		return ret;
773 
774 	ret = regmap_clear_bits(data->regmap, data->ien_reg, KX022A_MASK_WMI);
775 	if (ret)
776 		goto unlock_out;
777 
778 	ret = regmap_clear_bits(data->regmap, KX022A_REG_BUF_CNTL2,
779 				KX022A_MASK_BUF_EN);
780 	if (ret)
781 		goto unlock_out;
782 
783 	data->state &= ~KX022A_STATE_FIFO;
784 
785 	kx022a_drop_fifo_contents(data);
786 
787 	return kx022a_turn_on_unlock(data);
788 
789 unlock_out:
790 	mutex_unlock(&data->mutex);
791 
792 	return ret;
793 }
794 
795 static int kx022a_buffer_predisable(struct iio_dev *idev)
796 {
797 	struct kx022a_data *data = iio_priv(idev);
798 
799 	if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
800 		return 0;
801 
802 	return kx022a_fifo_disable(data);
803 }
804 
805 static int kx022a_fifo_enable(struct kx022a_data *data)
806 {
807 	int ret;
808 
809 	ret = kx022a_turn_off_lock(data);
810 	if (ret)
811 		return ret;
812 
813 	/* Update watermark to HW */
814 	ret = kx022a_fifo_set_wmi(data);
815 	if (ret)
816 		goto unlock_out;
817 
818 	/* Enable buffer */
819 	ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2,
820 			      KX022A_MASK_BUF_EN);
821 	if (ret)
822 		goto unlock_out;
823 
824 	data->state |= KX022A_STATE_FIFO;
825 	ret = regmap_set_bits(data->regmap, data->ien_reg,
826 			      KX022A_MASK_WMI);
827 	if (ret)
828 		goto unlock_out;
829 
830 	return kx022a_turn_on_unlock(data);
831 
832 unlock_out:
833 	mutex_unlock(&data->mutex);
834 
835 	return ret;
836 }
837 
838 static int kx022a_buffer_postenable(struct iio_dev *idev)
839 {
840 	struct kx022a_data *data = iio_priv(idev);
841 
842 	/*
843 	 * If we use the data-ready trigger, the IRQ masks are handled by the
844 	 * trigger enable and the hardware buffer is not used; we just push
845 	 * results to the IIO FIFO when data-ready triggers.
846 	 */
847 	if (iio_device_get_current_mode(idev) == INDIO_BUFFER_TRIGGERED)
848 		return 0;
849 
850 	return kx022a_fifo_enable(data);
851 }
852 
853 static const struct iio_buffer_setup_ops kx022a_buffer_ops = {
854 	.postenable = kx022a_buffer_postenable,
855 	.predisable = kx022a_buffer_predisable,
856 };
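/*
 * Two data paths are supported: when the data-ready trigger is in use
 * (INDIO_BUFFER_TRIGGERED), samples are read one-by-one in
 * kx022a_trigger_handler(); otherwise the hardware FIFO is enabled and
 * drained by __kx022a_fifo_flush() when the watermark interrupt fires or
 * when user-space requests a flush.
 */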
857 
858 static irqreturn_t kx022a_trigger_handler(int irq, void *p)
859 {
860 	struct iio_poll_func *pf = p;
861 	struct iio_dev *idev = pf->indio_dev;
862 	struct kx022a_data *data = iio_priv(idev);
863 	int ret;
864 
865 	ret = regmap_bulk_read(data->regmap, KX022A_REG_XOUT_L, data->buffer,
866 			       KX022A_FIFO_SAMPLES_SIZE_BYTES);
867 	if (ret < 0)
868 		goto err_read;
869 
870 	iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
871 err_read:
872 	iio_trigger_notify_done(idev->trig);
873 
874 	return IRQ_HANDLED;
875 }
876 
877 /* Get timestamps and wake the thread if we need to read data */
878 static irqreturn_t kx022a_irq_handler(int irq, void *private)
879 {
880 	struct iio_dev *idev = private;
881 	struct kx022a_data *data = iio_priv(idev);
882 
883 	data->old_timestamp = data->timestamp;
884 	data->timestamp = iio_get_time_ns(idev);
885 
886 	if (data->state & KX022A_STATE_FIFO || data->trigger_enabled)
887 		return IRQ_WAKE_THREAD;
888 
889 	return IRQ_NONE;
890 }
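/*
 * The hard IRQ handler above only records timestamps because it runs in
 * interrupt context; the actual register accesses go through regmap over
 * I2C/SPI, which may sleep, and are therefore done in the threaded handler.
 */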
891 
892 /*
893  * WMI and data-ready IRQs are acked when results are read. If we add
894  * TILT/WAKE or other IRQs - then we may need to implement the acking
895  * (which is racy).
896  */
897 static irqreturn_t kx022a_irq_thread_handler(int irq, void *private)
898 {
899 	struct iio_dev *idev = private;
900 	struct kx022a_data *data = iio_priv(idev);
901 	irqreturn_t ret = IRQ_NONE;
902 
903 	mutex_lock(&data->mutex);
904 
905 	if (data->trigger_enabled) {
906 		iio_trigger_poll_nested(data->trig);
907 		ret = IRQ_HANDLED;
908 	}
909 
910 	if (data->state & KX022A_STATE_FIFO) {
911 		int ok;
912 
913 		ok = __kx022a_fifo_flush(idev, KX022A_FIFO_LENGTH, true);
914 		if (ok > 0)
915 			ret = IRQ_HANDLED;
916 	}
917 
918 	mutex_unlock(&data->mutex);
919 
920 	return ret;
921 }
922 
923 static int kx022a_trigger_set_state(struct iio_trigger *trig,
924 				    bool state)
925 {
926 	struct kx022a_data *data = iio_trigger_get_drvdata(trig);
927 	int ret = 0;
928 
929 	mutex_lock(&data->mutex);
930 
931 	if (data->trigger_enabled == state)
932 		goto unlock_out;
933 
934 	if (data->state & KX022A_STATE_FIFO) {
935 		dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
936 		ret = -EBUSY;
937 		goto unlock_out;
938 	}
939 
940 	ret = kx022a_turn_on_off_unlocked(data, false);
941 	if (ret)
942 		goto unlock_out;
943 
944 	data->trigger_enabled = state;
945 	ret = kx022a_set_drdy_irq(data, state);
946 	if (ret)
947 		goto unlock_out;
948 
949 	ret = kx022a_turn_on_off_unlocked(data, true);
950 
951 unlock_out:
952 	mutex_unlock(&data->mutex);
953 
954 	return ret;
955 }
956 
957 static const struct iio_trigger_ops kx022a_trigger_ops = {
958 	.set_trigger_state = kx022a_trigger_set_state,
959 };
960 
961 static int kx022a_chip_init(struct kx022a_data *data)
962 {
963 	int ret, val;
964 
965 	/* Reset the sensor */
966 	ret = regmap_write(data->regmap, KX022A_REG_CNTL2, KX022A_MASK_SRST);
967 	if (ret)
968 		return ret;
969 
970 	/*
971 	 * I've seen I2C read failures if we poll too fast after the sensor
972 	 * reset. A slight delay gives the I2C block time to recover.
973 	 */
974 	msleep(1);
975 
976 	ret = regmap_read_poll_timeout(data->regmap, KX022A_REG_CNTL2, val,
977 				       !(val & KX022A_MASK_SRST),
978 				       KX022A_SOFT_RESET_WAIT_TIME_US,
979 				       KX022A_SOFT_RESET_TOTAL_WAIT_TIME_US);
980 	if (ret) {
981 		dev_err(data->dev, "Sensor reset %s\n",
982 			val & KX022A_MASK_SRST ? "timeout" : "fail");
983 		return ret;
984 	}
985 
986 	ret = regmap_reinit_cache(data->regmap, &kx022a_regmap);
987 	if (ret) {
988 		dev_err(data->dev, "Failed to reinit reg cache\n");
989 		return ret;
990 	}
991 
992 	/* set data res 16bit */
993 	ret = regmap_set_bits(data->regmap, KX022A_REG_BUF_CNTL2,
994 			      KX022A_MASK_BRES16);
995 	if (ret) {
996 		dev_err(data->dev, "Failed to set data resolution\n");
997 		return ret;
998 	}
999 
1000 	return kx022a_prepare_irq_pin(data);
1001 }
1002 
1003 int kx022a_probe_internal(struct device *dev)
1004 {
1005 	static const char * const regulator_names[] = {"io-vdd", "vdd"};
1006 	struct iio_trigger *indio_trig;
1007 	struct fwnode_handle *fwnode;
1008 	struct kx022a_data *data;
1009 	struct regmap *regmap;
1010 	unsigned int chip_id;
1011 	struct iio_dev *idev;
1012 	int ret, irq;
1013 	char *name;
1014 
1015 	regmap = dev_get_regmap(dev, NULL);
1016 	if (!regmap) {
1017 		dev_err(dev, "no regmap\n");
1018 		return -EINVAL;
1019 	}
1020 
1021 	fwnode = dev_fwnode(dev);
1022 	if (!fwnode)
1023 		return -ENODEV;
1024 
1025 	idev = devm_iio_device_alloc(dev, sizeof(*data));
1026 	if (!idev)
1027 		return -ENOMEM;
1028 
1029 	data = iio_priv(idev);
1030 
1031 	/*
1032 	 * VDD is the analog and digital domain voltage supply and
1033 	 * IO_VDD is the digital I/O voltage supply.
1034 	 */
1035 	ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
1036 					     regulator_names);
1037 	if (ret && ret != -ENODEV)
1038 		return dev_err_probe(dev, ret, "failed to enable regulator\n");
1039 
1040 	ret = regmap_read(regmap, KX022A_REG_WHO, &chip_id);
1041 	if (ret)
1042 		return dev_err_probe(dev, ret, "Failed to access sensor\n");
1043 
1044 	if (chip_id != KX022A_ID) {
1045 		dev_err(dev, "unsupported device 0x%x\n", chip_id);
1046 		return -EINVAL;
1047 	}
1048 
1049 	irq = fwnode_irq_get_byname(fwnode, "INT1");
1050 	if (irq > 0) {
1051 		data->inc_reg = KX022A_REG_INC1;
1052 		data->ien_reg = KX022A_REG_INC4;
1053 	} else {
1054 		irq = fwnode_irq_get_byname(fwnode, "INT2");
1055 		if (irq < 0)
1056 			return dev_err_probe(dev, irq, "No suitable IRQ\n");
1057 
1058 		data->inc_reg = KX022A_REG_INC5;
1059 		data->ien_reg = KX022A_REG_INC6;
1060 	}
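	/*
	 * inc_reg holds the pin configuration register (enable, polarity,
	 * type) and ien_reg the interrupt routing register for the chosen
	 * physical pin; see kx022a_prepare_irq_pin().
	 */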
1061 
1062 	data->regmap = regmap;
1063 	data->dev = dev;
1064 	data->irq = irq;
1065 	data->odr_ns = KX022A_DEFAULT_PERIOD_NS;
1066 	mutex_init(&data->mutex);
1067 
1068 	idev->channels = kx022a_channels;
1069 	idev->num_channels = ARRAY_SIZE(kx022a_channels);
1070 	idev->name = "kx022-accel";
1071 	idev->info = &kx022a_info;
1072 	idev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
1073 	idev->available_scan_masks = kx022a_scan_masks;
1074 
1075 	/* Read the mounting matrix, if present */
1076 	ret = iio_read_mount_matrix(dev, &data->orientation);
1077 	if (ret)
1078 		return ret;
1079 
1080 	/* The sensor must be turned off for configuration */
1081 	ret = kx022a_turn_off_lock(data);
1082 	if (ret)
1083 		return ret;
1084 
1085 	ret = kx022a_chip_init(data);
1086 	if (ret) {
1087 		mutex_unlock(&data->mutex);
1088 		return ret;
1089 	}
1090 
1091 	ret = kx022a_turn_on_unlock(data);
1092 	if (ret)
1093 		return ret;
1094 
1095 	ret = devm_iio_triggered_buffer_setup_ext(dev, idev,
1096 						  &iio_pollfunc_store_time,
1097 						  kx022a_trigger_handler,
1098 						  IIO_BUFFER_DIRECTION_IN,
1099 						  &kx022a_buffer_ops,
1100 						  kx022a_fifo_attributes);
1101 
1102 	if (ret)
1103 		return dev_err_probe(data->dev, ret,
1104 				     "iio_triggered_buffer_setup_ext FAIL\n");
1105 	indio_trig = devm_iio_trigger_alloc(dev, "%sdata-rdy-dev%d", idev->name,
1106 					    iio_device_id(idev));
1107 	if (!indio_trig)
1108 		return -ENOMEM;
1109 
1110 	data->trig = indio_trig;
1111 
1112 	indio_trig->ops = &kx022a_trigger_ops;
1113 	iio_trigger_set_drvdata(indio_trig, data);
1114 
1115 	/*
1116 	 * No need to check for NULL. request_threaded_irq() defaults to
1117 	 * dev_name() should the alloc fail.
1118 	 */
1119 	name = devm_kasprintf(data->dev, GFP_KERNEL, "%s-kx022a",
1120 			      dev_name(data->dev));
1121 
1122 	ret = devm_request_threaded_irq(data->dev, irq, kx022a_irq_handler,
1123 					&kx022a_irq_thread_handler,
1124 					IRQF_ONESHOT, name, idev);
1125 	if (ret)
1126 		return dev_err_probe(data->dev, ret, "Could not request IRQ\n");
1127 
1128 
1129 	ret = devm_iio_trigger_register(dev, indio_trig);
1130 	if (ret)
1131 		return dev_err_probe(data->dev, ret,
1132 				     "Trigger registration failed\n");
1133 
1134 	ret = devm_iio_device_register(data->dev, idev);
1135 	if (ret < 0)
1136 		return dev_err_probe(dev, ret,
1137 				     "Unable to register iio device\n");
1138 
1139 	return ret;
1140 }
1141 EXPORT_SYMBOL_NS_GPL(kx022a_probe_internal, IIO_KX022A);
1142 
1143 MODULE_DESCRIPTION("ROHM/Kionix KX022A accelerometer driver");
1144 MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
1145 MODULE_LICENSE("GPL");
1146