// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2021 Google LLC.
 *
 * Common part of most Semtech SAR sensors.
 */

#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <vdso/bits.h>

#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>

#include "sx_common.h"

/* All Semtech SAR sensors have the IRQ bits in the same order. */
#define SX_COMMON_CONVDONE_IRQ		BIT(0)
#define SX_COMMON_FAR_IRQ		BIT(2)
#define SX_COMMON_CLOSE_IRQ		BIT(3)

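/*
 * Threshold events shared by all supported parts. Chip drivers typically point
 * the .event_spec/.num_event_specs fields of their proximity channels at this
 * array so every channel exposes the same rising/falling/either events.
 */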
const struct iio_event_spec sx_common_events[3] = {
        {
                .type = IIO_EV_TYPE_THRESH,
                .dir = IIO_EV_DIR_RISING,
                .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
        },
        {
                .type = IIO_EV_TYPE_THRESH,
                .dir = IIO_EV_DIR_FALLING,
                .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
        },
        {
                .type = IIO_EV_TYPE_THRESH,
                .dir = IIO_EV_DIR_EITHER,
                .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
                                 BIT(IIO_EV_INFO_HYSTERESIS) |
                                 BIT(IIO_EV_INFO_VALUE),
        },
};
EXPORT_SYMBOL_NS_GPL(sx_common_events, SEMTECH_PROX);

static irqreturn_t sx_common_irq_handler(int irq, void *private)
{
        struct iio_dev *indio_dev = private;
        struct sx_common_data *data = iio_priv(indio_dev);

        if (data->trigger_enabled)
                iio_trigger_poll(data->trig);

        /*
         * Even if no event is enabled, we need to wake the thread to clear the
         * interrupt state by reading SX_COMMON_REG_IRQ_SRC.
         * It is not possible to do that here because regmap_read takes a mutex.
         */
        return IRQ_WAKE_THREAD;
}

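/*
 * Read the per-channel proximity status and push an IIO threshold event for
 * each event-enabled channel whose near/far state changed since the last read.
 */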
static void sx_common_push_events(struct iio_dev *indio_dev)
{
        int ret;
        unsigned int val, chan;
        struct sx_common_data *data = iio_priv(indio_dev);
        s64 timestamp = iio_get_time_ns(indio_dev);
        unsigned long prox_changed;

        /* Read proximity state on all channels */
        ret = regmap_read(data->regmap, data->chip_info->reg_stat, &val);
        if (ret) {
                dev_err(&data->client->dev, "i2c transfer error in irq\n");
                return;
        }

        val >>= data->chip_info->stat_offset;

        /*
         * Only iterate over channels with changes on proximity status that have
         * events enabled.
         */
        prox_changed = (data->chan_prox_stat ^ val) & data->chan_event;

        for_each_set_bit(chan, &prox_changed, data->chip_info->num_channels) {
                int dir;
                u64 ev;

                dir = (val & BIT(chan)) ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
                ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
                                          IIO_EV_TYPE_THRESH, dir);

                iio_push_event(indio_dev, ev, timestamp);
        }
        data->chan_prox_stat = val;
}

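/*
 * Enable or disable the requested interrupt sources in the chip's IRQ mask
 * register. Both helpers are no-ops when the device has no interrupt line.
 */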
static int sx_common_enable_irq(struct sx_common_data *data, unsigned int irq)
{
        if (!data->client->irq)
                return 0;
        return regmap_update_bits(data->regmap, data->chip_info->reg_irq_msk,
                                  irq << data->chip_info->irq_msk_offset,
                                  irq << data->chip_info->irq_msk_offset);
}

static int sx_common_disable_irq(struct sx_common_data *data, unsigned int irq)
{
        if (!data->client->irq)
                return 0;
        return regmap_update_bits(data->regmap, data->chip_info->reg_irq_msk,
                                  irq << data->chip_info->irq_msk_offset, 0);
}

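/*
 * Keep track of which channels are needed for raw reads and which for events,
 * and only touch the chip's channel-enable register when the union of the two
 * sets actually changes.
 */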
static int sx_common_update_chan_en(struct sx_common_data *data,
                                    unsigned long chan_read,
                                    unsigned long chan_event)
{
        int ret;
        unsigned long channels = chan_read | chan_event;

        if ((data->chan_read | data->chan_event) != channels) {
                ret = regmap_update_bits(data->regmap,
                                         data->chip_info->reg_enable_chan,
                                         data->chip_info->mask_enable_chan,
                                         channels);
                if (ret)
                        return ret;
        }
        data->chan_read = chan_read;
        data->chan_event = chan_event;
        return 0;
}

static int sx_common_get_read_channel(struct sx_common_data *data, int channel)
{
        return sx_common_update_chan_en(data, data->chan_read | BIT(channel),
                                        data->chan_event);
}

static int sx_common_put_read_channel(struct sx_common_data *data, int channel)
{
        return sx_common_update_chan_en(data, data->chan_read & ~BIT(channel),
                                        data->chan_event);
}

static int sx_common_get_event_channel(struct sx_common_data *data, int channel)
{
        return sx_common_update_chan_en(data, data->chan_read,
                                        data->chan_event | BIT(channel));
}

static int sx_common_put_event_channel(struct sx_common_data *data, int channel)
{
        return sx_common_update_chan_en(data, data->chan_read,
                                        data->chan_event & ~BIT(channel));
}

/**
 * sx_common_read_proximity() - Read raw proximity value.
 * @data: Internal data
 * @chan: Channel to read
 * @val: Pointer to store the value read.
 *
 * Request a conversion, wait for the sensor to be ready and
 * return the raw proximity value.
 *
 * Return: IIO_VAL_INT on success, a negative error code otherwise.
 */
int sx_common_read_proximity(struct sx_common_data *data,
                             const struct iio_chan_spec *chan, int *val)
{
        int ret;
        __be16 rawval;

        mutex_lock(&data->mutex);

        ret = sx_common_get_read_channel(data, chan->channel);
        if (ret)
                goto out;

        ret = sx_common_enable_irq(data, SX_COMMON_CONVDONE_IRQ);
        if (ret)
                goto out_put_channel;

        mutex_unlock(&data->mutex);

        if (data->client->irq) {
                ret = wait_for_completion_interruptible(&data->completion);
                reinit_completion(&data->completion);
        } else {
                ret = data->chip_info->ops.wait_for_sample(data);
        }

        mutex_lock(&data->mutex);

        if (ret)
                goto out_disable_irq;

        ret = data->chip_info->ops.read_prox_data(data, chan, &rawval);
        if (ret)
                goto out_disable_irq;

        *val = sign_extend32(be16_to_cpu(rawval), chan->scan_type.realbits - 1);

        ret = sx_common_disable_irq(data, SX_COMMON_CONVDONE_IRQ);
        if (ret)
                goto out_put_channel;

        ret = sx_common_put_read_channel(data, chan->channel);
        if (ret)
                goto out;

        mutex_unlock(&data->mutex);

        return IIO_VAL_INT;

out_disable_irq:
        sx_common_disable_irq(data, SX_COMMON_CONVDONE_IRQ);
out_put_channel:
        sx_common_put_read_channel(data, chan->channel);
out:
        mutex_unlock(&data->mutex);

        return ret;
}
EXPORT_SYMBOL_NS_GPL(sx_common_read_proximity, SEMTECH_PROX);

/**
 * sx_common_read_event_config() - Read event configuration.
 * @indio_dev: iio device object
 * @chan: Channel to read
 * @type: Type of event (unused)
 * @dir: Direction of event (unused)
 *
 * Return: 1 if the given channel is used for event gathering, 0 otherwise.
 */
int sx_common_read_event_config(struct iio_dev *indio_dev,
                                const struct iio_chan_spec *chan,
                                enum iio_event_type type,
                                enum iio_event_direction dir)
{
        struct sx_common_data *data = iio_priv(indio_dev);

        return !!(data->chan_event & BIT(chan->channel));
}
EXPORT_SYMBOL_NS_GPL(sx_common_read_event_config, SEMTECH_PROX);

/**
 * sx_common_write_event_config() - Configure event setting.
 * @indio_dev: iio device object
 * @chan: Channel to enable
 * @type: Type of event (unused)
 * @dir: Direction of event (unused)
 * @state: State of the event.
 *
 * Enable/disable events on a given channel.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int sx_common_write_event_config(struct iio_dev *indio_dev,
                                 const struct iio_chan_spec *chan,
                                 enum iio_event_type type,
                                 enum iio_event_direction dir, int state)
{
        struct sx_common_data *data = iio_priv(indio_dev);
        unsigned int eventirq = SX_COMMON_FAR_IRQ | SX_COMMON_CLOSE_IRQ;
        int ret;

        /* If the state hasn't changed, there's nothing to do. */
        if (!!(data->chan_event & BIT(chan->channel)) == state)
                return 0;

        mutex_lock(&data->mutex);
        if (state) {
                ret = sx_common_get_event_channel(data, chan->channel);
                if (ret)
                        goto out_unlock;
                if (!(data->chan_event & ~BIT(chan->channel))) {
                        ret = sx_common_enable_irq(data, eventirq);
                        if (ret)
                                sx_common_put_event_channel(data, chan->channel);
                }
        } else {
                ret = sx_common_put_event_channel(data, chan->channel);
                if (ret)
                        goto out_unlock;
                if (!data->chan_event) {
                        ret = sx_common_disable_irq(data, eventirq);
                        if (ret)
                                sx_common_get_event_channel(data, chan->channel);
                }
        }

out_unlock:
        mutex_unlock(&data->mutex);
        return ret;
}
EXPORT_SYMBOL_NS_GPL(sx_common_write_event_config, SEMTECH_PROX);

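/*
 * Called when the data-ready trigger is attached to or detached from a buffer:
 * enable the conversion-done interrupt while the trigger is in use, and only
 * disable it again if no raw read still needs it.
 */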
static int sx_common_set_trigger_state(struct iio_trigger *trig, bool state)
{
        struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
        struct sx_common_data *data = iio_priv(indio_dev);
        int ret = 0;

        mutex_lock(&data->mutex);

        if (state)
                ret = sx_common_enable_irq(data, SX_COMMON_CONVDONE_IRQ);
        else if (!data->chan_read)
                ret = sx_common_disable_irq(data, SX_COMMON_CONVDONE_IRQ);
        if (ret)
                goto out;

        data->trigger_enabled = state;

out:
        mutex_unlock(&data->mutex);

        return ret;
}

static const struct iio_trigger_ops sx_common_trigger_ops = {
        .set_trigger_state = sx_common_set_trigger_state,
};

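/*
 * Threaded half of the interrupt handler: read SX_COMMON_REG_IRQ_SRC to clear
 * the interrupt, push proximity events on near/far changes and complete any
 * pending conversion wait.
 */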
static irqreturn_t sx_common_irq_thread_handler(int irq, void *private)
{
        struct iio_dev *indio_dev = private;
        struct sx_common_data *data = iio_priv(indio_dev);
        int ret;
        unsigned int val;

        mutex_lock(&data->mutex);

        ret = regmap_read(data->regmap, SX_COMMON_REG_IRQ_SRC, &val);
        if (ret) {
                dev_err(&data->client->dev, "i2c transfer error in irq\n");
                goto out;
        }

        if (val & ((SX_COMMON_FAR_IRQ | SX_COMMON_CLOSE_IRQ) << data->chip_info->irq_msk_offset))
                sx_common_push_events(indio_dev);

        if (val & (SX_COMMON_CONVDONE_IRQ << data->chip_info->irq_msk_offset))
                complete(&data->completion);

out:
        mutex_unlock(&data->mutex);

        return IRQ_HANDLED;
}

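/*
 * Triggered-buffer handler: read the proximity data of every channel in the
 * active scan mask and push the samples, with a timestamp, to the buffer.
 */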
static irqreturn_t sx_common_trigger_handler(int irq, void *private)
{
        struct iio_poll_func *pf = private;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct sx_common_data *data = iio_priv(indio_dev);
        __be16 val;
        int bit, ret, i = 0;

        mutex_lock(&data->mutex);

        for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = data->chip_info->ops.read_prox_data(data,
                                                          &indio_dev->channels[bit],
                                                          &val);
                if (ret)
                        goto out;

                data->buffer.channels[i++] = val;
        }

        iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
                                           pf->timestamp);

out:
        mutex_unlock(&data->mutex);

        iio_trigger_notify_done(indio_dev->trig);

        return IRQ_HANDLED;
}

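/*
 * Buffer setup: enable the scanned channels on the chip before capture starts
 * and drop the extra read channels again once the buffer is disabled, leaving
 * event-only channels untouched.
 */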
static int sx_common_buffer_preenable(struct iio_dev *indio_dev)
{
        struct sx_common_data *data = iio_priv(indio_dev);
        unsigned long channels = 0;
        int bit, ret;

        mutex_lock(&data->mutex);
        for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength)
                __set_bit(indio_dev->channels[bit].channel, &channels);

        ret = sx_common_update_chan_en(data, channels, data->chan_event);
        mutex_unlock(&data->mutex);
        return ret;
}

static int sx_common_buffer_postdisable(struct iio_dev *indio_dev)
{
        struct sx_common_data *data = iio_priv(indio_dev);
        int ret;

        mutex_lock(&data->mutex);
        ret = sx_common_update_chan_en(data, 0, data->chan_event);
        mutex_unlock(&data->mutex);
        return ret;
}

static const struct iio_buffer_setup_ops sx_common_buffer_setup_ops = {
        .preenable = sx_common_buffer_preenable,
        .postdisable = sx_common_buffer_postdisable,
};

static void sx_common_regulator_disable(void *_data)
{
        struct sx_common_data *data = _data;

        regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
}

#define SX_COMMON_SOFT_RESET		0xde

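/*
 * Soft-reset the part, wait for it to power up, clear the reset interrupt and
 * program the chip defaults before running the chip-specific compensation.
 */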
static int sx_common_init_device(struct device *dev, struct iio_dev *indio_dev)
{
        struct sx_common_data *data = iio_priv(indio_dev);
        struct sx_common_reg_default tmp;
        const struct sx_common_reg_default *initval;
        int ret;
        unsigned int i, val;

        ret = regmap_write(data->regmap, data->chip_info->reg_reset,
                           SX_COMMON_SOFT_RESET);
        if (ret)
                return ret;

        usleep_range(1000, 2000); /* power-up time is ~1ms. */

        /* Clear reset interrupt state by reading SX_COMMON_REG_IRQ_SRC. */
        ret = regmap_read(data->regmap, SX_COMMON_REG_IRQ_SRC, &val);
        if (ret)
                return ret;

        /* Program defaults from constant or BIOS. */
        for (i = 0; i < data->chip_info->num_default_regs; i++) {
                initval = data->chip_info->ops.get_default_reg(dev, i, &tmp);
                ret = regmap_write(data->regmap, initval->reg, initval->def);
                if (ret)
                        return ret;
        }

        return data->chip_info->ops.init_compensation(indio_dev);
}

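/*
 * A minimal sketch of how a chip driver is expected to call sx_common_probe()
 * from its own I2C probe function; the sx93xx names below are hypothetical:
 *
 *	static int sx93xx_probe(struct i2c_client *client)
 *	{
 *		return sx_common_probe(client, &sx93xx_chip_info,
 *				       &sx93xx_regmap_config);
 *	}
 */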
/**
 * sx_common_probe() - Common setup for Semtech SAR sensor
 * @client: I2C client object
 * @chip_info: Semtech sensor chip information.
 * @regmap_config: Sensor registers map configuration.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int sx_common_probe(struct i2c_client *client,
                    const struct sx_common_chip_info *chip_info,
                    const struct regmap_config *regmap_config)
{
        struct device *dev = &client->dev;
        struct iio_dev *indio_dev;
        struct sx_common_data *data;
        int ret;

        indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;

        data = iio_priv(indio_dev);

        data->chip_info = chip_info;
        data->client = client;
        data->supplies[0].supply = "vdd";
        data->supplies[1].supply = "svdd";
        mutex_init(&data->mutex);
        init_completion(&data->completion);

        data->regmap = devm_regmap_init_i2c(client, regmap_config);
        if (IS_ERR(data->regmap))
                return dev_err_probe(dev, PTR_ERR(data->regmap),
                                     "Could not initialize register map\n");

        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
                                      data->supplies);
        if (ret)
                return dev_err_probe(dev, ret, "Unable to get regulators\n");

        ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies), data->supplies);
        if (ret)
                return dev_err_probe(dev, ret, "Unable to enable regulators\n");

        /* Must wait for Tpor time after initial power up */
        usleep_range(1000, 1100);

        ret = devm_add_action_or_reset(dev, sx_common_regulator_disable, data);
        if (ret)
                return dev_err_probe(dev, ret,
                                     "Unable to register regulators deleter\n");

        ret = data->chip_info->ops.check_whoami(dev, indio_dev);
        if (ret)
                return dev_err_probe(dev, ret, "error reading WHOAMI\n");

        indio_dev->modes = INDIO_DIRECT_MODE;

        indio_dev->channels = data->chip_info->iio_channels;
        indio_dev->num_channels = data->chip_info->num_iio_channels;
        indio_dev->info = &data->chip_info->iio_info;

        i2c_set_clientdata(client, indio_dev);

        ret = sx_common_init_device(dev, indio_dev);
        if (ret)
                return dev_err_probe(dev, ret, "Unable to initialize sensor\n");

        if (client->irq) {
                ret = devm_request_threaded_irq(dev, client->irq,
                                                sx_common_irq_handler,
                                                sx_common_irq_thread_handler,
                                                IRQF_ONESHOT,
                                                "sx_event", indio_dev);
                if (ret)
                        return dev_err_probe(dev, ret, "Unable to request IRQ\n");

                data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
                                                    indio_dev->name,
                                                    iio_device_id(indio_dev));
                if (!data->trig)
                        return -ENOMEM;

                data->trig->ops = &sx_common_trigger_ops;
                iio_trigger_set_drvdata(data->trig, indio_dev);

                ret = devm_iio_trigger_register(dev, data->trig);
                if (ret)
                        return ret;
        }

        ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
                                              iio_pollfunc_store_time,
                                              sx_common_trigger_handler,
                                              &sx_common_buffer_setup_ops);
        if (ret)
                return ret;

        return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(sx_common_probe, SEMTECH_PROX);

MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
MODULE_DESCRIPTION("Common functions and structures for Semtech SAR sensors");
MODULE_LICENSE("GPL v2");