// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2021 Google LLC.
 *
 * Common part of most Semtech SAR sensors.
 */

#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <vdso/bits.h>

#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>

#include "sx_common.h"

/* All Semtech SAR sensors have the IRQ bits in the same order. */
#define SX_COMMON_CONVDONE_IRQ		BIT(0)
#define SX_COMMON_FAR_IRQ		BIT(2)
#define SX_COMMON_CLOSE_IRQ		BIT(3)

const struct iio_event_spec sx_common_events[3] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
	},
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
	},
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_EITHER,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
				 BIT(IIO_EV_INFO_HYSTERESIS) |
				 BIT(IIO_EV_INFO_VALUE),
	},
};
EXPORT_SYMBOL_NS_GPL(sx_common_events, SEMTECH_PROX);
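
/*
 * Illustrative note: chip drivers reference this table from their proximity
 * channel definitions. A hypothetical channel entry (names are examples; see
 * the sx9324/sx9360 channel tables for real ones) would look like:
 *
 *	{
 *		.type = IIO_PROXIMITY,
 *		.channel = 0,
 *		.indexed = 1,
 *		.event_spec = sx_common_events,
 *		.num_event_specs = ARRAY_SIZE(sx_common_events),
 *	},
 */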

static irqreturn_t sx_common_irq_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct sx_common_data *data = iio_priv(indio_dev);

	if (data->trigger_enabled)
		iio_trigger_poll(data->trig);

	/*
	 * Even if no event is enabled, we need to wake the thread to clear the
	 * interrupt state by reading SX_COMMON_REG_IRQ_SRC.
	 * It is not possible to do that here because regmap_read takes a mutex.
	 */
	return IRQ_WAKE_THREAD;
}

static void sx_common_push_events(struct iio_dev *indio_dev)
{
	int ret;
	unsigned int val, chan;
	struct sx_common_data *data = iio_priv(indio_dev);
	s64 timestamp = iio_get_time_ns(indio_dev);
	unsigned long prox_changed;

	/* Read proximity state on all channels */
	ret = regmap_read(data->regmap, data->chip_info->reg_stat, &val);
	if (ret) {
		dev_err(&data->client->dev, "i2c transfer error in irq\n");
		return;
	}

	val >>= data->chip_info->stat_offset;

	/*
	 * Only iterate over channels whose proximity status changed and that
	 * have events enabled.
	 */
	prox_changed = (data->chan_prox_stat ^ val) & data->chan_event;

	for_each_set_bit(chan, &prox_changed, data->chip_info->num_channels) {
		int dir;
		u64 ev;

		dir = (val & BIT(chan)) ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
		ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
					  IIO_EV_TYPE_THRESH, dir);

		iio_push_event(indio_dev, ev, timestamp);
	}
	data->chan_prox_stat = val;
}

static int sx_common_enable_irq(struct sx_common_data *data, unsigned int irq)
{
	if (!data->client->irq)
		return 0;
	return regmap_update_bits(data->regmap, data->chip_info->reg_irq_msk,
				  irq << data->chip_info->irq_msk_offset,
				  irq << data->chip_info->irq_msk_offset);
}

static int sx_common_disable_irq(struct sx_common_data *data, unsigned int irq)
{
	if (!data->client->irq)
		return 0;
	return regmap_update_bits(data->regmap, data->chip_info->reg_irq_msk,
				  irq << data->chip_info->irq_msk_offset, 0);
}

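/*
 * Channel bookkeeping: chan_read tracks channels enabled for raw and buffered
 * reads, chan_event tracks channels enabled for threshold events. The union
 * of the two masks is what sx_common_update_chan_en() below writes to the
 * chip's channel-enable register.
 */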
static int sx_common_update_chan_en(struct sx_common_data *data,
				    unsigned long chan_read,
				    unsigned long chan_event)
{
	int ret;
	unsigned long channels = chan_read | chan_event;

	if ((data->chan_read | data->chan_event) != channels) {
		ret = regmap_update_bits(data->regmap,
					 data->chip_info->reg_enable_chan,
					 data->chip_info->mask_enable_chan,
					 channels);
		if (ret)
			return ret;
	}
	data->chan_read = chan_read;
	data->chan_event = chan_event;
	return 0;
}

static int sx_common_get_read_channel(struct sx_common_data *data, int channel)
{
	return sx_common_update_chan_en(data, data->chan_read | BIT(channel),
					data->chan_event);
}

static int sx_common_put_read_channel(struct sx_common_data *data, int channel)
{
	return sx_common_update_chan_en(data, data->chan_read & ~BIT(channel),
					data->chan_event);
}

static int sx_common_get_event_channel(struct sx_common_data *data, int channel)
{
	return sx_common_update_chan_en(data, data->chan_read,
					data->chan_event | BIT(channel));
}

static int sx_common_put_event_channel(struct sx_common_data *data, int channel)
{
	return sx_common_update_chan_en(data, data->chan_read,
					data->chan_event & ~BIT(channel));
}

/**
 * sx_common_read_proximity() - Read raw proximity value.
 * @data: Internal data
 * @chan: Channel to read
 * @val: pointer to return read value.
 *
 * Request a conversion, wait for the sensor to be ready and
 * return the raw proximity value.
 */
int sx_common_read_proximity(struct sx_common_data *data,
			     const struct iio_chan_spec *chan, int *val)
{
	int ret;
	__be16 rawval;

	mutex_lock(&data->mutex);

	ret = sx_common_get_read_channel(data, chan->channel);
	if (ret)
		goto out;

	ret = sx_common_enable_irq(data, SX_COMMON_CONVDONE_IRQ);
	if (ret)
		goto out_put_channel;

	mutex_unlock(&data->mutex);

	if (data->client->irq) {
		ret = wait_for_completion_interruptible(&data->completion);
		reinit_completion(&data->completion);
	} else {
		ret = data->chip_info->ops.wait_for_sample(data);
	}

	mutex_lock(&data->mutex);

	if (ret)
		goto out_disable_irq;

	ret = data->chip_info->ops.read_prox_data(data, chan, &rawval);
	if (ret)
		goto out_disable_irq;

	*val = sign_extend32(be16_to_cpu(rawval), chan->scan_type.realbits - 1);

	ret = sx_common_disable_irq(data, SX_COMMON_CONVDONE_IRQ);
	if (ret)
		goto out_put_channel;

	ret = sx_common_put_read_channel(data, chan->channel);
	if (ret)
		goto out;

	mutex_unlock(&data->mutex);

	return IIO_VAL_INT;

out_disable_irq:
	sx_common_disable_irq(data, SX_COMMON_CONVDONE_IRQ);
out_put_channel:
	sx_common_put_read_channel(data, chan->channel);
out:
	mutex_unlock(&data->mutex);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(sx_common_read_proximity, SEMTECH_PROX);
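
/*
 * Illustrative sketch (hypothetical names, not part of this file): a chip
 * driver typically calls sx_common_read_proximity() from its .read_raw
 * callback while holding direct mode, e.g.:
 *
 *	static int sx93xx_read_raw(struct iio_dev *indio_dev,
 *				   const struct iio_chan_spec *chan,
 *				   int *val, int *val2, long mask)
 *	{
 *		struct sx_common_data *data = iio_priv(indio_dev);
 *		int ret;
 *
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			ret = iio_device_claim_direct_mode(indio_dev);
 *			if (ret)
 *				return ret;
 *			ret = sx_common_read_proximity(data, chan, val);
 *			iio_device_release_direct_mode(indio_dev);
 *			return ret;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 * See the sx9324/sx9360 drivers for real users.
 */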

/**
 * sx_common_read_event_config() - Read event configuration.
 * @indio_dev: iio device object
 * @chan: Channel to read
 * @type: Type of event (unused)
 * @dir: Direction of event (unused)
 *
 * Return: whether the given channel is used for event gathering.
 */
int sx_common_read_event_config(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir)
{
	struct sx_common_data *data = iio_priv(indio_dev);

	return !!(data->chan_event & BIT(chan->channel));
}
EXPORT_SYMBOL_NS_GPL(sx_common_read_event_config, SEMTECH_PROX);

/**
 * sx_common_write_event_config() - Configure event setting.
 * @indio_dev: iio device object
 * @chan: Channel to enable
 * @type: Type of event (unused)
 * @dir: Direction of event (unused)
 * @state: State of the event.
 *
 * Enable/Disable event on a given channel.
 */
int sx_common_write_event_config(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir, int state)
{
	struct sx_common_data *data = iio_priv(indio_dev);
	unsigned int eventirq = SX_COMMON_FAR_IRQ | SX_COMMON_CLOSE_IRQ;
	int ret;

	/* If the state hasn't changed, there's nothing to do. */
	if (!!(data->chan_event & BIT(chan->channel)) == state)
		return 0;

	mutex_lock(&data->mutex);
	if (state) {
		ret = sx_common_get_event_channel(data, chan->channel);
		if (ret)
			goto out_unlock;
		if (!(data->chan_event & ~BIT(chan->channel))) {
			ret = sx_common_enable_irq(data, eventirq);
			if (ret)
				sx_common_put_event_channel(data, chan->channel);
		}
	} else {
		ret = sx_common_put_event_channel(data, chan->channel);
		if (ret)
			goto out_unlock;
		if (!data->chan_event) {
			ret = sx_common_disable_irq(data, eventirq);
			if (ret)
				sx_common_get_event_channel(data, chan->channel);
		}
	}

out_unlock:
	mutex_unlock(&data->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(sx_common_write_event_config, SEMTECH_PROX);

static int sx_common_set_trigger_state(struct iio_trigger *trig, bool state)
{
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	struct sx_common_data *data = iio_priv(indio_dev);
	int ret = 0;

	mutex_lock(&data->mutex);

	if (state)
		ret = sx_common_enable_irq(data, SX_COMMON_CONVDONE_IRQ);
	else if (!data->chan_read)
		ret = sx_common_disable_irq(data, SX_COMMON_CONVDONE_IRQ);
	if (ret)
		goto out;

	data->trigger_enabled = state;

out:
	mutex_unlock(&data->mutex);

	return ret;
}

static const struct iio_trigger_ops sx_common_trigger_ops = {
	.set_trigger_state = sx_common_set_trigger_state,
};

static irqreturn_t sx_common_irq_thread_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct sx_common_data *data = iio_priv(indio_dev);
	int ret;
	unsigned int val;

	mutex_lock(&data->mutex);

	ret = regmap_read(data->regmap, SX_COMMON_REG_IRQ_SRC, &val);
	if (ret) {
		dev_err(&data->client->dev, "i2c transfer error in irq\n");
		goto out;
	}

	if (val & ((SX_COMMON_FAR_IRQ | SX_COMMON_CLOSE_IRQ) << data->chip_info->irq_msk_offset))
		sx_common_push_events(indio_dev);

	if (val & (SX_COMMON_CONVDONE_IRQ << data->chip_info->irq_msk_offset))
		complete(&data->completion);

out:
	mutex_unlock(&data->mutex);

	return IRQ_HANDLED;
}

static irqreturn_t sx_common_trigger_handler(int irq, void *private)
{
	struct iio_poll_func *pf = private;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct sx_common_data *data = iio_priv(indio_dev);
	__be16 val;
	int bit, ret, i = 0;

	mutex_lock(&data->mutex);

	for_each_set_bit(bit, indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		ret = data->chip_info->ops.read_prox_data(data,
							  &indio_dev->channels[bit],
							  &val);
		if (ret)
			goto out;

		data->buffer.channels[i++] = val;
	}

	iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
					   pf->timestamp);

out:
	mutex_unlock(&data->mutex);

	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

static int sx_common_buffer_preenable(struct iio_dev *indio_dev)
{
	struct sx_common_data *data = iio_priv(indio_dev);
	unsigned long channels = 0;
	int bit, ret;

	mutex_lock(&data->mutex);
	for_each_set_bit(bit, indio_dev->active_scan_mask,
			 indio_dev->masklength)
		__set_bit(indio_dev->channels[bit].channel, &channels);

	ret = sx_common_update_chan_en(data, channels, data->chan_event);
	mutex_unlock(&data->mutex);
	return ret;
}

static int sx_common_buffer_postdisable(struct iio_dev *indio_dev)
{
	struct sx_common_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = sx_common_update_chan_en(data, 0, data->chan_event);
	mutex_unlock(&data->mutex);
	return ret;
}

static const struct iio_buffer_setup_ops sx_common_buffer_setup_ops = {
	.preenable = sx_common_buffer_preenable,
	.postdisable = sx_common_buffer_postdisable,
};

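/*
 * sx_common_get_raw_register_config() - Override a register default from ACPI.
 * @dev: device pointer
 * @reg_def: register default updated in place
 *
 * If an ACPI companion exposes a u32 property named "<HID>,reg_<property>"
 * (for instance "ABCD1234,reg_prox_ctrl0" on a device whose _HID is ABCD1234;
 * the names here are purely illustrative), its value replaces the compiled-in
 * default for that register.
 */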
void sx_common_get_raw_register_config(struct device *dev,
				       struct sx_common_reg_default *reg_def)
{
#ifdef CONFIG_ACPI
	struct acpi_device *adev = ACPI_COMPANION(dev);
	u32 raw = 0;
	char prop[80];
	int ret;

	if (!reg_def->property || !adev)
		return;

	snprintf(prop, ARRAY_SIZE(prop), "%s,reg_%s", acpi_device_hid(adev), reg_def->property);
	ret = device_property_read_u32(dev, prop, &raw);
	if (ret)
		return;

	reg_def->def = raw;
#endif
}
EXPORT_SYMBOL_NS_GPL(sx_common_get_raw_register_config, SEMTECH_PROX);

#define SX_COMMON_SOFT_RESET		0xde

static int sx_common_init_device(struct device *dev, struct iio_dev *indio_dev)
{
	struct sx_common_data *data = iio_priv(indio_dev);
	struct sx_common_reg_default tmp;
	const struct sx_common_reg_default *initval;
	int ret;
	unsigned int i, val;

	ret = regmap_write(data->regmap, data->chip_info->reg_reset,
			   SX_COMMON_SOFT_RESET);
	if (ret)
		return ret;

	usleep_range(1000, 2000); /* power-up time is ~1ms. */

	/* Clear reset interrupt state by reading SX_COMMON_REG_IRQ_SRC. */
	ret = regmap_read(data->regmap, SX_COMMON_REG_IRQ_SRC, &val);
	if (ret)
		return ret;

	/* Program defaults from constant or BIOS. */
	for (i = 0; i < data->chip_info->num_default_regs; i++) {
		initval = data->chip_info->ops.get_default_reg(dev, i, &tmp);
		ret = regmap_write(data->regmap, initval->reg, initval->def);
		if (ret)
			return ret;
	}

	return data->chip_info->ops.init_compensation(indio_dev);
}

/**
 * sx_common_probe() - Common setup for Semtech SAR sensor
 * @client: I2C client object
 * @chip_info: Semtech sensor chip information.
 * @regmap_config: Sensor registers map configuration.
 */
int sx_common_probe(struct i2c_client *client,
		    const struct sx_common_chip_info *chip_info,
		    const struct regmap_config *regmap_config)
{
	static const char * const regulator_names[] = { "vdd", "svdd" };
	struct device *dev = &client->dev;
	struct iio_dev *indio_dev;
	struct sx_common_data *data;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);

	data->chip_info = chip_info;
	data->client = client;
	mutex_init(&data->mutex);
	init_completion(&data->completion);

	data->regmap = devm_regmap_init_i2c(client, regmap_config);
	if (IS_ERR(data->regmap))
		return dev_err_probe(dev, PTR_ERR(data->regmap),
				     "Could not init register map\n");

	ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
					     regulator_names);
	if (ret)
		return dev_err_probe(dev, ret, "Unable to get regulators\n");

	/* Must wait for Tpor time after initial power up */
	usleep_range(1000, 1100);

	ret = data->chip_info->ops.check_whoami(dev, indio_dev);
	if (ret)
		return dev_err_probe(dev, ret, "error reading WHOAMI\n");

	indio_dev->modes = INDIO_DIRECT_MODE;

	indio_dev->channels = data->chip_info->iio_channels;
	indio_dev->num_channels = data->chip_info->num_iio_channels;
	indio_dev->info = &data->chip_info->iio_info;

	i2c_set_clientdata(client, indio_dev);

	ret = sx_common_init_device(dev, indio_dev);
	if (ret)
		return dev_err_probe(dev, ret, "Unable to initialize sensor\n");

	if (client->irq) {
		ret = devm_request_threaded_irq(dev, client->irq,
						sx_common_irq_handler,
						sx_common_irq_thread_handler,
						IRQF_ONESHOT,
						"sx_event", indio_dev);
		if (ret)
			return dev_err_probe(dev, ret, "No IRQ\n");

		data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
						    indio_dev->name,
						    iio_device_id(indio_dev));
		if (!data->trig)
			return -ENOMEM;

		data->trig->ops = &sx_common_trigger_ops;
		iio_trigger_set_drvdata(data->trig, indio_dev);

		ret = devm_iio_trigger_register(dev, data->trig);
		if (ret)
			return ret;
	}

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
					      iio_pollfunc_store_time,
					      sx_common_trigger_handler,
					      &sx_common_buffer_setup_ops);
	if (ret)
		return ret;

	return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(sx_common_probe, SEMTECH_PROX);
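
/*
 * Illustrative sketch (hypothetical names, not part of this file): a chip
 * driver fills in a struct sx_common_chip_info and a regmap_config, then
 * delegates the rest of probing to sx_common_probe(), e.g.:
 *
 *	static int sx93xx_probe(struct i2c_client *client)
 *	{
 *		return sx_common_probe(client, &sx93xx_chip_info,
 *				       &sx93xx_regmap_config);
 *	}
 *
 * See the sx9310, sx9324 and sx9360 drivers for complete users.
 */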

MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
MODULE_DESCRIPTION("Common functions and structures for Semtech sensor");
MODULE_LICENSE("GPL v2");