#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include "lis3l02dq.h"
14
15 /**
16 * combine_8_to_16() utility function to munge two u8s into u16
17 **/
combine_8_to_16(u8 lower,u8 upper)18 static inline u16 combine_8_to_16(u8 lower, u8 upper)
19 {
20 u16 _lower = lower;
21 u16 _upper = upper;
22
23 return _lower | (_upper << 8);
24 }
25
26 /**
27 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
28 **/
lis3l02dq_data_rdy_trig_poll(int irq,void * private)29 irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
30 {
31 struct iio_dev *indio_dev = private;
32 struct lis3l02dq_state *st = iio_priv(indio_dev);
33
34 if (st->trigger_on) {
35 iio_trigger_poll(st->trig);
36 return IRQ_HANDLED;
37 }
38
39 return IRQ_WAKE_THREAD;
40 }
41
/*
 * TX pattern for reading all six output registers (X/Y/Z low and high
 * bytes).  Each register address (with the read bit set) is followed by
 * a dummy 0 byte that is clocked out while the register contents are
 * clocked in, hence the interleaved zeros.  lis3l02dq_read_all() indexes
 * this table in steps of 4 (one low/high address pair per channel).
 */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
50
/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @indio_dev: IIO device state
 * @rx_array: (dma capable) receive array, must be at least
 *	      4*number of channels; may be NULL, in which case the
 *	      transfers are still performed (callers use this to clear
 *	      outstanding data ready events) but nothing is captured
 **/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	/* Two 2-byte transfers (low byte, high byte) per enabled channel */
	xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2,
			sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	mutex_lock(&st->buf_lock);

	/* One table entry pair (4 bytes) per possible channel */
	for (i = 0; i < ARRAY_SIZE(read_all_tx_array) / 4; i++)
		if (test_bit(i, indio_dev->active_scan_mask)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + (2 * j);
			st->tx[2 * j] = read_all_tx_array[i * 4];
			st->tx[2 * j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + (j * 2);
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			/* toggle chip select between register accesses */
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + (2 * j);
			st->tx[2 * j] = read_all_tx_array[i * 4 + 2];
			st->tx[2 * j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + (j * 2);
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}

	/* After these are transmitted, the rx_buff should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}
111
/**
 * lis3l02dq_get_buffer_element() - capture one scan into @buf
 * @indio_dev: IIO device state
 * @buf: destination for the packed s16 samples, one per enabled channel
 *
 * Returns the number of bytes written to @buf on success or a negative
 * errno on failure.
 */
static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
					u8 *buf)
{
	s16 *samples = (s16 *)buf;
	int scan_count = bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);
	int ret, chan;
	u8 *raw;

	/* 4 raw bytes per channel: addr echo + low, addr echo + high */
	raw = kcalloc(4, scan_count, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = lis3l02dq_read_all(indio_dev, raw);
	if (ret < 0)
		goto free_raw;

	/* Data bytes sit in the odd positions of each 4-byte group */
	for (chan = 0; chan < scan_count; chan++)
		samples[chan] = combine_8_to_16(raw[chan * 4 + 1],
						raw[chan * 4 + 3]);
	ret = scan_count * sizeof(samples[0]);

free_raw:
	kfree(raw);
	return ret;
}
136
lis3l02dq_trigger_handler(int irq,void * p)137 static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
138 {
139 struct iio_poll_func *pf = p;
140 struct iio_dev *indio_dev = pf->indio_dev;
141 int len = 0;
142 char *data;
143
144 data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
145 if (!data)
146 goto done;
147
148 if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
149 len = lis3l02dq_get_buffer_element(indio_dev, data);
150
151 iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
152
153 kfree(data);
154 done:
155 iio_trigger_notify_done(indio_dev->trig);
156 return IRQ_HANDLED;
157 }
158
/* Caller responsible for locking as necessary. */
/*
 * Enable or disable data ready generation in CTRL_2, tracking the
 * requested state in st->trigger_on so the hard-irq handler knows
 * whether to poll the trigger.  No-op when the hardware already
 * matches the requested state.
 */
static int
__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		/* Disable the data ready signal */
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* The double write is to overcome a hardware bug? */
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		st->trigger_on = false;
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* If not set, enable requested
		 * first disable all events
		 */
		ret = lis3l02dq_disable_all_events(indio_dev);
		if (ret < 0)
			goto error_ret;

		/* NOTE(review): this assumes lis3l02dq_disable_all_events()
		 * returns the resulting CTRL_2 value on success — verify in
		 * its definition.
		 */
		valold = ret |
			LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* Set the flag before enabling so the irq handler sees it
		 * as soon as interrupts can fire.
		 */
		st->trigger_on = true;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}
219
220 /**
221 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
222 *
223 * If disabling the interrupt also does a final read to ensure it is clear.
224 * This is only important in some cases where the scan enable elements are
225 * switched before the buffer is reenabled.
226 **/
lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger * trig,bool state)227 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
228 bool state)
229 {
230 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
231 int ret = 0;
232 u8 t;
233
234 __lis3l02dq_write_data_ready_config(indio_dev, state);
235 if (!state) {
236 /*
237 * A possible quirk with the handler is currently worked around
238 * by ensuring outstanding read events are cleared.
239 */
240 ret = lis3l02dq_read_all(indio_dev, NULL);
241 }
242 lis3l02dq_spi_read_reg_8(indio_dev,
243 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
244 &t);
245 return ret;
246 }
247
248 /**
249 * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger
250 * @trig: the datardy trigger
251 */
lis3l02dq_trig_try_reen(struct iio_trigger * trig)252 static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
253 {
254 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
255 struct lis3l02dq_state *st = iio_priv(indio_dev);
256 int i;
257
258 /* If gpio still high (or high again)
259 * In theory possible we will need to do this several times
260 */
261 for (i = 0; i < 5; i++)
262 if (gpio_get_value(st->gpio))
263 lis3l02dq_read_all(indio_dev, NULL);
264 else
265 break;
266 if (i == 5)
267 pr_info("Failed to clear the interrupt for lis3l02dq\n");
268
269 /* irq reenabled so success! */
270 return 0;
271 }
272
/* Data ready trigger: state control plus irq re-enable quirk handling. */
static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
	.try_reenable = &lis3l02dq_trig_try_reen,
};
278
lis3l02dq_probe_trigger(struct iio_dev * indio_dev)279 int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
280 {
281 int ret;
282 struct lis3l02dq_state *st = iio_priv(indio_dev);
283
284 st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
285 if (!st->trig) {
286 ret = -ENOMEM;
287 goto error_ret;
288 }
289
290 st->trig->dev.parent = &st->us->dev;
291 st->trig->ops = &lis3l02dq_trigger_ops;
292 iio_trigger_set_drvdata(st->trig, indio_dev);
293 ret = iio_trigger_register(st->trig);
294 if (ret)
295 goto error_free_trig;
296
297 return 0;
298
299 error_free_trig:
300 iio_trigger_free(st->trig);
301 error_ret:
302 return ret;
303 }
304
/* Undo lis3l02dq_probe_trigger(): unregister then free the trigger. */
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	iio_trigger_free(st->trig);
}
312
/* Undo lis3l02dq_configure_buffer(): free the poll func and the kfifo. */
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}
318
lis3l02dq_buffer_postenable(struct iio_dev * indio_dev)319 static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
320 {
321 /* Disable unwanted channels otherwise the interrupt will not clear */
322 u8 t;
323 int ret;
324 bool oneenabled = false;
325
326 ret = lis3l02dq_spi_read_reg_8(indio_dev,
327 LIS3L02DQ_REG_CTRL_1_ADDR,
328 &t);
329 if (ret)
330 goto error_ret;
331
332 if (test_bit(0, indio_dev->active_scan_mask)) {
333 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
334 oneenabled = true;
335 } else {
336 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
337 }
338 if (test_bit(1, indio_dev->active_scan_mask)) {
339 t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
340 oneenabled = true;
341 } else {
342 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
343 }
344 if (test_bit(2, indio_dev->active_scan_mask)) {
345 t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
346 oneenabled = true;
347 } else {
348 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
349 }
350 if (!oneenabled) /* what happens in this case is unknown */
351 return -EINVAL;
352 ret = lis3l02dq_spi_write_reg_8(indio_dev,
353 LIS3L02DQ_REG_CTRL_1_ADDR,
354 t);
355 if (ret)
356 goto error_ret;
357
358 return iio_triggered_buffer_postenable(indio_dev);
359 error_ret:
360 return ret;
361 }
362
363 /* Turn all channels on again */
lis3l02dq_buffer_predisable(struct iio_dev * indio_dev)364 static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
365 {
366 u8 t;
367 int ret;
368
369 ret = iio_triggered_buffer_predisable(indio_dev);
370 if (ret)
371 goto error_ret;
372
373 ret = lis3l02dq_spi_read_reg_8(indio_dev,
374 LIS3L02DQ_REG_CTRL_1_ADDR,
375 &t);
376 if (ret)
377 goto error_ret;
378 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
379 LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
380 LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
381
382 ret = lis3l02dq_spi_write_reg_8(indio_dev,
383 LIS3L02DQ_REG_CTRL_1_ADDR,
384 t);
385
386 error_ret:
387 return ret;
388 }
389
/* Buffer setup: axis enables are narrowed on enable, restored on disable. */
static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
	.postenable = &lis3l02dq_buffer_postenable,
	.predisable = &lis3l02dq_buffer_predisable,
};
394
lis3l02dq_configure_buffer(struct iio_dev * indio_dev)395 int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
396 {
397 int ret;
398 struct iio_buffer *buffer;
399
400 buffer = iio_kfifo_allocate();
401 if (!buffer)
402 return -ENOMEM;
403
404 iio_device_attach_buffer(indio_dev, buffer);
405
406 buffer->scan_timestamp = true;
407 indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
408
409 /* Functions are NULL as we set handler below */
410 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
411 &lis3l02dq_trigger_handler,
412 0,
413 indio_dev,
414 "lis3l02dq_consumer%d",
415 indio_dev->id);
416
417 if (!indio_dev->pollfunc) {
418 ret = -ENOMEM;
419 goto error_iio_sw_rb_free;
420 }
421
422 indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
423 return 0;
424
425 error_iio_sw_rb_free:
426 iio_kfifo_free(indio_dev->buffer);
427 return ret;
428 }
429