1 #include <linux/interrupt.h>
2 #include <linux/gpio.h>
3 #include <linux/mutex.h>
4 #include <linux/kernel.h>
5 #include <linux/spi/spi.h>
6 #include <linux/slab.h>
7 #include <linux/export.h>
8
9 #include <linux/iio/iio.h>
10 #include <linux/iio/kfifo_buf.h>
11 #include <linux/iio/trigger.h>
12 #include <linux/iio/trigger_consumer.h>
13 #include "lis3l02dq.h"
14
15 /**
16 * combine_8_to_16() utility function to munge two u8s into u16
17 **/
combine_8_to_16(u8 lower,u8 upper)18 static inline u16 combine_8_to_16(u8 lower, u8 upper)
19 {
20 u16 _lower = lower;
21 u16 _upper = upper;
22
23 return _lower | (_upper << 8);
24 }
25
26 /**
27 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
28 **/
lis3l02dq_data_rdy_trig_poll(int irq,void * private)29 irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
30 {
31 struct iio_dev *indio_dev = private;
32 struct lis3l02dq_state *st = iio_priv(indio_dev);
33
34 if (st->trigger_on) {
35 iio_trigger_poll(st->trig);
36 return IRQ_HANDLED;
37 }
38 return IRQ_WAKE_THREAD;
39 }
40
/*
 * Transmit template for reading the six output registers (X/Y/Z, low and
 * high bytes).  Each pair is the read command for one register followed by
 * a dummy byte clocked out while the reply is received; entry 4*i is the
 * low-byte read and entry 4*i+2 the high-byte read for channel i.
 */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
49
/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @indio_dev: IIO device state
 * @rx_array: (dma capable) receive array, must be at least
 * 4*number of channels; may be NULL to clock the registers out
 * purely to clear a pending data ready condition
 *
 * Builds two 2-byte SPI transfers (low byte, then high byte) per enabled
 * scan channel and runs them as one message.  Returns the spi_sync()
 * result (0 on success, negative errno on failure).
 **/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	/* Two transfers per enabled channel; kcalloc zeroes the array so
	 * untouched spi_transfer fields are already in a sane state. */
	xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2,
			sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	/* buf_lock serialises use of the shared st->tx buffer */
	mutex_lock(&st->buf_lock);

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
		if (test_bit(i, indio_dev->active_scan_mask)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}

	/* After these are transmitted, the rx_buff should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}
110
/**
 * lis3l02dq_get_buffer_element() - read one scan's worth of samples
 * @indio_dev: IIO device state
 * @buf: destination, written as packed s16 values (one per enabled channel)
 *
 * Returns the number of bytes written to @buf on success, negative errno
 * on failure.
 **/
static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
					u8 *buf)
{
	int scan_count = bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);
	s16 *samples = (s16 *)buf;
	u8 *raw;
	int ret, i;

	/* 4 raw bytes (cmd + data, low and high) come back per channel */
	raw = kzalloc(4 * scan_count, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = lis3l02dq_read_all(indio_dev, raw);
	if (ret < 0)
		goto out;

	/* Data bytes sit at odd offsets: low at 4i+1, high at 4i+3 */
	for (i = 0; i < scan_count; i++)
		samples[i] = combine_8_to_16(raw[i * 4 + 1],
					     raw[i * 4 + 3]);
	ret = scan_count * sizeof(samples[0]);
out:
	kfree(raw);
	return ret;
}
135
lis3l02dq_trigger_handler(int irq,void * p)136 static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
137 {
138 struct iio_poll_func *pf = p;
139 struct iio_dev *indio_dev = pf->indio_dev;
140 int len = 0;
141 char *data;
142
143 data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
144 if (data == NULL)
145 goto done;
146
147 if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
148 len = lis3l02dq_get_buffer_element(indio_dev, data);
149
150 iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
151
152 kfree(data);
153 done:
154 iio_trigger_notify_done(indio_dev->trig);
155 return IRQ_HANDLED;
156 }
157
/* Caller responsible for locking as necessary. */
/*
 * __lis3l02dq_write_data_ready_config() - switch data ready generation
 * @indio_dev: IIO device state
 * @state: true to enable data ready generation, false to disable
 *
 * Reads CTRL_2, and only writes it back when the requested state differs
 * from the current one.  Also keeps st->trigger_on in sync so the hard
 * irq handler knows whether to route to the trigger or the event thread.
 * Returns 0 on success or a negative errno.
 */
static int
__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		/* Disable the data ready signal */
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* The double write is to overcome a hardware bug? */
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		st->trigger_on = false;
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* If not set, enable requested
		 * first disable all events */
		ret = lis3l02dq_disable_all_events(indio_dev);
		if (ret < 0)
			goto error_ret;

		/* NOTE(review): this relies on lis3l02dq_disable_all_events()
		 * returning the resulting control value (non-negative) on
		 * success — confirm against its definition. */
		valold = ret |
			LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* Set before the write so the irq handler sees it as soon
		 * as the hardware can fire */
		st->trigger_on = true;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}
217
218 /**
219 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
220 *
221 * If disabling the interrupt also does a final read to ensure it is clear.
222 * This is only important in some cases where the scan enable elements are
223 * switched before the buffer is reenabled.
224 **/
lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger * trig,bool state)225 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
226 bool state)
227 {
228 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
229 int ret = 0;
230 u8 t;
231
232 __lis3l02dq_write_data_ready_config(indio_dev, state);
233 if (!state) {
234 /*
235 * A possible quirk with the handler is currently worked around
236 * by ensuring outstanding read events are cleared.
237 */
238 ret = lis3l02dq_read_all(indio_dev, NULL);
239 }
240 lis3l02dq_spi_read_reg_8(indio_dev,
241 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
242 &t);
243 return ret;
244 }
245
246 /**
247 * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger
248 * @trig: the datardy trigger
249 */
lis3l02dq_trig_try_reen(struct iio_trigger * trig)250 static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
251 {
252 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
253 struct lis3l02dq_state *st = iio_priv(indio_dev);
254 int i;
255
256 /* If gpio still high (or high again)
257 * In theory possible we will need to do this several times */
258 for (i = 0; i < 5; i++)
259 if (gpio_get_value(st->gpio))
260 lis3l02dq_read_all(indio_dev, NULL);
261 else
262 break;
263 if (i == 5)
264 pr_info("Failed to clear the interrupt for lis3l02dq\n");
265
266 /* irq reenabled so success! */
267 return 0;
268 }
269
/* Trigger operations: state switching plus the gpio-drain re-enable hook */
static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
	.try_reenable = &lis3l02dq_trig_try_reen,
};
275
lis3l02dq_probe_trigger(struct iio_dev * indio_dev)276 int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
277 {
278 int ret;
279 struct lis3l02dq_state *st = iio_priv(indio_dev);
280
281 st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
282 if (!st->trig) {
283 ret = -ENOMEM;
284 goto error_ret;
285 }
286
287 st->trig->dev.parent = &st->us->dev;
288 st->trig->ops = &lis3l02dq_trigger_ops;
289 iio_trigger_set_drvdata(st->trig, indio_dev);
290 ret = iio_trigger_register(st->trig);
291 if (ret)
292 goto error_free_trig;
293
294 return 0;
295
296 error_free_trig:
297 iio_trigger_free(st->trig);
298 error_ret:
299 return ret;
300 }
301
lis3l02dq_remove_trigger(struct iio_dev * indio_dev)302 void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
303 {
304 struct lis3l02dq_state *st = iio_priv(indio_dev);
305
306 iio_trigger_unregister(st->trig);
307 iio_trigger_free(st->trig);
308 }
309
lis3l02dq_unconfigure_buffer(struct iio_dev * indio_dev)310 void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
311 {
312 iio_dealloc_pollfunc(indio_dev->pollfunc);
313 iio_kfifo_free(indio_dev->buffer);
314 }
315
lis3l02dq_buffer_postenable(struct iio_dev * indio_dev)316 static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
317 {
318 /* Disable unwanted channels otherwise the interrupt will not clear */
319 u8 t;
320 int ret;
321 bool oneenabled = false;
322
323 ret = lis3l02dq_spi_read_reg_8(indio_dev,
324 LIS3L02DQ_REG_CTRL_1_ADDR,
325 &t);
326 if (ret)
327 goto error_ret;
328
329 if (test_bit(0, indio_dev->active_scan_mask)) {
330 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
331 oneenabled = true;
332 } else
333 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
334 if (test_bit(1, indio_dev->active_scan_mask)) {
335 t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
336 oneenabled = true;
337 } else
338 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
339 if (test_bit(2, indio_dev->active_scan_mask)) {
340 t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
341 oneenabled = true;
342 } else
343 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
344
345 if (!oneenabled) /* what happens in this case is unknown */
346 return -EINVAL;
347 ret = lis3l02dq_spi_write_reg_8(indio_dev,
348 LIS3L02DQ_REG_CTRL_1_ADDR,
349 t);
350 if (ret)
351 goto error_ret;
352
353 return iio_triggered_buffer_postenable(indio_dev);
354 error_ret:
355 return ret;
356 }
357
358 /* Turn all channels on again */
lis3l02dq_buffer_predisable(struct iio_dev * indio_dev)359 static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
360 {
361 u8 t;
362 int ret;
363
364 ret = iio_triggered_buffer_predisable(indio_dev);
365 if (ret)
366 goto error_ret;
367
368 ret = lis3l02dq_spi_read_reg_8(indio_dev,
369 LIS3L02DQ_REG_CTRL_1_ADDR,
370 &t);
371 if (ret)
372 goto error_ret;
373 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
374 LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
375 LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
376
377 ret = lis3l02dq_spi_write_reg_8(indio_dev,
378 LIS3L02DQ_REG_CTRL_1_ADDR,
379 t);
380
381 error_ret:
382 return ret;
383 }
384
/* Buffer setup: trim axes on enable, restore all axes on disable */
static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
	.postenable = &lis3l02dq_buffer_postenable,
	.predisable = &lis3l02dq_buffer_predisable,
};
389
lis3l02dq_configure_buffer(struct iio_dev * indio_dev)390 int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
391 {
392 int ret;
393 struct iio_buffer *buffer;
394
395 buffer = iio_kfifo_allocate(indio_dev);
396 if (!buffer)
397 return -ENOMEM;
398
399 iio_device_attach_buffer(indio_dev, buffer);
400
401 buffer->scan_timestamp = true;
402 indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
403
404 /* Functions are NULL as we set handler below */
405 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
406 &lis3l02dq_trigger_handler,
407 0,
408 indio_dev,
409 "lis3l02dq_consumer%d",
410 indio_dev->id);
411
412 if (indio_dev->pollfunc == NULL) {
413 ret = -ENOMEM;
414 goto error_iio_sw_rb_free;
415 }
416
417 indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
418 return 0;
419
420 error_iio_sw_rb_free:
421 iio_kfifo_free(indio_dev->buffer);
422 return ret;
423 }
424