// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ		8
struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

struct pcap_adc_sync_request {
	u16 res[2];
	struct completion completion;
};

struct pcap_chip {
	struct spi_device *spi;

	/* IO */
	u32 buf;
	spinlock_t io_lock;

	/* IRQ */
	unsigned int irq_base;
	u32 msr;
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

	/* ADC */
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;
	u8 adc_tail;
	spinlock_t adc_lock;
};

/* IO */
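/*
 * Register access (as implemented below) is a single full-duplex 32-bit SPI
 * frame: the read/write op bit and the register address go into the upper
 * bits and the register value into the lower bits (see the PCAP_REGISTER_*
 * definitions in <linux/mfd/ezx-pcap.h>).  The chip's reply is clocked in on
 * the same transfer, which is why tx_buf and rx_buf both point at pcap->buf.
 */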
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcap->io_lock, flags);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcap->io_lock, flags);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, value);
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

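/*
 * Read-modify-write helper: only the bits selected by @mask are changed,
 * everything else in the register is preserved.  Illustrative usage (the
 * register and bit names here are placeholders, not real definitions):
 *
 *	ezx_pcap_set_bits(pcap, PCAP_REG_FOO, PCAP_FOO_EN, PCAP_FOO_EN);
 *
 * would set PCAP_FOO_EN, while passing 0 as the last argument would clear it.
 */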
int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	unsigned long flags;
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	spin_lock_irqsave(&pcap->io_lock, flags);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);

/* IRQ */
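/*
 * PCAP interrupts are demultiplexed from a single chained GPIO interrupt.
 * Because the mask (MSR) and status (ISR) registers live behind SPI, which
 * may sleep, mask/unmask only update the cached pcap->msr here; the actual
 * register writes and irq servicing are deferred to the "pcapd" workqueue.
 */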
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

static void pcap_mask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name		= "pcap",
	.irq_disable	= pcap_mask_irq,
	.irq_mask	= pcap_mask_irq,
	.irq_unmask	= pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

		/* We can't service/ack irqs that are assigned to port 2 */
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1)
				generic_handle_irq_safe(irq);
		}
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(pdata->gpio));
}

static void pcap_irq_handler(struct irq_desc *desc)
{
	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	queue_work(pcap->workqueue, &pcap->isr_work);
}

/* ADC */
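/*
 * ADC requests are kept in a small ring (adc_queue[] with PCAP_ADC_MAXQ
 * slots, indexed by adc_head/adc_tail).  pcap_adc_async() enqueues a request
 * and starts a conversion; the ADCDONE interrupt reads the two channel
 * results, invokes the caller's callback and triggers the next queued
 * conversion, disabling the ADC when the queue runs empty.
 */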
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&pcap->adc_lock, flags);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	unsigned long flags;
	u32 tmp;
	u8 head;

	spin_lock_irqsave(&pcap->adc_lock, flags);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
		/* queue is empty, save power */
		pcap_disable_adc(pcap);
		spin_unlock_irqrestore(&pcap->adc_lock, flags);
		return;
	}
	/* start conversion on requested bank, save TS_M bits */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	spin_lock(&pcap->adc_lock);
	req = pcap->adc_queue[pcap->adc_head];

	if (WARN(!req, "adc irq without pending request\n")) {
		spin_unlock(&pcap->adc_lock);
		return IRQ_HANDLED;
	}

	/* read requested channels results */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	spin_unlock(&pcap->adc_lock);

	/* pass the results and release memory */
	req->callback(req->data, res);
	kfree(req);

	/* trigger next conversion (if any) on queue */
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
						void *callback, void *data)
{
	struct pcap_adc_request *req;
	unsigned long irq_flags;

	/* This will be freed after we have a result */
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	spin_lock_irqsave(&pcap->adc_lock, irq_flags);
	if (pcap->adc_queue[pcap->adc_tail]) {
		spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);
		kfree(req);
		return -EBUSY;
	}
	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);

	/* start conversion */
	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

static void pcap_adc_sync_cb(void *param, u16 res[])
{
	struct pcap_adc_sync_request *req = param;

	req->res[0] = res[0];
	req->res[1] = res[1];
	complete(&req->completion);
}

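/*
 * Blocking wrapper around pcap_adc_async(): it sleeps until the ADCDONE
 * interrupt delivers both channel results, so it must not be called from
 * atomic context.  Illustrative usage from a client driver (the bank, flags
 * and channel numbers below are placeholders; real callers use the
 * definitions from <linux/mfd/ezx-pcap.h>):
 *
 *	u8 ch[2] = { 0, 1 };
 *	u16 res[2];
 *	int err;
 *
 *	err = pcap_adc_sync(pcap, 0, 0, ch, res);
 *	if (!err)
 *		pr_info("ch0=%u ch1=%u\n", res[0], res[1]);
 */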
int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
								u16 res[])
{
	struct pcap_adc_sync_request sync_data;
	int ret;

	init_completion(&sync_data.completion);
	ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
								&sync_data);
	if (ret)
		return ret;
	wait_for_completion(&sync_data.completion);
	res[0] = sync_data.res[0];
	res[1] = sync_data.res[1];

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);

/* subdevs */
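/*
 * Each entry in pdata->subdevs is registered as a child platform device of
 * the SPI device, so ezx_pcap_remove() can later tear them all down with
 * device_for_each_child().
 */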
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static int pcap_add_subdev(struct pcap_chip *pcap,
						struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

static void ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = spi_get_drvdata(spi);
	unsigned long flags;
	int i;

	/* remove all registered subdevs */
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

	/* cleanup ADC */
	spin_lock_irqsave(&pcap->adc_lock, flags);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);

	/* cleanup irqchip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);
}

static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	spin_lock_init(&pcap->io_lock);
	spin_lock_init(&pcap->adc_lock);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		irq_set_chip_data(i, pcap);
		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
	irq_set_irq_wake(spi->irq, 1);

	/* ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
					PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
				pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe	= ezx_pcap_probe,
	.remove = ezx_pcap_remove,
	.driver = {
		.name	= "ezx-pcap",
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");