// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_POLL_TIMEOUT		(2 * HZ)
#define VSC_TP_WAIT_FW_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* extra bytes clocked per SPI xfer to give the firmware time to respond */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)

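/*
 * On-wire packet layout (see VSC_TP_PACKET_SIZE()): a fixed header,
 * up to VSC_TP_MAX_MSG_SIZE payload bytes, then a CRC32 computed over
 * the header plus the payload. Multi-byte fields are little endian.
 */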
struct vsc_tp_packet_hdr {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
};

struct vsc_tp_packet {
	struct vsc_tp_packet_hdr hdr;
	__u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)];
};

struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* bind with mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffer */
	struct vsc_tp_packet *tx_buf;
	struct vsc_tp_packet *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;
	struct mutex event_notify_mutex;	/* protects event_notify + context */
	struct mutex mutex;			/* protects command download */
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};

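/*
 * The hard IRQ handler only records that the firmware asserted the
 * wakeuphost line and wakes anyone blocked in vsc_tp_wakeup_request();
 * event delivery is deferred to the threaded handler below.
 */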
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	guard(mutex)(&tp->event_notify_mutex);

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}

/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

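/*
 * Counterpart of vsc_tp_wakeup_request(): consume one pending assert
 * and deassert the wakeupfw pin (1 is inactive, see vsc_tp_reset()).
 */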
static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

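/*
 * Clock data in and out until a complete, CRC-protected response packet
 * has been assembled, or VSC_TP_MAX_XFER_COUNT transfers have elapsed.
 * Each pass scans for VSC_TP_PACKET_SYNC, then copies the byte stream
 * into the ack header, the caller's ibuf and the received CRC in turn,
 * sizing the next transfer with VSC_TP_NEXT_XFER_LEN().
 */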
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet_hdr ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				offset += cpy_len;

				if (src_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of rx buffer
 * Return: the length of the received data on success,
 *	otherwise a negative error code
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->hdr.sync = VSC_TP_PACKET_SYNC;
	pkt->hdr.cmd = cmd;
	pkt->hdr.len = cpu_to_le16(olen);
	pkt->hdr.seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt->hdr) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
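
/*
 * Illustrative call sequence, assuming the caller already owns a
 * vsc_tp handle; the command value and buffer sizes below are
 * hypothetical, not part of this driver:
 *
 *	u8 req[4] = {}, resp[16];
 *	int n;
 *
 *	n = vsc_tp_xfer(tp, 0x01, req, sizeof(req), resp, sizeof(resp));
 *	if (n < 0)
 *		return n;
 *
 * On success, n is the number of response bytes copied into resp.
 */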

/**
 * vsc_tp_rom_xfer - transfer data to rom code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len);
	if (ret)
		return ret;

	if (ibuf)
		be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words);

	return ret;
}

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set default host wakeup pin to non-active
	 * to avoid unexpected host irq interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);

	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);

/**
 * vsc_tp_need_read - check if device has data to send
 * @tp: vsc_tp device handle
 * Return: true if device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);

/**
 * vsc_tp_register_event_cb - register a callback function to receive events
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: opaque context passed back to the event callback
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			    void *context)
{
	guard(mutex)(&tp->event_notify_mutex);

	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
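
/*
 * Illustrative registration with a hypothetical callback; in practice
 * the callback is installed by the driver bound to the "intel_vsc"
 * platform device registered in vsc_tp_probe():
 *
 *	static void my_event_cb(void *context)
 *	{
 *		...
 *	}
 *
 *	vsc_tp_register_event_cb(tp, my_event_cb, my_context);
 */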

/**
 * vsc_tp_request_irq - request irq for vsc_tp device
 * @tp: vsc_tp device handle
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_request_irq(struct vsc_tp *tp)
{
	struct spi_device *spi = tp->spi;
	struct device *dev = &spi->dev;
	int ret;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);

/**
 * vsc_tp_free_irq - free irq for vsc_tp device
 * @tp: vsc_tp device handle
 */
void vsc_tp_free_irq(struct vsc_tp *tp)
{
	free_irq(tp->spi->irq, tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);

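/*
 * acpi_dev_for_each_child() callback: returning nonzero stops the walk
 * at the first child device, which is handed back through @data.
 */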
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

static int vsc_tp_probe(struct spi_device *spi)
{
	struct vsc_tp *tp;
	struct platform_device_info pinfo = {
		.name = "intel_vsc",
		.data = &tp,
		.size_data = sizeof(tp),
		.id = PLATFORM_DEVID_NONE,
	};
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);
	mutex_init(&tp->event_notify_mutex);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	free_irq(spi->irq, tp);

	mutex_destroy(&tp->event_notify_mutex);
	mutex_destroy(&tp->mutex);

	return ret;
}

static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	free_irq(spi->irq, tp);

	mutex_destroy(&tp->event_notify_mutex);
	mutex_destroy(&tp->mutex);
}

static void vsc_tp_shutdown(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);

	vsc_tp_reset(tp);

	free_irq(spi->irq, tp);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.shutdown = vsc_tp_shutdown,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");