// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1

/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
#define QMP_MSG_LEN			64

#define QMP_NUM_COOLING_RESOURCES	2

static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @pd_data: genpd data
 * @cooling_devs: thermal cooling devices
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct genpd_onecell_data pd_data;
	struct qmp_cooling_device *cooling_devs;
};

struct qmp_pd {
	struct qmp *qmp;
	struct generic_pm_domain pd;
};

#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
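
/*
 * Ring the doorbell towards the AOSS. The mailbox client is registered with
 * knows_txdone set, so transmission is completed immediately.
 */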
static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}
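
/*
 * Validate the descriptor in message RAM, then bring the local (mcore) link
 * and channel up, waiting for the remote (ucore) side to acknowledge each
 * step. On timeout the local state is taken back down before returning.
 */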
static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}
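
/* Take the local channel and link down and notify the remote side. */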
static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}
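
/*
 * The AOSS signals updates to the message RAM through this interrupt; wake
 * all waiters so they re-evaluate their wait conditions.
 */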
static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}
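
/*
 * The length word at @offset is cleared once the AOSS has consumed the
 * message, so an empty word means the mailbox is free for the next request.
 */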
static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}

/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @data: message to be sent
 * @len: length of the message
 *
 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
 * @len must be a multiple of 4 and not longer than the mailbox size. Access is
 * synchronized by this implementation.
 *
 * Return: 0 on success, negative errno on failure
 */
int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
	long time_left;
	int ret;

	if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data))
		return -EINVAL;

	if (WARN_ON(len + sizeof(u32) > qmp->size))
		return -EINVAL;

	if (WARN_ON(len % sizeof(u32)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 data, len / sizeof(u32));
	writel(len, qmp->msgram + qmp->offset);

	/* Read back len to confirm data written in message RAM */
	readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not ack channel\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	mutex_unlock(&qmp->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qmp_send);
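
/*
 * The QDSS clock is modelled as a clk_hw whose prepare/unprepare callbacks
 * ask the AOSS to turn the "qdss" clock resource on or off.
 */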
static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf, sizeof(buf));
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf, sizeof(buf));
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}
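
/* Request that the AOSS changes the load state of the given power domain. */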
static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
{
	char buf[QMP_MSG_LEN] = {};

	snprintf(buf, sizeof(buf),
		 "{class: image, res: load_state, name: %s, val: %s}",
		 res->pd.name, enable ? "on" : "off");
	return qmp_send(res->qmp, buf, sizeof(buf));
}

static int qmp_pd_power_on(struct generic_pm_domain *domain)
{
	return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
}

static int qmp_pd_power_off(struct generic_pm_domain *domain)
{
	return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
}

static const char * const sdm845_resources[] = {
	[AOSS_QMP_LS_CDSP] = "cdsp",
	[AOSS_QMP_LS_LPASS] = "adsp",
	[AOSS_QMP_LS_MODEM] = "modem",
	[AOSS_QMP_LS_SLPI] = "slpi",
	[AOSS_QMP_LS_SPSS] = "spss",
	[AOSS_QMP_LS_VENUS] = "venus",
};
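
/*
 * Register one generic power domain per load-state resource and expose them
 * through a onecell genpd provider on the QMP node.
 */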
static int qmp_pd_add(struct qmp *qmp)
{
	struct genpd_onecell_data *data = &qmp->pd_data;
	struct device *dev = qmp->dev;
	struct qmp_pd *res;
	size_t num = ARRAY_SIZE(sdm845_resources);
	int ret;
	int i;

	res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		res[i].qmp = qmp;
		res[i].pd.name = sdm845_resources[i];
		res[i].pd.power_on = qmp_pd_power_on;
		res[i].pd.power_off = qmp_pd_power_off;

		ret = pm_genpd_init(&res[i].pd, NULL, true);
		if (ret < 0) {
			dev_err(dev, "failed to init genpd\n");
			goto unroll_genpds;
		}

		data->domains[i] = &res[i].pd;
	}

	data->num_domains = i;

	ret = of_genpd_add_provider_onecell(dev->of_node, data);
	if (ret < 0)
		goto unroll_genpds;

	return 0;

unroll_genpds:
	for (i--; i >= 0; i--)
		pm_genpd_remove(data->domains[i]);

	return ret;
}

static void qmp_pd_remove(struct qmp *qmp)
{
	struct genpd_onecell_data *data = &qmp->pd_data;
	struct device *dev = qmp->dev;
	int i;

	of_genpd_del_provider(dev->of_node);

	for (i = 0; i < data->num_domains; i++)
		pm_genpd_remove(data->domains[i]);
}

static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = qmp_cdev_max_state;
	return 0;
}

static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;

	*state = qmp_cdev->state;
	return 0;
}

static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
	char buf[QMP_MSG_LEN] = {};
	bool cdev_state;
	int ret;

	/* Normalize state */
	cdev_state = !!state;

	if (qmp_cdev->state == state)
		return 0;

	snprintf(buf, sizeof(buf),
		 "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
			qmp_cdev->name,
			cdev_state ? "on" : "off");

	ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));

	if (!ret)
		qmp_cdev->state = cdev_state;

	return ret;
}

static struct thermal_cooling_device_ops qmp_cooling_device_ops = {
	.get_max_state = qmp_cdev_get_max_state,
	.get_cur_state = qmp_cdev_get_cur_state,
	.set_cur_state = qmp_cdev_set_cur_state,
};
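
/*
 * Each available child node of the QMP device node carrying a #cooling-cells
 * property is registered as a thermal cooling device named after that node.
 */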
static int qmp_cooling_device_add(struct qmp *qmp,
				  struct qmp_cooling_device *qmp_cdev,
				  struct device_node *node)
{
	char *cdev_name = (char *)node->name;

	qmp_cdev->qmp = qmp;
	qmp_cdev->state = !qmp_cdev_max_state;
	qmp_cdev->name = cdev_name;
	qmp_cdev->cdev = devm_thermal_of_cooling_device_register
				(qmp->dev, node,
				cdev_name,
				qmp_cdev, &qmp_cooling_device_ops);

	if (IS_ERR(qmp_cdev->cdev))
		dev_err(qmp->dev, "unable to register %s cooling device\n",
			cdev_name);

	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
}

static int qmp_cooling_devices_register(struct qmp *qmp)
{
	struct device_node *np, *child;
	int count = 0;
	int ret;

	np = qmp->dev->of_node;

	qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
					 sizeof(*qmp->cooling_devs),
					 GFP_KERNEL);

	if (!qmp->cooling_devs)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		if (!of_find_property(child, "#cooling-cells", NULL))
			continue;
		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
					     child);
		if (ret) {
			of_node_put(child);
			goto unroll;
		}
	}

	if (!count)
		devm_kfree(qmp->dev, qmp->cooling_devs);

	return 0;

unroll:
	while (--count >= 0)
		thermal_cooling_device_unregister
			(qmp->cooling_devs[count].cdev);
	devm_kfree(qmp->dev, qmp->cooling_devs);

	return ret;
}

static void qmp_cooling_devices_remove(struct qmp *qmp)
{
	int i;

	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}
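
/*
 * qmp_get(), qmp_send() and qmp_put() make up the in-kernel client interface
 * of this driver. A client's DT node must reference the QMP node through a
 * "qcom,qmp" phandle. A minimal usage sketch, illustrative only (it reuses
 * the qdss message and QMP_MSG_LEN from this file; real clients define their
 * own messages and typically keep the handle until teardown):
 *
 *	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
 *	struct qmp *qmp;
 *	int ret;
 *
 *	qmp = qmp_get(dev);
 *	if (IS_ERR(qmp))
 *		return PTR_ERR(qmp);
 *
 *	ret = qmp_send(qmp, buf, sizeof(buf));
 *	qmp_put(qmp);
 */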
/**
 * qmp_get() - get a qmp handle from a device
 * @dev: client device pointer
 *
 * Return: handle to qmp device on success, ERR_PTR() on failure
 */
struct qmp *qmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct qmp *qmp;

	if (!dev || !dev->of_node)
		return ERR_PTR(-EINVAL);

	np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-EINVAL);

	qmp = platform_get_drvdata(pdev);

	if (!qmp) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}
	return qmp;
}
EXPORT_SYMBOL(qmp_get);

/**
 * qmp_put() - release a qmp handle
 * @qmp: qmp handle obtained from qmp_get()
 */
void qmp_put(struct qmp *qmp)
{
	/*
	 * Match get_device() inside of_find_device_by_node() in
	 * qmp_get()
	 */
	if (!IS_ERR_OR_NULL(qmp))
		put_device(qmp->dev);
}
EXPORT_SYMBOL(qmp_put);
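
/*
 * Probe maps the message RAM, requests the mailbox channel and AOSS
 * interrupt, opens the QMP channel and then registers the qdss clock, power
 * domain and cooling device consumers of that channel.
 */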
static int qmp_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct qmp *qmp;
	int irq;
	int ret;

	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = &pdev->dev;
	init_waitqueue_head(&qmp->event);
	mutex_init(&qmp->tx_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qmp->msgram))
		return PTR_ERR(qmp->msgram);

	qmp->mbox_client.dev = &pdev->dev;
	qmp->mbox_client.knows_txdone = true;
	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
	if (IS_ERR(qmp->mbox_chan)) {
		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
		return PTR_ERR(qmp->mbox_chan);
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
			       "aoss-qmp", qmp);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto err_free_mbox;
	}

	ret = qmp_open(qmp);
	if (ret < 0)
		goto err_free_mbox;

	ret = qmp_qdss_clk_add(qmp);
	if (ret)
		goto err_close_qmp;

	ret = qmp_pd_add(qmp);
	if (ret)
		goto err_remove_qdss_clk;

	ret = qmp_cooling_devices_register(qmp);
	if (ret)
		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");

	platform_set_drvdata(pdev, qmp);

	return 0;

err_remove_qdss_clk:
	qmp_qdss_clk_remove(qmp);
err_close_qmp:
	qmp_close(qmp);
err_free_mbox:
	mbox_free_channel(qmp->mbox_chan);

	return ret;
}

static int qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	qmp_qdss_clk_remove(qmp);
	qmp_pd_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);

	return 0;
}

static const struct of_device_id qmp_dt_match[] = {
	{ .compatible = "qcom,sc7180-aoss-qmp", },
	{ .compatible = "qcom,sc7280-aoss-qmp", },
	{ .compatible = "qcom,sdm845-aoss-qmp", },
	{ .compatible = "qcom,sm8150-aoss-qmp", },
	{ .compatible = "qcom,sm8250-aoss-qmp", },
	{ .compatible = "qcom,sm8350-aoss-qmp", },
	{ .compatible = "qcom,aoss-qmp", },
	{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);

static struct platform_driver qmp_driver = {
	.driver = {
		.name		= "qcom_aoss_qmp",
		.of_match_table	= qmp_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qmp_probe,
	.remove	= qmp_remove,
};
module_platform_driver(qmp_driver);

MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
MODULE_LICENSE("GPL v2");