1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/module.h>
8 #include <linux/device.h>
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/mutex.h>
15 #include <linux/interrupt.h>
16 #include <linux/scatterlist.h>
17 #include <linux/mei_cl_bus.h>
18 
19 #include "mei_dev.h"
20 #include "client.h"
21 
22 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
23 
24 /**
25  * __mei_cl_send - internal client send (write)
26  *
27  * @cl: host client
28  * @buf: buffer to send
29  * @length: buffer length
30  * @vtag: virtual tag
31  * @mode: sending mode
32  *
33  * Return: written size in bytes or < 0 on error
34  */
35 ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
36 		      unsigned int mode)
37 {
38 	return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
39 }
40 
41 /**
42  * __mei_cl_send_timeout - internal client send (write)
43  *
44  * @cl: host client
45  * @buf: buffer to send
46  * @length: buffer length
47  * @vtag: virtual tag
48  * @mode: sending mode
49  * @timeout: send timeout in milliseconds.
50  *           effective only for blocking writes, i.e. when the MEI_CL_IO_TX_BLOCKING mode bit is set.
51  *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
52  *
53  * Return: written size in bytes or < 0 on error
54  */
55 ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
56 			      unsigned int mode, unsigned long timeout)
57 {
58 	struct mei_device *bus;
59 	struct mei_cl_cb *cb;
60 	ssize_t rets;
61 
62 	if (WARN_ON(!cl || !cl->dev))
63 		return -ENODEV;
64 
65 	bus = cl->dev;
66 
67 	mutex_lock(&bus->device_lock);
68 	if (bus->dev_state != MEI_DEV_ENABLED &&
69 	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
70 		rets = -ENODEV;
71 		goto out;
72 	}
73 
74 	if (!mei_cl_is_connected(cl)) {
75 		rets = -ENODEV;
76 		goto out;
77 	}
78 
79 	/* Check if we have an ME client device */
80 	if (!mei_me_cl_is_active(cl->me_cl)) {
81 		rets = -ENOTTY;
82 		goto out;
83 	}
84 
85 	if (vtag) {
86 		/* Check if vtag is supported by client */
87 		rets = mei_cl_vt_support_check(cl);
88 		if (rets)
89 			goto out;
90 	}
91 
92 	if (length > mei_cl_mtu(cl)) {
93 		rets = -EFBIG;
94 		goto out;
95 	}
96 
97 	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
98 		mutex_unlock(&bus->device_lock);
99 		rets = wait_event_interruptible(cl->tx_wait,
100 				cl->writing_state == MEI_WRITE_COMPLETE ||
101 				(!mei_cl_is_connected(cl)));
102 		mutex_lock(&bus->device_lock);
103 		if (rets) {
104 			if (signal_pending(current))
105 				rets = -EINTR;
106 			goto out;
107 		}
108 		if (!mei_cl_is_connected(cl)) {
109 			rets = -ENODEV;
110 			goto out;
111 		}
112 	}
113 
114 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
115 	if (!cb) {
116 		rets = -ENOMEM;
117 		goto out;
118 	}
119 	cb->vtag = vtag;
120 
121 	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
122 	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
123 	memcpy(cb->buf.data, buf, length);
124 	/* hack we point data to header */
125 	if (mode & MEI_CL_IO_SGL) {
126 		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
127 		cb->buf.data = NULL;
128 		cb->buf.size = 0;
129 	}
130 
131 	rets = mei_cl_write(cl, cb, timeout);
132 
133 	if (mode & MEI_CL_IO_SGL && rets == 0)
134 		rets = length;
135 
136 out:
137 	mutex_unlock(&bus->device_lock);
138 
139 	return rets;
140 }
141 
142 /**
143  * __mei_cl_recv - internal client receive (read)
144  *
145  * @cl: host client
146  * @buf: buffer to receive
147  * @length: buffer length
148  * @mode: io mode
149  * @vtag: virtual tag
150  * @timeout: recv timeout, 0 for infinite timeout
151  *
152  * Return: read size in bytes or < 0 on error
153  */
154 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
155 		      unsigned int mode, unsigned long timeout)
156 {
157 	struct mei_device *bus;
158 	struct mei_cl_cb *cb;
159 	size_t r_length;
160 	ssize_t rets;
161 	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
162 
163 	if (WARN_ON(!cl || !cl->dev))
164 		return -ENODEV;
165 
166 	bus = cl->dev;
167 
168 	mutex_lock(&bus->device_lock);
169 	if (bus->dev_state != MEI_DEV_ENABLED &&
170 	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
171 		rets = -ENODEV;
172 		goto out;
173 	}
174 
175 	cb = mei_cl_read_cb(cl, NULL);
176 	if (cb)
177 		goto copy;
178 
179 	rets = mei_cl_read_start(cl, length, NULL);
180 	if (rets && rets != -EBUSY)
181 		goto out;
182 
183 	if (nonblock) {
184 		rets = -EAGAIN;
185 		goto out;
186 	}
187 
188 	/* wait on event only if there is no other waiter */
189 	/* synchronized under device mutex */
190 	if (!waitqueue_active(&cl->rx_wait)) {
191 
192 		mutex_unlock(&bus->device_lock);
193 
194 		if (timeout) {
195 			rets = wait_event_interruptible_timeout
196 					(cl->rx_wait,
197 					mei_cl_read_cb(cl, NULL) ||
198 					(!mei_cl_is_connected(cl)),
199 					msecs_to_jiffies(timeout));
200 			if (rets == 0)
201 				return -ETIME;
202 			if (rets < 0) {
203 				if (signal_pending(current))
204 					return -EINTR;
205 				return -ERESTARTSYS;
206 			}
207 		} else {
208 			if (wait_event_interruptible
209 					(cl->rx_wait,
210 					mei_cl_read_cb(cl, NULL) ||
211 					(!mei_cl_is_connected(cl)))) {
212 				if (signal_pending(current))
213 					return -EINTR;
214 				return -ERESTARTSYS;
215 			}
216 		}
217 
218 		mutex_lock(&bus->device_lock);
219 
220 		if (!mei_cl_is_connected(cl)) {
221 			rets = -ENODEV;
222 			goto out;
223 		}
224 	}
225 
226 	cb = mei_cl_read_cb(cl, NULL);
227 	if (!cb) {
228 		rets = 0;
229 		goto out;
230 	}
231 
232 copy:
233 	if (cb->status) {
234 		rets = cb->status;
235 		goto free;
236 	}
237 
238 	/* for the GSC type - copy the extended header to the buffer */
239 	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
240 		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
241 		memcpy(buf, cb->ext_hdr, r_length);
242 	} else {
243 		r_length = min_t(size_t, length, cb->buf_idx);
244 		memcpy(buf, cb->buf.data, r_length);
245 	}
246 	rets = r_length;
247 
248 	if (vtag)
249 		*vtag = cb->vtag;
250 
251 free:
252 	mei_cl_del_rd_completed(cl, cb);
253 out:
254 	mutex_unlock(&bus->device_lock);
255 
256 	return rets;
257 }
258 
259 /**
260  * mei_cldev_send_vtag - me device send with vtag  (write)
261  *
262  * @cldev: me client device
263  * @buf: buffer to send
264  * @length: buffer length
265  * @vtag: virtual tag
266  *
267  * Return:
268  *  * written size in bytes
269  *  * < 0 on error
270  */
271 
272 ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
273 			    size_t length, u8 vtag)
274 {
275 	struct mei_cl *cl = cldev->cl;
276 
277 	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
278 }
279 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
280 
281 /**
282  * mei_cldev_recv_vtag - client receive with vtag (read)
283  *
284  * @cldev: me client device
285  * @buf: buffer to receive
286  * @length: buffer length
287  * @vtag: virtual tag
288  *
289  * Return:
290  * * read size in bytes
291  * *  < 0 on error
292  */
293 
294 ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
295 			    u8 *vtag)
296 {
297 	struct mei_cl *cl = cldev->cl;
298 
299 	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
300 }
301 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
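/*
 * Illustrative sketch (not part of this driver): a bus client driver that
 * talks to a vtag-aware ME client could pair mei_cldev_send_vtag() and
 * mei_cldev_recv_vtag() roughly like this. The command byte, buffer size
 * and vtag value are hypothetical.
 *
 *	u8 req[] = { 0x01 };
 *	u8 resp[64];
 *	u8 vtag = 1, rsp_vtag;
 *	ssize_t ret;
 *
 *	ret = mei_cldev_send_vtag(cldev, req, sizeof(req), vtag);
 *	if (ret < 0)
 *		return ret;
 *	ret = mei_cldev_recv_vtag(cldev, resp, sizeof(resp), &rsp_vtag);
 *	if (ret < 0)
 *		return ret;
 */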
302 
303 /**
304  * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
305  *
306  * @cldev: me client device
307  * @buf: buffer to receive
308  * @length: buffer length
309  * @vtag: virtual tag
310  *
311  * Return:
312  * * read size in bytes
313  * * -EAGAIN if the function would block.
314  * * < 0 on other error
315  */
316 ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
317 				     size_t length, u8 *vtag)
318 {
319 	struct mei_cl *cl = cldev->cl;
320 
321 	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
322 }
323 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
324 
325 /**
326  * mei_cldev_send - me device send  (write)
327  *
328  * @cldev: me client device
329  * @buf: buffer to send
330  * @length: buffer length
331  *
332  * Return:
333  *  * written size in bytes
334  *  * < 0 on error
335  */
336 ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
337 {
338 	return mei_cldev_send_vtag(cldev, buf, length, 0);
339 }
340 EXPORT_SYMBOL_GPL(mei_cldev_send);
341 
342 /**
343  * mei_cldev_recv - client receive (read)
344  *
345  * @cldev: me client device
346  * @buf: buffer to receive
347  * @length: buffer length
348  *
349  * Return: read size in bytes or < 0 on error
350  */
351 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
352 {
353 	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
354 }
355 EXPORT_SYMBOL_GPL(mei_cldev_recv);
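/*
 * Illustrative sketch (not part of this driver): the untagged helpers are
 * the usual way a bus client driver performs a blocking request/response
 * exchange. The message layout and buffer sizes below are hypothetical;
 * on success the receive call returns the number of bytes copied.
 *
 *	u8 cmd[] = { 0x0a, 0x00 };
 *	u8 reply[128];
 *	ssize_t ret;
 *
 *	ret = mei_cldev_send(cldev, cmd, sizeof(cmd));
 *	if (ret < 0)
 *		return ret;
 *	ret = mei_cldev_recv(cldev, reply, sizeof(reply));
 *	if (ret < 0)
 *		return ret;
 */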
356 
357 /**
358  * mei_cldev_recv_nonblock - non block client receive (read)
359  *
360  * @cldev: me client device
361  * @buf: buffer to receive
362  * @length: buffer length
363  *
364  * Return: read size in bytes or < 0 on error
365  *         -EAGAIN if the function would block.
366  */
367 ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
368 				size_t length)
369 {
370 	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
371 }
372 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
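/*
 * Illustrative sketch (not part of this driver): the non-blocking variant
 * returns -EAGAIN when no message is queued, so callers typically retry
 * later, e.g. from an Rx callback. The buffer size is hypothetical.
 *
 *	u8 reply[64];
 *	ssize_t ret;
 *
 *	ret = mei_cldev_recv_nonblock(cldev, reply, sizeof(reply));
 *	if (ret == -EAGAIN)
 *		return 0;
 *	if (ret < 0)
 *		return ret;
 */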
373 
374 /**
375  * mei_cl_bus_rx_work - dispatch rx event for a bus device
376  *
377  * @work: work
378  */
379 static void mei_cl_bus_rx_work(struct work_struct *work)
380 {
381 	struct mei_cl_device *cldev;
382 	struct mei_device *bus;
383 
384 	cldev = container_of(work, struct mei_cl_device, rx_work);
385 
386 	bus = cldev->bus;
387 
388 	if (cldev->rx_cb)
389 		cldev->rx_cb(cldev);
390 
391 	mutex_lock(&bus->device_lock);
392 	if (mei_cl_is_connected(cldev->cl))
393 		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
394 	mutex_unlock(&bus->device_lock);
395 }
396 
397 /**
398  * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
399  *
400  * @work: work
401  */
402 static void mei_cl_bus_notif_work(struct work_struct *work)
403 {
404 	struct mei_cl_device *cldev;
405 
406 	cldev = container_of(work, struct mei_cl_device, notif_work);
407 
408 	if (cldev->notif_cb)
409 		cldev->notif_cb(cldev);
410 }
411 
412 /**
413  * mei_cl_bus_notify_event - schedule notify cb on bus client
414  *
415  * @cl: host client
416  *
417  * Return: true if event was scheduled
418  *         false if the client is not waiting for event
419  */
420 bool mei_cl_bus_notify_event(struct mei_cl *cl)
421 {
422 	struct mei_cl_device *cldev = cl->cldev;
423 
424 	if (!cldev || !cldev->notif_cb)
425 		return false;
426 
427 	if (!cl->notify_ev)
428 		return false;
429 
430 	schedule_work(&cldev->notif_work);
431 
432 	cl->notify_ev = false;
433 
434 	return true;
435 }
436 
437 /**
438  * mei_cl_bus_rx_event - schedule rx event
439  *
440  * @cl: host client
441  *
442  * Return: true if event was scheduled
443  *         false if the client is not waiting for event
444  */
445 bool mei_cl_bus_rx_event(struct mei_cl *cl)
446 {
447 	struct mei_cl_device *cldev = cl->cldev;
448 
449 	if (!cldev || !cldev->rx_cb)
450 		return false;
451 
452 	schedule_work(&cldev->rx_work);
453 
454 	return true;
455 }
456 
457 /**
458  * mei_cldev_register_rx_cb - register Rx event callback
459  *
460  * @cldev: me client device
461  * @rx_cb: callback function
462  *
463  * Return: 0 on success
464  *         -EALREADY if a callback is already registered
465  *         <0 on other errors
466  */
467 int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
468 {
469 	struct mei_device *bus = cldev->bus;
470 	int ret;
471 
472 	if (!rx_cb)
473 		return -EINVAL;
474 	if (cldev->rx_cb)
475 		return -EALREADY;
476 
477 	cldev->rx_cb = rx_cb;
478 	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
479 
480 	mutex_lock(&bus->device_lock);
481 	if (mei_cl_is_connected(cldev->cl))
482 		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
483 	else
484 		ret = -ENODEV;
485 	mutex_unlock(&bus->device_lock);
486 	if (ret && ret != -EBUSY) {
487 		cancel_work_sync(&cldev->rx_work);
488 		cldev->rx_cb = NULL;
489 		return ret;
490 	}
491 
492 	return 0;
493 }
494 EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
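/*
 * Illustrative sketch (not part of this driver): a bus client driver
 * usually registers its Rx callback right after mei_cldev_enable() in its
 * probe routine. The callback runs from the bus workqueue and commonly
 * drains the message with the non-blocking receive. my_rx_cb() and
 * my_process_message() are hypothetical names.
 *
 *	static void my_rx_cb(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[64];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv_nonblock(cldev, buf, sizeof(buf));
 *		if (ret < 0)
 *			return;
 *		my_process_message(buf, ret);
 *	}
 *
 *	ret = mei_cldev_register_rx_cb(cldev, my_rx_cb);
 */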
495 
496 /**
497  * mei_cldev_register_notif_cb - register FW notification event callback
498  *
499  * @cldev: me client device
500  * @notif_cb: callback function
501  *
502  * Return: 0 on success
503  *         -EALREADY if a callback is already registered
504  *         <0 on other errors
505  */
506 int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
507 				mei_cldev_cb_t notif_cb)
508 {
509 	struct mei_device *bus = cldev->bus;
510 	int ret;
511 
512 	if (!notif_cb)
513 		return -EINVAL;
514 
515 	if (cldev->notif_cb)
516 		return -EALREADY;
517 
518 	cldev->notif_cb = notif_cb;
519 	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
520 
521 	mutex_lock(&bus->device_lock);
522 	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
523 	mutex_unlock(&bus->device_lock);
524 	if (ret) {
525 		cancel_work_sync(&cldev->notif_work);
526 		cldev->notif_cb = NULL;
527 		return ret;
528 	}
529 
530 	return 0;
531 }
532 EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
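/*
 * Illustrative sketch (not part of this driver): FW notification events are
 * delivered through the same work-based mechanism; my_notif_cb() below is a
 * hypothetical handler.
 *
 *	static void my_notif_cb(struct mei_cl_device *cldev)
 *	{
 *		dev_dbg(&cldev->dev, "firmware notification received\n");
 *	}
 *
 *	ret = mei_cldev_register_notif_cb(cldev, my_notif_cb);
 *	if (ret)
 *		dev_warn(&cldev->dev, "notifications not available: %d\n", ret);
 */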
533 
534 /**
535  * mei_cldev_get_drvdata - driver data getter
536  *
537  * @cldev: mei client device
538  *
539  * Return: driver private data
540  */
541 void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
542 {
543 	return dev_get_drvdata(&cldev->dev);
544 }
545 EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
546 
547 /**
548  * mei_cldev_set_drvdata - driver data setter
549  *
550  * @cldev: mei client device
551  * @data: data to store
552  */
553 void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
554 {
555 	dev_set_drvdata(&cldev->dev, data);
556 }
557 EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
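/*
 * Illustrative sketch (not part of this driver): client drivers typically
 * stash their per-device state in probe and fetch it back in callbacks or
 * remove. struct my_priv is a hypothetical private structure.
 *
 *	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *	if (!priv)
 *		return -ENOMEM;
 *	mei_cldev_set_drvdata(cldev, priv);
 *	...
 *	priv = mei_cldev_get_drvdata(cldev);
 */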
558 
559 /**
560  * mei_cldev_uuid - return uuid of the underlying me client
561  *
562  * @cldev: mei client device
563  *
564  * Return: me client uuid
565  */
566 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
567 {
568 	return mei_me_cl_uuid(cldev->me_cl);
569 }
570 EXPORT_SYMBOL_GPL(mei_cldev_uuid);
571 
572 /**
573  * mei_cldev_ver - return protocol version of the underlying me client
574  *
575  * @cldev: mei client device
576  *
577  * Return: me client protocol version
578  */
579 u8 mei_cldev_ver(const struct mei_cl_device *cldev)
580 {
581 	return mei_me_cl_ver(cldev->me_cl);
582 }
583 EXPORT_SYMBOL_GPL(mei_cldev_ver);
584 
585 /**
586  * mei_cldev_enabled - check whether the device is enabled
587  *
588  * @cldev: mei client device
589  *
590  * Return: true if me client is initialized and connected
591  */
592 bool mei_cldev_enabled(const struct mei_cl_device *cldev)
593 {
594 	return mei_cl_is_connected(cldev->cl);
595 }
596 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
597 
598 /**
599  * mei_cl_bus_module_get - acquire module of the underlying
600  *    hw driver.
601  *
602  * @cldev: mei client device
603  *
604  * Return: true on success; false if the module was removed.
605  */
606 static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
607 {
608 	return try_module_get(cldev->bus->dev->driver->owner);
609 }
610 
611 /**
612  * mei_cl_bus_module_put -  release the underlying hw module.
613  *
614  * @cldev: mei client device
615  */
616 static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
617 {
618 	module_put(cldev->bus->dev->driver->owner);
619 }
620 
621 /**
622  * mei_cl_bus_vtag - get bus vtag entry wrapper
623  *     The tag for bus client is always first.
624  *
625  * @cl: host client
626  *
627  * Return: bus vtag or NULL
628  */
629 static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
630 {
631 	return list_first_entry_or_null(&cl->vtag_map,
632 					struct mei_cl_vtag, list);
633 }
634 
635 /**
636  * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
637  *
638  * @cldev: me client device
639  *
640  * Return:
641  * * 0 on success
642  * * -ENOMEM if memory allocation failed
643  */
644 static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
645 {
646 	struct mei_cl *cl = cldev->cl;
647 	struct mei_cl_vtag *cl_vtag;
648 
649 	/*
650 	 * Bail out if the client does not support vtags
651 	 * or has already allocated one
652 	 */
653 	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
654 		return 0;
655 
656 	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
657 	if (IS_ERR(cl_vtag))
658 		return -ENOMEM;
659 
660 	list_add_tail(&cl_vtag->list, &cl->vtag_map);
661 
662 	return 0;
663 }
664 
665 /**
666  * mei_cl_bus_vtag_free - remove the bus entry from vtag map
667  *
668  * @cldev: me client device
669  */
670 static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
671 {
672 	struct mei_cl *cl = cldev->cl;
673 	struct mei_cl_vtag *cl_vtag;
674 
675 	cl_vtag = mei_cl_bus_vtag(cl);
676 	if (!cl_vtag)
677 		return;
678 
679 	list_del(&cl_vtag->list);
680 	kfree(cl_vtag);
681 }
682 
683 void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
684 {
685 	struct mei_device *bus;
686 	struct mei_cl *cl;
687 	int ret;
688 
689 	if (!cldev || !buffer_id || !size)
690 		return ERR_PTR(-EINVAL);
691 
692 	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
693 		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
694 			MEI_FW_PAGE_SIZE);
695 		return ERR_PTR(-EINVAL);
696 	}
697 
698 	cl = cldev->cl;
699 	bus = cldev->bus;
700 
701 	mutex_lock(&bus->device_lock);
702 	if (cl->state == MEI_FILE_UNINITIALIZED) {
703 		ret = mei_cl_link(cl);
704 		if (ret)
705 			goto notlinked;
706 		/* update pointers */
707 		cl->cldev = cldev;
708 	}
709 
710 	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
711 	if (ret)
712 		mei_cl_unlink(cl);
713 notlinked:
714 	mutex_unlock(&bus->device_lock);
715 	if (ret)
716 		return ERR_PTR(ret);
717 	return cl->dma.vaddr;
718 }
719 EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
720 
721 int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
722 {
723 	struct mei_device *bus;
724 	struct mei_cl *cl;
725 	int ret;
726 
727 	if (!cldev)
728 		return -EINVAL;
729 
730 	cl = cldev->cl;
731 	bus = cldev->bus;
732 
733 	mutex_lock(&bus->device_lock);
734 	ret = mei_cl_dma_unmap(cl, NULL);
735 
736 	mei_cl_flush_queues(cl, NULL);
737 	mei_cl_unlink(cl);
738 	mutex_unlock(&bus->device_lock);
739 	return ret;
740 }
741 EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
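/*
 * Illustrative sketch (not part of this driver): a client that needs a
 * firmware-visible DMA buffer maps it once at setup time and unmaps it on
 * teardown; the size must be a multiple of the firmware page size. The
 * buffer id and the SZ_8K size below are hypothetical.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, 1, SZ_8K);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...
 *	mei_cldev_dma_unmap(cldev);
 */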
742 
743 /**
744  * mei_cldev_enable - enable me client device
745  *     create connection with me client
746  *
747  * @cldev: me client device
748  *
749  * Return: 0 on success and < 0 on error
750  */
751 int mei_cldev_enable(struct mei_cl_device *cldev)
752 {
753 	struct mei_device *bus = cldev->bus;
754 	struct mei_cl *cl;
755 	int ret;
756 
757 	cl = cldev->cl;
758 
759 	mutex_lock(&bus->device_lock);
760 	if (cl->state == MEI_FILE_UNINITIALIZED) {
761 		ret = mei_cl_link(cl);
762 		if (ret)
763 			goto notlinked;
764 		/* update pointers */
765 		cl->cldev = cldev;
766 	}
767 
768 	if (mei_cl_is_connected(cl)) {
769 		ret = 0;
770 		goto out;
771 	}
772 
773 	if (!mei_me_cl_is_active(cldev->me_cl)) {
774 		dev_err(&cldev->dev, "me client is not active\n");
775 		ret = -ENOTTY;
776 		goto out;
777 	}
778 
779 	ret = mei_cl_bus_vtag_alloc(cldev);
780 	if (ret)
781 		goto out;
782 
783 	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
784 	if (ret < 0) {
785 		dev_err(&cldev->dev, "cannot connect\n");
786 		mei_cl_bus_vtag_free(cldev);
787 	}
788 
789 out:
790 	if (ret)
791 		mei_cl_unlink(cl);
792 notlinked:
793 	mutex_unlock(&bus->device_lock);
794 
795 	return ret;
796 }
797 EXPORT_SYMBOL_GPL(mei_cldev_enable);
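/*
 * Illustrative sketch (not part of this driver): the typical lifetime of a
 * bus client connection is enable in the driver's probe callback and
 * disable in remove. my_probe()/my_remove() are hypothetical names.
 *
 *	static int my_probe(struct mei_cl_device *cldev,
 *			    const struct mei_cl_device_id *id)
 *	{
 *		int ret = mei_cldev_enable(cldev);
 *
 *		if (ret < 0)
 *			return ret;
 *		return 0;
 *	}
 *
 *	static void my_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */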
798 
799 /**
800  * mei_cldev_unregister_callbacks - internal wrapper for unregistering
801  *  callbacks.
802  *
803  * @cldev: client device
804  */
805 static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
806 {
807 	if (cldev->rx_cb) {
808 		cancel_work_sync(&cldev->rx_work);
809 		cldev->rx_cb = NULL;
810 	}
811 
812 	if (cldev->notif_cb) {
813 		cancel_work_sync(&cldev->notif_work);
814 		cldev->notif_cb = NULL;
815 	}
816 }
817 
818 /**
819  * mei_cldev_disable - disable me client device
820  *     disconnect from the me client
821  *
822  * @cldev: me client device
823  *
824  * Return: 0 on success and < 0 on error
825  */
826 int mei_cldev_disable(struct mei_cl_device *cldev)
827 {
828 	struct mei_device *bus;
829 	struct mei_cl *cl;
830 	int err;
831 
832 	if (!cldev)
833 		return -ENODEV;
834 
835 	cl = cldev->cl;
836 
837 	bus = cldev->bus;
838 
839 	mei_cldev_unregister_callbacks(cldev);
840 
841 	mutex_lock(&bus->device_lock);
842 
843 	mei_cl_bus_vtag_free(cldev);
844 
845 	if (!mei_cl_is_connected(cl)) {
846 		dev_dbg(bus->dev, "Already disconnected\n");
847 		err = 0;
848 		goto out;
849 	}
850 
851 	err = mei_cl_disconnect(cl);
852 	if (err < 0)
853 		dev_err(bus->dev, "Could not disconnect from the ME client\n");
854 
855 out:
856 	/* Flush queues and remove any pending read unless we have mapped DMA */
857 	if (!cl->dma_mapped) {
858 		mei_cl_flush_queues(cl, NULL);
859 		mei_cl_unlink(cl);
860 	}
861 
862 	mutex_unlock(&bus->device_lock);
863 	return err;
864 }
865 EXPORT_SYMBOL_GPL(mei_cldev_disable);
866 
867 /**
868  * mei_cldev_send_gsc_command - sends a gsc command, by sending
869  * a gsl mei message to gsc and receiving reply from gsc
870  *
871  * @cldev: me client device
872  * @client_id: client id to send the command to
873  * @fence_id: fence id to send the command to
874  * @sg_in: scatter gather list containing addresses for rx message buffer
875  * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
876  * @sg_out: scatter gather list containing addresses for tx message buffer
877  *
878  * Return:
879  *  * written size in bytes
880  *  * < 0 on error
881  */
882 ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
883 				   u8 client_id, u32 fence_id,
884 				   struct scatterlist *sg_in,
885 				   size_t total_in_len,
886 				   struct scatterlist *sg_out)
887 {
888 	struct mei_cl *cl;
889 	struct mei_device *bus;
890 	ssize_t ret = 0;
891 
892 	struct mei_ext_hdr_gsc_h2f *ext_hdr;
893 	size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
894 	int sg_out_nents, sg_in_nents;
895 	int i;
896 	struct scatterlist *sg;
897 	struct mei_ext_hdr_gsc_f2h rx_msg;
898 	unsigned int sg_len;
899 
900 	if (!cldev || !sg_in || !sg_out)
901 		return -EINVAL;
902 
903 	cl = cldev->cl;
904 	bus = cldev->bus;
905 
906 	dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
907 
908 	if (!bus->hbm_f_gsc_supported)
909 		return -EOPNOTSUPP;
910 
911 	sg_out_nents = sg_nents(sg_out);
912 	sg_in_nents = sg_nents(sg_in);
913 	/* at least one entry in tx and rx sgls must be present */
914 	if (sg_out_nents <= 0 || sg_in_nents <= 0)
915 		return -EINVAL;
916 
917 	buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
918 	ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
919 	if (!ext_hdr)
920 		return -ENOMEM;
921 
922 	/* construct the GSC message */
923 	ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
924 	ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
925 
926 	ext_hdr->client_id = client_id;
927 	ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
928 	ext_hdr->fence_id = fence_id;
929 	ext_hdr->input_address_count = sg_in_nents;
930 	ext_hdr->output_address_count = sg_out_nents;
931 	ext_hdr->reserved[0] = 0;
932 	ext_hdr->reserved[1] = 0;
933 
934 	/* copy in-sgl to the message */
935 	for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
936 		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
937 		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
938 		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
939 		ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
940 		total_in_len -= ext_hdr->sgl[i].length;
941 	}
942 
943 	/* copy out-sgl to the message */
944 	for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
945 		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
946 		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
947 		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
948 		ext_hdr->sgl[i].length = sg_len;
949 	}
950 
951 	/* send the message to GSC */
952 	ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
953 	if (ret < 0) {
954 		dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
955 		goto end;
956 	}
957 	if (ret != buf_sz) {
958 		dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
959 			ret, buf_sz);
960 		ret = -EIO;
961 		goto end;
962 	}
963 
964 	/* receive the reply from GSC, note that at this point sg_in should contain the reply */
965 	ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
966 
967 	if (ret != sizeof(rx_msg)) {
968 		dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
969 			ret, sizeof(rx_msg));
970 		if (ret >= 0)
971 			ret = -EIO;
972 		goto end;
973 	}
974 
975 	/* check rx_msg.client_id and rx_msg.fence_id match the ones we sent */
976 	if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
977 		dev_err(bus->dev, "received client_id/fence_id  %u/%u  instead of %u/%u sent\n",
978 			rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
979 		ret = -EFAULT;
980 		goto end;
981 	}
982 
983 	dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n",  rx_msg.written);
984 	ret = rx_msg.written;
985 
986 end:
987 	kfree(ext_hdr);
988 	return ret;
989 }
990 EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
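/*
 * Illustrative sketch (not part of this driver): callers of the GSC helper
 * pass DMA-mapped scatterlists for the request and reply buffers. The DMA
 * device "dev", the buffers, and the client/fence ids below are all
 * hypothetical; only the call into mei_cldev_send_gsc_command() reflects
 * the API exported above.
 *
 *	struct scatterlist sg_in, sg_out;
 *	ssize_t written;
 *
 *	sg_init_one(&sg_in, in_buf, in_len);
 *	sg_init_one(&sg_out, out_buf, out_len);
 *	if (!dma_map_sg(dev, &sg_in, 1, DMA_BIDIRECTIONAL) ||
 *	    !dma_map_sg(dev, &sg_out, 1, DMA_BIDIRECTIONAL))
 *		return -ENOMEM;
 *
 *	written = mei_cldev_send_gsc_command(cldev, client_id, fence_id,
 *					     &sg_in, in_len, &sg_out);
 */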
991 
992 /**
993  * mei_cl_device_find - find matching entry in the driver id table
994  *
995  * @cldev: me client device
996  * @cldrv: me client driver
997  *
998  * Return: id on success; NULL if no matching id is found
999  */
1000 static const
1001 struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
1002 					    const struct mei_cl_driver *cldrv)
1003 {
1004 	const struct mei_cl_device_id *id;
1005 	const uuid_le *uuid;
1006 	u8 version;
1007 	bool match;
1008 
1009 	uuid = mei_me_cl_uuid(cldev->me_cl);
1010 	version = mei_me_cl_ver(cldev->me_cl);
1011 
1012 	id = cldrv->id_table;
1013 	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
1014 		if (!uuid_le_cmp(*uuid, id->uuid)) {
1015 			match = true;
1016 
1017 			if (cldev->name[0])
1018 				if (strncmp(cldev->name, id->name,
1019 					    sizeof(id->name)))
1020 					match = false;
1021 
1022 			if (id->version != MEI_CL_VERSION_ANY)
1023 				if (id->version != version)
1024 					match = false;
1025 			if (match)
1026 				return id;
1027 		}
1028 
1029 		id++;
1030 	}
1031 
1032 	return NULL;
1033 }
1034 
1035 /**
1036  * mei_cl_device_match  - device match function
1037  *
1038  * @dev: device
1039  * @drv: driver
1040  *
1041  * Return: 1 if a matching device was found, 0 otherwise
1042  */
1043 static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
1044 {
1045 	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1046 	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
1047 	const struct mei_cl_device_id *found_id;
1048 
1049 	if (!cldev->do_match)
1050 		return 0;
1051 
1052 	if (!cldrv || !cldrv->id_table)
1053 		return 0;
1054 
1055 	found_id = mei_cl_device_find(cldev, cldrv);
1056 	if (found_id)
1057 		return 1;
1058 
1059 	return 0;
1060 }
1061 
1062 /**
1063  * mei_cl_device_probe - bus probe function
1064  *
1065  * @dev: device
1066  *
1067  * Return:  0 on success; < 0 otherwise
1068  */
1069 static int mei_cl_device_probe(struct device *dev)
1070 {
1071 	struct mei_cl_device *cldev;
1072 	struct mei_cl_driver *cldrv;
1073 	const struct mei_cl_device_id *id;
1074 	int ret;
1075 
1076 	cldev = to_mei_cl_device(dev);
1077 	cldrv = to_mei_cl_driver(dev->driver);
1078 
1079 	if (!cldrv || !cldrv->probe)
1080 		return -ENODEV;
1081 
1082 	id = mei_cl_device_find(cldev, cldrv);
1083 	if (!id)
1084 		return -ENODEV;
1085 
1086 	if (!mei_cl_bus_module_get(cldev)) {
1087 		dev_err(&cldev->dev, "get hw module failed");
1088 		return -ENODEV;
1089 	}
1090 
1091 	ret = cldrv->probe(cldev, id);
1092 	if (ret) {
1093 		mei_cl_bus_module_put(cldev);
1094 		return ret;
1095 	}
1096 
1097 	__module_get(THIS_MODULE);
1098 	return 0;
1099 }
1100 
1101 /**
1102  * mei_cl_device_remove - remove device from the bus
1103  *
1104  * @dev: device
1105  */
1108 static void mei_cl_device_remove(struct device *dev)
1109 {
1110 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1111 	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
1112 
1113 	if (cldrv->remove)
1114 		cldrv->remove(cldev);
1115 
1116 	mei_cldev_unregister_callbacks(cldev);
1117 
1118 	mei_cl_bus_module_put(cldev);
1119 	module_put(THIS_MODULE);
1120 }
1121 
1122 static ssize_t name_show(struct device *dev, struct device_attribute *a,
1123 			     char *buf)
1124 {
1125 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1126 
1127 	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
1128 }
1129 static DEVICE_ATTR_RO(name);
1130 
1131 static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
1132 			     char *buf)
1133 {
1134 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1135 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1136 
1137 	return sprintf(buf, "%pUl", uuid);
1138 }
1139 static DEVICE_ATTR_RO(uuid);
1140 
1141 static ssize_t version_show(struct device *dev, struct device_attribute *a,
1142 			     char *buf)
1143 {
1144 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1145 	u8 version = mei_me_cl_ver(cldev->me_cl);
1146 
1147 	return sprintf(buf, "%02X", version);
1148 }
1149 static DEVICE_ATTR_RO(version);
1150 
1151 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1152 			     char *buf)
1153 {
1154 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1155 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1156 	u8 version = mei_me_cl_ver(cldev->me_cl);
1157 
1158 	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
1159 			 cldev->name, uuid, version);
1160 }
1161 static DEVICE_ATTR_RO(modalias);
1162 
1163 static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1164 			     char *buf)
1165 {
1166 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1167 	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
1168 
1169 	return sprintf(buf, "%d", maxconn);
1170 }
1171 static DEVICE_ATTR_RO(max_conn);
1172 
1173 static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1174 			  char *buf)
1175 {
1176 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1177 	u8 fixed = mei_me_cl_fixed(cldev->me_cl);
1178 
1179 	return sprintf(buf, "%d", fixed);
1180 }
1181 static DEVICE_ATTR_RO(fixed);
1182 
1183 static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1184 			 char *buf)
1185 {
1186 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1187 	bool vt = mei_me_cl_vt(cldev->me_cl);
1188 
1189 	return sprintf(buf, "%d", vt);
1190 }
1191 static DEVICE_ATTR_RO(vtag);
1192 
1193 static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1194 			    char *buf)
1195 {
1196 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1197 	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
1198 
1199 	return sprintf(buf, "%u", maxlen);
1200 }
1201 static DEVICE_ATTR_RO(max_len);
1202 
1203 static struct attribute *mei_cldev_attrs[] = {
1204 	&dev_attr_name.attr,
1205 	&dev_attr_uuid.attr,
1206 	&dev_attr_version.attr,
1207 	&dev_attr_modalias.attr,
1208 	&dev_attr_max_conn.attr,
1209 	&dev_attr_fixed.attr,
1210 	&dev_attr_vtag.attr,
1211 	&dev_attr_max_len.attr,
1212 	NULL,
1213 };
1214 ATTRIBUTE_GROUPS(mei_cldev);
1215 
1216 /**
1217  * mei_cl_device_uevent - me client bus uevent handler
1218  *
1219  * @dev: device
1220  * @env: uevent kobject
1221  *
1222  * Return: 0 on success, -ENOMEM when add_uevent_var fails
1223  */
1224 static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
1225 {
1226 	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1227 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1228 	u8 version = mei_me_cl_ver(cldev->me_cl);
1229 
1230 	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
1231 		return -ENOMEM;
1232 
1233 	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
1234 		return -ENOMEM;
1235 
1236 	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
1237 		return -ENOMEM;
1238 
1239 	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
1240 			   cldev->name, uuid, version))
1241 		return -ENOMEM;
1242 
1243 	return 0;
1244 }
1245 
1246 static struct bus_type mei_cl_bus_type = {
1247 	.name		= "mei",
1248 	.dev_groups	= mei_cldev_groups,
1249 	.match		= mei_cl_device_match,
1250 	.probe		= mei_cl_device_probe,
1251 	.remove		= mei_cl_device_remove,
1252 	.uevent		= mei_cl_device_uevent,
1253 };
1254 
1255 static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1256 {
1257 	if (bus)
1258 		get_device(bus->dev);
1259 
1260 	return bus;
1261 }
1262 
1263 static void mei_dev_bus_put(struct mei_device *bus)
1264 {
1265 	if (bus)
1266 		put_device(bus->dev);
1267 }
1268 
1269 static void mei_cl_bus_dev_release(struct device *dev)
1270 {
1271 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1272 
1273 	mei_cl_flush_queues(cldev->cl, NULL);
1274 	mei_me_cl_put(cldev->me_cl);
1275 	mei_dev_bus_put(cldev->bus);
1276 	kfree(cldev->cl);
1277 	kfree(cldev);
1278 }
1279 
1280 static const struct device_type mei_cl_device_type = {
1281 	.release = mei_cl_bus_dev_release,
1282 };
1283 
1284 /**
1285  * mei_cl_bus_set_name - set device name for me client device
1286  *  <controller>-<client device>
1287  *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1288  *
1289  * @cldev: me client device
1290  */
1291 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1292 {
1293 	dev_set_name(&cldev->dev, "%s-%pUl",
1294 		     dev_name(cldev->bus->dev),
1295 		     mei_me_cl_uuid(cldev->me_cl));
1296 }
1297 
1298 /**
1299  * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1300  *
1301  * @bus: mei device
1302  * @me_cl: me client
1303  *
1304  * Return: allocated device structure or NULL on allocation failure
1305  */
1306 static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1307 						  struct mei_me_client *me_cl)
1308 {
1309 	struct mei_cl_device *cldev;
1310 	struct mei_cl *cl;
1311 
1312 	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
1313 	if (!cldev)
1314 		return NULL;
1315 
1316 	cl = mei_cl_allocate(bus);
1317 	if (!cl) {
1318 		kfree(cldev);
1319 		return NULL;
1320 	}
1321 
1322 	device_initialize(&cldev->dev);
1323 	cldev->dev.parent = bus->dev;
1324 	cldev->dev.bus    = &mei_cl_bus_type;
1325 	cldev->dev.type   = &mei_cl_device_type;
1326 	cldev->bus        = mei_dev_bus_get(bus);
1327 	cldev->me_cl      = mei_me_cl_get(me_cl);
1328 	cldev->cl         = cl;
1329 	mei_cl_bus_set_name(cldev);
1330 	cldev->is_added   = 0;
1331 	INIT_LIST_HEAD(&cldev->bus_list);
1332 	device_enable_async_suspend(&cldev->dev);
1333 
1334 	return cldev;
1335 }
1336 
1337 /**
1338  * mei_cl_bus_dev_setup - setup me client device
1339  *    run fix up routines and set the device name
1340  *
1341  * @bus: mei device
1342  * @cldev: me client device
1343  *
1344  * Return: true if the device is eligible for enumeration
1345  */
1346 static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1347 				 struct mei_cl_device *cldev)
1348 {
1349 	cldev->do_match = 1;
1350 	mei_cl_bus_dev_fixup(cldev);
1351 
1352 	/* the device name can change during fix up */
1353 	if (cldev->do_match)
1354 		mei_cl_bus_set_name(cldev);
1355 
1356 	return cldev->do_match == 1;
1357 }
1358 
1359 /**
1360  * mei_cl_bus_dev_add - add me client devices
1361  *
1362  * @cldev: me client device
1363  *
1364  * Return: 0 on success; < 0 on failure
1365  */
1366 static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1367 {
1368 	int ret;
1369 
1370 	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
1371 		mei_me_cl_uuid(cldev->me_cl),
1372 		mei_me_cl_ver(cldev->me_cl));
1373 	ret = device_add(&cldev->dev);
1374 	if (!ret)
1375 		cldev->is_added = 1;
1376 
1377 	return ret;
1378 }
1379 
1380 /**
1381  * mei_cl_bus_dev_stop - stop the driver
1382  *
1383  * @cldev: me client device
1384  */
1385 static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1386 {
1387 	cldev->do_match = 0;
1388 	if (cldev->is_added)
1389 		device_release_driver(&cldev->dev);
1390 }
1391 
1392 /**
1393  * mei_cl_bus_dev_destroy - destroy me client devices object
1394  *
1395  * @cldev: me client device
1396  *
1397  * Locking: called under "dev->cl_bus_lock" lock
1398  */
1399 static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
1400 {
1401 
1402 	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
1403 
1404 	if (!cldev->is_added)
1405 		return;
1406 
1407 	device_del(&cldev->dev);
1408 
1409 	list_del_init(&cldev->bus_list);
1410 
1411 	cldev->is_added = 0;
1412 	put_device(&cldev->dev);
1413 }
1414 
1415 /**
1416  * mei_cl_bus_remove_device - remove a device from the bus
1417  *
1418  * @cldev: me client device
1419  */
1420 static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
1421 {
1422 	mei_cl_bus_dev_stop(cldev);
1423 	mei_cl_bus_dev_destroy(cldev);
1424 }
1425 
1426 /**
1427  * mei_cl_bus_remove_devices - remove all devices from the bus
1428  *
1429  * @bus: mei device
1430  */
1431 void mei_cl_bus_remove_devices(struct mei_device *bus)
1432 {
1433 	struct mei_cl_device *cldev, *next;
1434 
1435 	mutex_lock(&bus->cl_bus_lock);
1436 	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
1437 		mei_cl_bus_remove_device(cldev);
1438 	mutex_unlock(&bus->cl_bus_lock);
1439 }
1440 
1441 
1442 /**
1443  * mei_cl_bus_dev_init - allocate and initialize a mei client device
1444  *     based on the me client
1445  *
1446  * @bus: mei device
1447  * @me_cl: me client
1448  *
1449  * Locking: called under "dev->cl_bus_lock" lock
1450  */
1451 static void mei_cl_bus_dev_init(struct mei_device *bus,
1452 				struct mei_me_client *me_cl)
1453 {
1454 	struct mei_cl_device *cldev;
1455 
1456 	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
1457 
1458 	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
1459 
1460 	if (me_cl->bus_added)
1461 		return;
1462 
1463 	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
1464 	if (!cldev)
1465 		return;
1466 
1467 	me_cl->bus_added = true;
1468 	list_add_tail(&cldev->bus_list, &bus->device_list);
1469 
1470 }
1471 
1472 /**
1473  * mei_cl_bus_rescan - scan the me clients list and create
1474  *    devices for eligible clients
1475  *
1476  * @bus: mei device
1477  */
1478 static void mei_cl_bus_rescan(struct mei_device *bus)
1479 {
1480 	struct mei_cl_device *cldev, *n;
1481 	struct mei_me_client *me_cl;
1482 
1483 	mutex_lock(&bus->cl_bus_lock);
1484 
1485 	down_read(&bus->me_clients_rwsem);
1486 	list_for_each_entry(me_cl, &bus->me_clients, list)
1487 		mei_cl_bus_dev_init(bus, me_cl);
1488 	up_read(&bus->me_clients_rwsem);
1489 
1490 	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1491 
1492 		if (!mei_me_cl_is_active(cldev->me_cl)) {
1493 			mei_cl_bus_remove_device(cldev);
1494 			continue;
1495 		}
1496 
1497 		if (cldev->is_added)
1498 			continue;
1499 
1500 		if (mei_cl_bus_dev_setup(bus, cldev))
1501 			mei_cl_bus_dev_add(cldev);
1502 		else {
1503 			list_del_init(&cldev->bus_list);
1504 			put_device(&cldev->dev);
1505 		}
1506 	}
1507 	mutex_unlock(&bus->cl_bus_lock);
1508 
1509 	dev_dbg(bus->dev, "rescan end");
1510 }
1511 
1512 void mei_cl_bus_rescan_work(struct work_struct *work)
1513 {
1514 	struct mei_device *bus =
1515 		container_of(work, struct mei_device, bus_rescan_work);
1516 
1517 	mei_cl_bus_rescan(bus);
1518 }
1519 
1520 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1521 				struct module *owner)
1522 {
1523 	int err;
1524 
1525 	cldrv->driver.name = cldrv->name;
1526 	cldrv->driver.owner = owner;
1527 	cldrv->driver.bus = &mei_cl_bus_type;
1528 
1529 	err = driver_register(&cldrv->driver);
1530 	if (err)
1531 		return err;
1532 
1533 	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1534 
1535 	return 0;
1536 }
1537 EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1538 
1539 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
1540 {
1541 	driver_unregister(&cldrv->driver);
1542 
1543 	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
1544 }
1545 EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
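/*
 * Illustrative sketch (not part of this driver): a consumer binds to the
 * bus by declaring an id table keyed on the ME client UUID and registering
 * a struct mei_cl_driver. The UUID value and all "my_*" names are
 * hypothetical; the UUID must match the ME client exposed by the firmware.
 *
 *	#define MEI_UUID_MY_CLIENT UUID_LE(0x12345678, 0x9abc, 0xdef0, \
 *					   0x12, 0x34, 0x56, 0x78, \
 *					   0x9a, 0xbc, 0xde, 0xf0)
 *
 *	static const struct mei_cl_device_id my_id_table[] = {
 *		{ .uuid = MEI_UUID_MY_CLIENT, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, my_id_table);
 *
 *	static struct mei_cl_driver my_driver = {
 *		.id_table = my_id_table,
 *		.name = "my_mei_client",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_mei_cl_driver(my_driver);
 */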
1546 
1547 
1548 int __init mei_cl_bus_init(void)
1549 {
1550 	return bus_register(&mei_cl_bus_type);
1551 }
1552 
1553 void __exit mei_cl_bus_exit(void)
1554 {
1555 	bus_unregister(&mei_cl_bus_type);
1556 }
1557