/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i, res = -ENOENT;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0) {
			res = i;
			break;
		}

	return res;
}

/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	int i;

	for (i = 0; i < dev->me_clients_num; i++)
		if (dev->me_clients[i].client_id == client_id)
			break;

	if (i == dev->me_clients_num)
		return -ENOENT;

	if (WARN_ON(dev->me_clients[i].client_id != client_id))
		return -ENOENT;

	return i;
}


/**
 * mei_io_list_flush - removes list entries belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
			list_del(&cb->list);
	}
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}
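
/*
 * Usage sketch (illustrative assumption, not part of the driver): a caller
 * holding dev->device_lock and a connected host client 'cl' would typically
 * pair the cb helpers above with mei_cl_write() like this:
 *
 *	cb = mei_io_cb_init(cl, file);
 *	if (!cb)
 *		return -ENOMEM;
 *	if (mei_io_cb_alloc_req_buf(cb, length)) {
 *		mei_io_cb_free(cb);
 *		return -ENOMEM;
 *	}
 *	memcpy(cb->request_buffer.data, data, length);
 *	rets = mei_cl_write(cl, cb, blocking);
 */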


/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_flush(&cl->dev->write_list, cl);
	mei_io_list_flush(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns the allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb = NULL;
	struct mei_cl_cb *next = NULL;

	list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or -1 for generating one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-ENOENT if client not found
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

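	/* mark the host id as taken in the host clients map */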
	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *pos, *next;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
		if (cl->host_client_id == pos->host_client_id) {
			dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
				pos->host_client_id, pos->me_client_id);
			list_del_init(&pos->link);
			break;
		}
	}
	return 0;
}


void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
	dev->open_handle_count = 0;

	/*
	 * Reserving the first three client IDs
	 * 0: Reserved for MEI Bus Message communications
	 * 1: Reserved for Watchdog
	 * 2: Reserved for AMTHI
	 */
	bitmap_set(dev->host_clients_map, 0, 3);

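	/* look up the fixed-function ME clients by UUID and initialize them */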
	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);
	}

	dev->dev_state = MEI_DEV_ENABLED;

	mutex_unlock(&dev->device_lock);
}


/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	cb->fop_type = MEI_FOP_CLOSE;
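	/*
	 * If the host buffer is free, send the disconnect request now and
	 * park the cb on the control read list to wait for the firmware
	 * reply; otherwise queue it on the control write list so the
	 * interrupt handler sends it once the buffer becomes available.
	 */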
	if (dev->hbuf_is_ready) {
		dev->hbuf_is_ready = false;
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			dev_err(&dev->pdev->dev, "failed to disconnect.\n");
			goto free;
		}
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	mutex_unlock(&dev->device_lock);

	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		if (MEI_FILE_DISCONNECTED != cl->state)
			dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");

		if (err)
			dev_dbg(&dev->pdev->dev,
					"wait failed disconnect err=%08x\n",
					err);

		dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	mei_io_cb_free(cb);
	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if another client is connecting, false otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *pos;
	struct mei_cl *next;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
		if ((pos->state == MEI_FILE_CONNECTING) &&
		    (pos != cl) && cl->me_client_id == pos->me_client_id)
			return true;
	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_IOCTL;

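	/*
	 * Send the connect request right away only when the host buffer is
	 * free and no other host client is racing to connect to the same
	 * ME client; otherwise defer the request to the control write list.
	 */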
	if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
		dev->hbuf_is_ready = false;

		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	rets = wait_event_timeout(dev->wait_recvd_msg,
				 (cl->state == MEI_FILE_CONNECTED ||
				  cl->state == MEI_FILE_DISCONNECTED),
				 timeout);
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
		goto out;
	}

	rets = cl->status;

out:
	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds > 0, 0 otherwise
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return 0;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

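	/*
	 * ME clients advertising a single receive buffer share their
	 * credits among all host clients connected to them, so the credits
	 * are tracked in the me_clients table rather than per host client.
	 */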
	for (i = 0; i < dev->me_clients_num; i++) {
		struct mei_me_client *me_cl = &dev->me_clients[i];
		if (me_cl->client_id == cl->me_client_id) {
			if (me_cl->mei_flow_ctrl_creds) {
				if (WARN_ON(me_cl->props.single_recv_buf == 0))
					return -EINVAL;
				return 1;
			} else {
				return 0;
			}
		}
	}
	return -ENOENT;
}

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return -ENOENT;

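	/* consume a shared (single_recv_buf) credit or a per-client one */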
	for (i = 0; i < dev->me_clients_num; i++) {
		struct mei_me_client *me_cl = &dev->me_clients[i];
		if (me_cl->client_id == cl->me_client_id) {
			if (me_cl->props.single_recv_buf != 0) {
				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
					return -EINVAL;
				dev->me_clients[i].mei_flow_ctrl_creds--;
			} else {
				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
					return -EINVAL;
				cl->mei_flow_ctrl_creds--;
			}
			return 0;
		}
	}
	return -ENOENT;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != MEI_FILE_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	if (cl->read_cb) {
		dev_dbg(&dev->pdev->dev, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		dev_err(&dev->pdev->dev, "no such me client %d\n",
			cl->me_client_id);
		return -ENODEV;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto err;

	cb->fop_type = MEI_FOP_READ;
	cl->read_cb = cb;
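	/*
	 * A flow control credit grants the ME client permission to send.
	 * If the host buffer is busy, queue the request on the control
	 * write list so the interrupt handler issues it later.
	 */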
	if (dev->hbuf_is_ready) {
		dev->hbuf_is_ready = false;
		if (mei_hbm_cl_flow_control_req(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	return rets;
err:
	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until completed
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size);

	cb->fop_type = MEI_FOP_WRITE;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* Host buffer is not ready, we queue the request */
	if (rets == 0 || !dev->hbuf_is_ready) {
		cb->buf_idx = 0;
		/* unsetting complete will enqueue the cb for write */
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		rets = buf->size;
		goto out;
	}

	dev->hbuf_is_ready = false;

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(&mei_hdr));

	if (mei_write_message(dev, &mei_hdr, buf->data)) {
		rets = -EIO;
		goto err;
	}

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

	rets = buf->size;
out:
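	/*
	 * A fully sent message waits for the write-complete interrupt on
	 * the write_waiting list; a partial (or deferred) one goes on the
	 * write list so the remaining data is sent from the interrupt
	 * handler.
	 */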
	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}

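	/*
	 * In blocking mode drop the device lock and sleep until the
	 * interrupt handler marks the write complete or a signal arrives.
	 */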
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
		mutex_unlock(&dev->device_lock);
		if (wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE)) {
				if (signal_pending(current))
					rets = -EINTR;
				else
					rets = -ERESTARTSYS;
		}
		mutex_lock(&dev->device_lock);
	}
err:
	return rets;
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl, *next;

	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->read_cb = NULL;
		cl->timer_count = 0;
	}
}


/**
 * mei_cl_all_read_wakeup - wake up all pending readers so they can be interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_read_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl, *next;
	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			dev_dbg(&dev->pdev->dev, "Waking up client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		list_del(&cb->list);
		mei_io_cb_free(cb);
	}
}