/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include "cec-priv.h"

static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
	struct cec_fh *fh = filp->private_data;

	return &fh->adap->devnode;
}

/* CEC file operations */

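/*
 * Illustrative sketch (not part of the kernel build): a userspace caller
 * would typically poll an open /dev/cecX descriptor 'fd' like this:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	poll(&pfd, 1, -1);
 *	// POLLIN/POLLRDNORM: a message is queued for CEC_RECEIVE
 *	// POLLPRI: an event is pending for CEC_DQEVENT
 *	// POLLOUT/POLLWRNORM: the transmit queue can accept CEC_TRANSMIT
 */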
static unsigned int cec_poll(struct file *filp,
			     struct poll_table_struct *poll)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	unsigned int res = 0;

	if (!devnode->registered)
		return POLLERR | POLLHUP;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= POLLOUT | POLLWRNORM;
	if (fh->queued_msgs)
		res |= POLLIN | POLLRDNORM;
	if (fh->pending_events)
		res |= POLLPRI;
	poll_wait(filp, &fh->wait, poll);
	mutex_unlock(&adap->lock);
	return res;
}

static bool cec_is_busy(const struct cec_adapter *adap,
			const struct cec_fh *fh)
{
	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

	/*
	 * Exclusive initiators and followers can always access the CEC adapter
	 */
	if (valid_initiator || valid_follower)
		return false;
	/*
	 * All others can only access the CEC adapter if there is no
	 * exclusive initiator and they are in INITIATOR mode.
	 */
	return adap->cec_initiator ||
	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

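/*
 * Illustrative sketch of the matching userspace call (assumes an open
 * /dev/cecX descriptor 'fd'; error handling elided):
 *
 *	struct cec_caps caps;
 *
 *	ioctl(fd, CEC_ADAP_G_CAPS, &caps);
 *	// caps.driver/caps.name identify the adapter; caps.capabilities
 *	// carries the CEC_CAP_* flags tested by the handlers below.
 */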
static long cec_adap_g_caps(struct cec_adapter *adap,
			    struct cec_caps __user *parg)
{
	struct cec_caps caps = {};

	strlcpy(caps.driver, adap->devnode.parent->driver->name,
		sizeof(caps.driver));
	strlcpy(caps.name, adap->name, sizeof(caps.name));
	caps.available_log_addrs = adap->available_log_addrs;
	caps.capabilities = adap->capabilities;
	caps.version = LINUX_VERSION_CODE;
	if (copy_to_user(parg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}

static long cec_adap_g_phys_addr(struct cec_adapter *adap,
				 __u16 __user *parg)
{
	u16 phys_addr;

	mutex_lock(&adap->lock);
	phys_addr = adap->phys_addr;
	mutex_unlock(&adap->lock);
	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
		return -EFAULT;
	return 0;
}

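/*
 * Illustrative sketch of setting the physical address from userspace
 * (assumes the adapter has CEC_CAP_PHYS_ADDR; the value is an example):
 *
 *	__u16 pa = 0x1000;	// e.g. 1.0.0.0, first HDMI input
 *
 *	ioctl(fd, CEC_ADAP_S_PHYS_ADDR, &pa);
 *	// pass CEC_PHYS_ADDR_INVALID (0xffff) to invalidate the address
 */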
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	err = cec_phys_addr_validate(phys_addr, NULL, NULL);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}

static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	log_addrs = adap->log_addrs;
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

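/*
 * Illustrative sketch of claiming a logical address from userspace
 * (a plausible playback-device configuration; the field values are one
 * example, not the only valid combination):
 *
 *	struct cec_log_addrs laddrs = {};
 *
 *	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
 *	laddrs.num_log_addrs = 1;
 *	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
 *	strcpy(laddrs.osd_name, "Playback");
 *	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
 *	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
 *	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
 *	ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
 *	// num_log_addrs == 0 unconfigures the adapter instead
 */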
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
	mutex_lock(&adap->lock);
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

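/*
 * Illustrative sketch of a transmit from userspace (initiator,
 * destination and opcode are examples; cec_msg_init() comes from the
 * uapi <linux/cec.h> header):
 *
 *	struct cec_msg msg;
 *
 *	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
 *	msg.msg[msg.len++] = CEC_MSG_STANDBY;
 *	ioctl(fd, CEC_TRANSMIT, &msg);
 *	// with O_NONBLOCK the call queues the message and returns;
 *	// otherwise it waits for completion and fills in msg.tx_status
 */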
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;
	mutex_lock(&adap->lock);
	if (!adap->is_configured)
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}

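/*
 * Illustrative sketch of the matching userspace call (the timeout value
 * is an example):
 *
 *	struct cec_msg msg = {};
 *
 *	msg.timeout = 1000;	// wait up to 1000 ms for a message
 *	if (ioctl(fd, CEC_RECEIVE, &msg) == 0)
 *		// msg.msg[0..msg.len - 1] holds the received message
 */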
static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;
	mutex_lock(&adap->lock);
	if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
		err = -ENONET;
	mutex_unlock(&adap->lock);
	if (err)
		return err;

	err = cec_receive_msg(fh, &msg, block);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

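/*
 * Illustrative sketch of dequeuing an event from userspace:
 *
 *	struct cec_event ev;
 *
 *	if (ioctl(fd, CEC_DQEVENT, &ev) == 0 &&
 *	    ev.event == CEC_EVENT_STATE_CHANGE)
 *		// ev.state_change.phys_addr and .log_addr_mask hold the
 *		// new adapter state
 */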
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->pending_events && block) {
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait, fh->pending_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		if (fh->pending_events & (1 << (i + 1)) &&
		    fh->events[i].ts <= ts) {
			ev = &fh->events[i];
			ts = ev->ts;
		}
	}
	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}

	if (copy_to_user(parg, ev, sizeof(*ev))) {
		err = -EFAULT;
		goto unlock;
	}

	fh->pending_events &= ~(1 << ev->event);

unlock:
	mutex_unlock(&fh->lock);
	return err;
}

static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode = fh->mode_initiator | fh->mode_follower;

	if (copy_to_user(parg, &mode, sizeof(mode)))
		return -EFAULT;
	return 0;
}

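/*
 * Illustrative sketch of switching a filehandle to exclusive follower
 * mode from userspace (the mode value is an example; monitor modes
 * additionally require CAP_NET_ADMIN, as checked below):
 *
 *	__u32 mode = CEC_MODE_INITIATOR | CEC_MODE_EXCL_FOLLOWER;
 *
 *	ioctl(fd, CEC_S_MODE, &mode);
 */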
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
		return -EINVAL;

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL)
		return -EINVAL;

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL))
		return -EINVAL;

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
		return -EINVAL;

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
		return -EINVAL;

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}

static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	if (!devnode->registered)
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}

static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev_state = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	filp->private_data = fh;

	mutex_lock(&devnode->lock);
	/* Queue up initial state events */
	ev_state.state_change.phys_addr = adap->phys_addr;
	ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	cec_queue_event_fh(fh, &ev_state, 0);

	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;

	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}

const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};