• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Speed Select Interface: Common functions
4  * Copyright (c) 2019, Intel Corporation.
5  * All rights reserved.
6  *
7  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
8  */
9 
10 #include <linux/cpufeature.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/fs.h>
13 #include <linux/hashtable.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/sched/signal.h>
18 #include <linux/slab.h>
19 #include <linux/uaccess.h>
20 #include <uapi/linux/isst_if.h>
21 
22 #include "isst_if_common.h"
23 
#define MSR_THREAD_ID_INFO	0x53
#define MSR_CPU_BUS_NUMBER	0x128

/* Per device-type command callbacks registered by client drivers */
static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

/* MSRs user space may access via ISST_IF_MSR_COMMAND (writes need CAP_SYS_ADMIN) */
static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
};
35 
/* Inclusive sub-command range permitted for one mailbox command id */
struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

/* (cmd, sub_cmd, param) triple identifying a "set" type mailbox request */
struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};
47 
/*
 * Whitelist of mailbox commands: a command is accepted only when its
 * sub-command falls inside one of these inclusive ranges (checked by
 * isst_if_mbox_cmd_invalid()).
 */
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0B},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
};

/* Set-type (state changing) requests, matched by isst_if_mbox_cmd_set_req() */
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
};
63 
/* One stored command (mailbox request or MSR write), hashed by @cmd */
struct isst_cmd {
	struct hlist_node hnode;
	u64 data;		/* mailbox request data or MSR value */
	u32 cmd;		/* mailbox (cmd << 16 | sub_cmd) or MSR index */
	int cpu;		/* logical CPU the command was issued on */
	int mbox_cmd_type;	/* non-zero: mailbox; zero: MSR write */
	u32 param;		/* mailbox parameter (unused for MSR entries) */
};

/* Commands saved for replay on resume/online; writers hold isst_hash_lock */
static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);
75 
/*
 * Allocate and insert a fresh hash entry for @cmd.
 * Caller must hold isst_hash_lock (see isst_store_cmd()).
 */
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u32 data)
{
	struct isst_cmd *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cmd = cmd;
	entry->cpu = cpu;
	entry->mbox_cmd_type = mbox_cmd_type;
	entry->param = param;
	entry->data = data;
	hash_add(isst_hash, &entry->hnode, entry->cmd);

	return 0;
}
95 
isst_delete_hash(void)96 static void isst_delete_hash(void)
97 {
98 	struct isst_cmd *sst_cmd;
99 	struct hlist_node *tmp;
100 	int i;
101 
102 	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
103 		hash_del(&sst_cmd->hnode);
104 		kfree(sst_cmd);
105 	}
106 }
107 
108 /**
109  * isst_store_cmd() - Store command to a hash table
110  * @cmd: Mailbox command.
111  * @sub_cmd: Mailbox sub-command or MSR id.
112  * @mbox_cmd_type: Mailbox or MSR command.
113  * @param: Mailbox parameter.
114  * @data: Mailbox request data or MSR data.
115  *
116  * Stores the command to a hash table if there is no such command already
117  * stored. If already stored update the latest parameter and data for the
118  * command.
119  *
120  * Return: Return result of store to hash table, 0 for success, others for
121  * failure.
122  */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *entry;
	int full_cmd, ret = 0;

	/* Pack command into bits 31:16 and sub-command into bits 15:0 */
	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));

	mutex_lock(&isst_hash_lock);
	hash_for_each_possible(isst_hash, entry, hnode, full_cmd) {
		if (entry->cmd != full_cmd || entry->cpu != cpu ||
		    entry->mbox_cmd_type != mbox_cmd_type)
			continue;

		/* Already stored: just refresh parameter and data */
		entry->param = param;
		entry->data = data;
		goto unlock;
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
unlock:
	mutex_unlock(&isst_hash_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);
148 
isst_mbox_resume_command(struct isst_if_cmd_cb * cb,struct isst_cmd * sst_cmd)149 static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
150 				     struct isst_cmd *sst_cmd)
151 {
152 	struct isst_if_mbox_cmd mbox_cmd;
153 	int wr_only;
154 
155 	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
156 	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
157 	mbox_cmd.parameter = sst_cmd->param;
158 	mbox_cmd.req_data = sst_cmd->data;
159 	mbox_cmd.logical_cpu = sst_cmd->cpu;
160 	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
161 }
162 
163 /**
164  * isst_resume_common() - Process Resume request
165  *
166  * On resume replay all mailbox commands and MSRs.
167  *
168  * Return: None.
169  */
isst_resume_common(void)170 void isst_resume_common(void)
171 {
172 	struct isst_cmd *sst_cmd;
173 	int i;
174 
175 	hash_for_each(isst_hash, i, sst_cmd, hnode) {
176 		struct isst_if_cmd_cb *cb;
177 
178 		if (sst_cmd->mbox_cmd_type) {
179 			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
180 			if (cb->registered)
181 				isst_mbox_resume_command(cb, sst_cmd);
182 		} else {
183 			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
184 					   sst_cmd->data);
185 		}
186 	}
187 }
188 EXPORT_SYMBOL_GPL(isst_resume_common);
189 
/*
 * Re-apply any stored whitelisted MSR writes on @cpu (called when the CPU
 * comes online). MSR entries are hashed by the MSR index itself, so each
 * whitelist entry is used directly as the hash key.
 */
static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (!punit_msr_white_list[i])
			break;

		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			/* Only MSR-type entries for this CPU are replayed */
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}
208 
209 /**
210  * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
211  * @cmd: Pointer to the command structure to verify.
212  *
213  * Invalid command to PUNIT to may result in instability of the platform.
214  * This function has a whitelist of commands, which are allowed.
215  *
216  * Return: Return true if the command is invalid, else false.
217  */
isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd * cmd)218 bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
219 {
220 	int i;
221 
222 	if (cmd->logical_cpu >= nr_cpu_ids)
223 		return true;
224 
225 	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
226 		if (cmd->command == isst_valid_cmds[i].cmd &&
227 		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
228 		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
229 			return false;
230 		}
231 	}
232 
233 	return true;
234 }
235 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
236 
237 /**
238  * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
239  * @cmd: Pointer to the command structure to verify.
240  *
241  * Check if the given mail box level is set request and not a get request.
242  *
243  * Return: Return true if the command is set_req, else false.
244  */
isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd * cmd)245 bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
246 {
247 	int i;
248 
249 	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
250 		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
251 		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
252 		    cmd->parameter == isst_cmd_set_reqs[i].param) {
253 			return true;
254 		}
255 	}
256 
257 	return false;
258 }
259 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
260 
/*
 * Fill and copy the ISST platform capability info to user space.
 *
 * Fixes: the three version/limit assignments were terminated with commas,
 * silently forming one comma-operator expression; use semicolons. Also
 * zero-initialize @info so struct padding bytes are never copied to user
 * space.
 */
static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info = {};

	info.api_version = ISST_IF_API_VERSION;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
276 
277 
struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[2];
	/* PUNIT CPU id read from MSR_THREAD_ID_INFO; -1 if unreadable */
	int punit_cpu_id;
};

/* Array indexed by logical CPU; allocated for num_possible_cpus() entries */
static struct isst_if_cpu_info *isst_cpu_info;
285 
286 /**
287  * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
288  * @cpu: Logical CPU number.
289  * @bus_number: The bus number assigned by the hardware.
290  * @dev: The device number assigned by the hardware.
291  * @fn: The function number assigned by the hardware.
292  *
293  * Using cached bus information, find out the PCI device for a bus number,
294  * device and function.
295  *
296  * Return: Return pci_dev pointer or NULL.
297  */
isst_if_get_pci_dev(int cpu,int bus_no,int dev,int fn)298 struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
299 {
300 	int bus_number;
301 
302 	if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
303 	    cpu >= num_possible_cpus())
304 		return NULL;
305 
306 	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
307 	if (bus_number < 0)
308 		return NULL;
309 
310 	return pci_get_domain_bus_and_slot(0, bus_number, PCI_DEVFN(dev, fn));
311 }
312 EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
313 
/*
 * CPU hotplug "online" callback: cache this CPU's PUNIT bus numbers and
 * PUNIT CPU id, then replay any stored whitelisted MSR writes for it.
 */
static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		/* Bus 0 in bits 7:0, bus 1 in bits 15:8 of MSR 0x128 */
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
	}

	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}
	isst_cpu_info[cpu].punit_cpu_id = data;

	/* Restore whitelisted MSR values lost while the CPU was offline */
	isst_restore_msr_local(cpu);

	return 0;
}
340 
341 static int isst_if_online_id;
342 
isst_if_cpu_info_init(void)343 static int isst_if_cpu_info_init(void)
344 {
345 	int ret;
346 
347 	isst_cpu_info = kcalloc(num_possible_cpus(),
348 				sizeof(*isst_cpu_info),
349 				GFP_KERNEL);
350 	if (!isst_cpu_info)
351 		return -ENOMEM;
352 
353 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
354 				"platform/x86/isst-if:online",
355 				isst_if_cpu_online, NULL);
356 	if (ret < 0) {
357 		kfree(isst_cpu_info);
358 		return ret;
359 	}
360 
361 	isst_if_online_id = ret;
362 
363 	return 0;
364 }
365 
isst_if_cpu_info_exit(void)366 static void isst_if_cpu_info_exit(void)
367 {
368 	cpuhp_remove_state(isst_if_online_id);
369 	kfree(isst_cpu_info);
370 };
371 
/* Resolve one logical-CPU to PUNIT-CPU-id mapping request in place. */
static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *map = (struct isst_if_cpu_map *)cmd_ptr;

	if (map->logical_cpu >= nr_cpu_ids ||
	    map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	map->physical_cpu = isst_cpu_info[map->logical_cpu].punit_cpu_id;
	/* Result is written back into the command buffer */
	*write_only = 0;

	return 0;
}
386 
match_punit_msr_white_list(int msr)387 static bool match_punit_msr_white_list(int msr)
388 {
389 	int i;
390 
391 	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
392 		if (punit_msr_white_list[i] == msr)
393 			return true;
394 	}
395 
396 	return false;
397 }
398 
/*
 * Handle one ISST_IF_MSR_COMMAND entry: read or write a whitelisted MSR
 * on the requested logical CPU. Writes require CAP_SYS_ADMIN and are
 * stored in the hash table so they can be replayed on resume/online.
 */
static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		/* Writing PUNIT MSRs is privileged */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		/* Don't re-store the command while replaying during resume */
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			/* Read result goes back to user space */
			msr_cmd->data = data;
			*write_only = 0;
		}
	}


	return ret;
}
438 
/*
 * Copy a batch of same-sized commands from user space and run each one
 * through the callback, copying responses back unless the command is
 * write-only. On error after at least one command succeeded, the count
 * of completed commands is returned instead of the error code, so user
 * space can tell how far the batch progressed.
 */
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		/* Let user space interrupt a long-running batch */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		/* Write-only commands produce no response to copy back */
		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	/* Partial success reports the number of commands completed */
	return i ? i : ret;
}
490 
/*
 * ioctl dispatcher: route each request either to a locally handled
 * callback (CPU mapping, MSR access) or to the registered MMIO/mailbox
 * client driver. Unknown commands return -ENOTTY.
 */
static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct isst_if_cmd_cb cmd_cb;
	struct isst_if_cmd_cb *cb;
	long ret = -ENOTTY;

	switch (cmd) {
	case ISST_IF_GET_PLATFORM_INFO:
		ret = isst_if_get_platform_info(argp);
		break;
	case ISST_IF_GET_PHY_ID:
		/* Handled locally via an on-stack callback descriptor */
		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	case ISST_IF_IO_CMD:
		/* Forwarded to the MMIO client driver, if registered */
		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MBOX_COMMAND:
		/* Forwarded to the mailbox client driver, if registered */
		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MSR_COMMAND:
		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	default:
		break;
	}

	return ret;
}
531 
/* Protects the misc-device bookkeeping below and punit_callbacks */
static DEFINE_MUTEX(punit_misc_dev_lock);
/* Number of registered clients; misc device exists while non-zero */
static int misc_usage_count;
/* Sticky result of misc_register(); non-zero means registration never retried */
static int misc_device_ret;
/* Open file count; new client registration is refused while non-zero */
static int misc_device_open;
536 
/*
 * Open: pin every registered client module so a provider cannot be
 * unloaded while user space holds the file descriptor.
 */
static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		/* Undo the references taken before the failing entry */
		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_lock);

	return ret;
}
568 
/*
 * Release: drop the module references taken in isst_if_open().
 * NOTE(review): "relase" is a typo for "release"; the symbol is referenced
 * from isst_if_char_driver_ops, so the name is left unchanged here.
 */
static int isst_if_relase(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_lock);

	return 0;
}
585 
/* File operations for the /dev/isst_interface misc device */
static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_relase,
};

/* Misc device created by the first client registration */
static struct miscdevice isst_if_char_driver = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "isst_interface",
	.fops		= &isst_if_char_driver_ops,
};
597 
598 /**
599  * isst_if_cdev_register() - Register callback for IOCTL
600  * @device_type: The device type this callback handling.
601  * @cb:	Callback structure.
602  *
603  * This function registers a callback to device type. On very first call
604  * it will register a misc device, which is used for user kernel interface.
605  * Other calls simply increment ref count. Registry will fail, if the user
606  * already opened misc device for operation. Also if the misc device
607  * creation failed, then it will not try again and all callers will get
608  * failure code.
609  *
610  * Return: Return the return value from the misc creation device or -EINVAL
611  * for unsupported device type.
612  */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	/* A previous misc-device creation failure is sticky: never retry */
	if (misc_device_ret)
		return misc_device_ret;

	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	mutex_lock(&punit_misc_dev_lock);
	/* Refuse registration while user space has the device open */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_lock);
		return -EAGAIN;
	}
	/* First registrant creates the misc device and the per-CPU info */
	if (!misc_usage_count) {
		int ret;

		misc_device_ret = misc_register(&isst_if_char_driver);
		if (misc_device_ret)
			goto unlock_exit;

		ret = isst_if_cpu_info_init();
		if (ret) {
			misc_deregister(&isst_if_char_driver);
			misc_device_ret = ret;
			goto unlock_exit;
		}
	}
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	misc_usage_count++;
unlock_exit:
	mutex_unlock(&punit_misc_dev_lock);

	return misc_device_ret;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
649 
650 /**
651  * isst_if_cdev_unregister() - Unregister callback for IOCTL
652  * @device_type: The device type to unregister.
653  *
654  * This function unregisters the previously registered callback. If this
655  * is the last callback unregistering, then misc device is removed.
656  *
657  * Return: None.
658  */
void isst_if_cdev_unregister(int device_type)
{
	mutex_lock(&punit_misc_dev_lock);
	misc_usage_count--;
	punit_callbacks[device_type].registered = 0;
	/* Stored mailbox replay commands belong to the mailbox provider */
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	/* Last registrant tears down the misc device and per-CPU info */
	if (!misc_usage_count && !misc_device_ret) {
		misc_deregister(&isst_if_char_driver);
		isst_if_cpu_info_exit();
	}
	mutex_unlock(&punit_misc_dev_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
673 
674 MODULE_LICENSE("GPL v2");
675