// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include "isst_if_common.h"

#define MSR_THREAD_ID_INFO	0x53
#define MSR_CPU_BUS_NUMBER	0x128

static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
};

struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};

static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0B},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};

static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};

struct isst_cmd {
	struct hlist_node hnode;
	u64 data;
	u32 cmd;
	int cpu;
	int mbox_cmd_type;
	u32 param;
};

static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);

static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u32 data)
{
	struct isst_cmd *sst_cmd;

	sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
	if (!sst_cmd)
		return -ENOMEM;

	sst_cmd->cpu = cpu;
	sst_cmd->cmd = cmd;
	sst_cmd->mbox_cmd_type = mbox_cmd_type;
	sst_cmd->param = param;
	sst_cmd->data = data;

	hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);

	return 0;
}

static void isst_delete_hash(void)
{
	struct isst_cmd *sst_cmd;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
		hash_del(&sst_cmd->hnode);
		kfree(sst_cmd);
	}
}

/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target CPU for the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command to a hash table if the command is not already stored.
 * If it is already stored, update the latest parameter and data for the
 * command.
 *
 * Return: Result of the store: 0 for success, non-zero for failure.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *sst_cmd;
	int full_cmd, ret;

	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
	mutex_lock(&isst_hash_lock);
	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
			sst_cmd->param = param;
			sst_cmd->data = data;
			mutex_unlock(&isst_hash_lock);
			return 0;
		}
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
	mutex_unlock(&isst_hash_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);
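
/*
 * Key packing example: the hash key combines the mailbox command in the
 * upper 16 bits with the sub-command (or MSR index) in the lower 16 bits.
 * For instance, command 0x7F with sub-command 0x02 (an entry in
 * isst_cmd_set_reqs[]) is keyed as:
 *
 *	full_cmd = (0x7F << 16) | 0x02;		-> 0x007F0002
 *
 * isst_mbox_resume_command() below reverses this split when replaying
 * commands on resume.
 */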

static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
				     struct isst_cmd *sst_cmd)
{
	struct isst_if_mbox_cmd mbox_cmd;
	int wr_only;

	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
	mbox_cmd.parameter = sst_cmd->param;
	mbox_cmd.req_data = sst_cmd->data;
	mbox_cmd.logical_cpu = sst_cmd->cpu;
	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}

/**
 * isst_resume_common() - Process Resume request
 *
 * On resume, replay all mailbox commands and MSRs.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
	struct isst_cmd *sst_cmd;
	int i;

	hash_for_each(isst_hash, i, sst_cmd, hnode) {
		struct isst_if_cmd_cb *cb;

		if (sst_cmd->mbox_cmd_type) {
			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
			if (cb->registered)
				isst_mbox_resume_command(cb, sst_cmd);
		} else {
			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
					   sst_cmd->data);
		}
	}
}
EXPORT_SYMBOL_GPL(isst_resume_common);

static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (!punit_msr_white_list[i])
			break;

		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}

/**
 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
 * @cmd: Pointer to the command structure to verify.
 *
 * An invalid command to the PUNIT may result in instability of the
 * platform. This function has a whitelist of commands, which are allowed.
 *
 * Return: Return true if the command is invalid, else false.
 */
bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
{
	int i;

	if (cmd->logical_cpu >= nr_cpu_ids)
		return true;

	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
		if (cmd->command == isst_valid_cmds[i].cmd &&
		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
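
/*
 * Illustrative check (sketch, values taken from isst_valid_cmds[]):
 * command 0x7F with sub-command 0x05 falls inside the 0x7F/0x00-0x0B
 * range above and is allowed, while 0x7F/0x0C matches no range and is
 * rejected:
 *
 *	struct isst_if_mbox_cmd cmd = {
 *		.logical_cpu = 0,
 *		.command = 0x7F,
 *		.sub_command = 0x05,
 *	};
 *
 *	isst_if_mbox_cmd_invalid(&cmd);		-> false (allowed)
 */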

/**
 * isst_if_mbox_cmd_set_req() - Check if a mailbox command is a set request
 * @cmd: Pointer to the command structure to verify.
 *
 * Check if the given mailbox command is a set request and not a get request.
 *
 * Return: Return true if the command is a set request, else false.
 */
bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
		    cmd->parameter == isst_cmd_set_reqs[i].param) {
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
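
/*
 * Illustrative example (values from isst_cmd_set_reqs[]): 0xD0/0x00 with
 * parameter 0x08 is classified as a set request; the same
 * command/sub-command with any other parameter is treated as a get:
 *
 *	cmd.command = 0xD0;
 *	cmd.sub_command = 0x00;
 *	cmd.parameter = 0x08;
 *	isst_if_mbox_cmd_set_req(&cmd);		-> true
 */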

static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info;

	info.api_version = ISST_IF_API_VERSION;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
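
/*
 * User-space sketch (illustrative; assumes the device node created from
 * the miscdevice name registered below, plus standard libc headers):
 *
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *	struct isst_if_platform_info info;
 *
 *	if (fd >= 0 && !ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info))
 *		printf("api:%d mbox:%d mmio:%d\n", info.api_version,
 *		       info.mbox_supported, info.mmio_supported);
 */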

#define ISST_MAX_BUS_NUMBER	2

struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
	int punit_cpu_id;
	int numa_node;
};

struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;

static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	struct pci_dev *_pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int bus_number;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_physical_package_id(cpu);

	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for_each_pci_dev(_pci_dev) {
		int node;

		if (_pci_dev->bus->number != bus_number ||
		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
			continue;

		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info("Failed to get NUMA node for CPU:%d bus:%d dev:%d fn:%d\n",
				cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no NUMA-matched pci_dev, there are the following cases:
	 * 1. CONFIG_NUMA is not defined: In this case, if there is only a
	 *    single device match, we don't need NUMA information; simply
	 *    return the last match. Otherwise return NULL.
	 * 2. NUMA information is not exposed via the _SEG method. This case
	 *    is similar to case 1.
	 * 3. The NUMA information doesn't match the CPU's NUMA node and
	 *    there is more than one match: return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}

/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: The bus number assigned by the hardware.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find out the PCI device for a bus number,
 * device and function.
 *
 * Return: Return pci_dev pointer or NULL.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *pci_dev;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
		return pci_dev;

	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
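
/*
 * Usage sketch (illustrative): a transport driver can resolve its PUNIT
 * PCI device with the same bus/device/function triplets that
 * isst_if_cpu_online() caches below (0/0/1 and 1/30/1):
 *
 *	struct pci_dev *pdev = isst_if_get_pci_dev(cpu, 0, 0, 1);
 *
 *	if (!pdev)
 *		return -ENODEV;
 */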

static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
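		/*
		 * MSR_CPU_BUS_NUMBER packs one 8-bit bus number per byte:
		 * bits 7:0 carry bus 0 and bits 15:8 carry bus 1, the two
		 * PUNIT buses cached in bus_info[].
		 */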
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}
	isst_cpu_info[cpu].punit_cpu_id = data;

	isst_restore_msr_local(cpu);

	return 0;
}

static int isst_if_online_id;

static int isst_if_cpu_info_init(void)
{
	int ret;

	isst_cpu_info = kcalloc(num_possible_cpus(),
				sizeof(*isst_cpu_info),
				GFP_KERNEL);
	if (!isst_cpu_info)
		return -ENOMEM;

	isst_pkg_info = kcalloc(topology_max_packages(),
				sizeof(*isst_pkg_info),
				GFP_KERNEL);
	if (!isst_pkg_info) {
		kfree(isst_cpu_info);
		return -ENOMEM;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/isst-if:online",
				isst_if_cpu_online, NULL);
	if (ret < 0) {
		kfree(isst_pkg_info);
		kfree(isst_cpu_info);
		return ret;
	}

	isst_if_online_id = ret;

	return 0;
}

static void isst_if_cpu_info_exit(void)
{
	cpuhp_remove_state(isst_if_online_id);
	kfree(isst_pkg_info);
	kfree(isst_cpu_info);
}

static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *cpu_map;

	cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
	if (cpu_map->logical_cpu >= nr_cpu_ids ||
	    cpu_map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	*write_only = 0;
	cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

	return 0;
}

static bool match_punit_msr_white_list(int msr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (punit_msr_white_list[i] == msr)
			return true;
	}

	return false;
}

static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}

	return ret;
}
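
/*
 * User-space sketch (illustrative): reading MSR_TURBO_RATIO_LIMIT (0x1AD,
 * one of the whitelisted MSRs) on CPU 0 through the multi-command
 * interface; the field names follow struct isst_if_msr_cmds in
 * uapi/linux/isst_if.h, with read_write set to 0 for a read:
 *
 *	struct isst_if_msr_cmds cmds = {
 *		.cmd_count = 1,
 *		.msr_cmd[0] = { .read_write = 0, .logical_cpu = 0,
 *				.msr = 0x1AD },
 *	};
 *
 *	if (!ioctl(fd, ISST_IF_MSR_COMMAND, &cmds))
 *		printf("data: %llx\n",
 *		       (unsigned long long)cmds.msr_cmd[0].data);
 */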

static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	return i ? i : ret;
}
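
/*
 * Illustrative layout of the user buffer consumed above (derived from the
 * copy logic in isst_if_exec_multi_cmd()):
 *
 *	+-----------+  <- argp
 *	| cmd_count |  u32; must be 1..ISST_IF_CMD_LIMIT
 *	+-----------+  <- argp + cb->offset
 *	| cmd[0]    |  cb->cmd_size bytes each; the result is copied back
 *	| cmd[1]    |  in place unless the command was write-only
 *	|   ...     |
 *	+-----------+
 *
 * On a partial failure the ioctl returns the number of commands already
 * processed, so user space can tell how far it got.
 */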

static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct isst_if_cmd_cb cmd_cb;
	struct isst_if_cmd_cb *cb;
	long ret = -ENOTTY;

	switch (cmd) {
	case ISST_IF_GET_PLATFORM_INFO:
		ret = isst_if_get_platform_info(argp);
		break;
	case ISST_IF_GET_PHY_ID:
		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	case ISST_IF_IO_CMD:
		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MBOX_COMMAND:
		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MSR_COMMAND:
		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	default:
		break;
	}

	return ret;
}

/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
/* Lock to allow one shared misc device for all ISST interfaces */
static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;

static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}

static int isst_if_release(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}

static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_release,
};

static struct miscdevice isst_if_char_driver = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "isst_interface",
	.fops = &isst_if_char_driver_ops,
};

static int isst_misc_reg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_device_ret)
		goto unlock_exit;

	if (!misc_usage_count) {
		misc_device_ret = isst_if_cpu_info_init();
		if (misc_device_ret)
			goto unlock_exit;

		misc_device_ret = misc_register(&isst_if_char_driver);
		if (misc_device_ret) {
			isst_if_cpu_info_exit();
			goto unlock_exit;
		}
	}
	misc_usage_count++;

unlock_exit:
	mutex_unlock(&punit_misc_dev_reg_lock);

	return misc_device_ret;
}

static void isst_misc_unreg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_usage_count)
		misc_usage_count--;
	if (!misc_usage_count && !misc_device_ret) {
		misc_deregister(&isst_if_char_driver);
		isst_if_cpu_info_exit();
	}
	mutex_unlock(&punit_misc_dev_reg_lock);
}

/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handles.
 * @cb: Callback structure.
 *
 * This function registers a callback for a device type. On the very first
 * call it will register a misc device, which is used as the user/kernel
 * interface. Subsequent calls simply increment a reference count.
 * Registration fails if user space has already opened the misc device.
 * If the misc device creation failed, it will not be retried and all
 * callers will get the failure code.
 *
 * Return: Return the return value from the misc device creation or
 * -EINVAL for an unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	int ret;

	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	mutex_lock(&punit_misc_dev_open_lock);
	/* Device is already open, we don't want to add new callbacks */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_open_lock);
		return -EAGAIN;
	}
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	mutex_unlock(&punit_misc_dev_open_lock);

	ret = isst_misc_reg();
	if (ret) {
		/*
		 * No need for a mutex: the misc device registration failed,
		 * so no one can open the device yet. Hence no contention.
		 */
		punit_callbacks[device_type].registered = 0;
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
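
/*
 * Registration sketch (illustrative; my_mbox_cmd_callback is a
 * hypothetical callback, and the mbox_cmd field name is assumed from the
 * uapi header — the other names come from this file):
 *
 *	static struct isst_if_cmd_cb cb = {
 *		.cmd_size = sizeof(struct isst_if_mbox_cmd),
 *		.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd),
 *		.cmd_callback = my_mbox_cmd_callback,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
 *	...
 *	isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
 */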

/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback being unregistered, then the misc device is
 * removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
	isst_misc_unreg();
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].registered = 0;
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);

MODULE_LICENSE("GPL v2");