1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2012
4 *
5 * Author(s):
6 * Jan Glauber <jang@linux.vnet.ibm.com>
7 */
8
9 #define KMSG_COMPONENT "zpci"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/compat.h>
13 #include <linux/kernel.h>
14 #include <linux/miscdevice.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/delay.h>
18 #include <linux/pci.h>
19 #include <linux/uaccess.h>
20 #include <asm/pci_debug.h>
21 #include <asm/pci_clp.h>
22 #include <asm/clp.h>
23 #include <uapi/asm/clp.h>
24
25 #include "pci_bus.h"
26
27 bool zpci_unique_uid;
28
/* Record whether UIDs are guaranteed unique; log only on a state change. */
void update_uid_checking(bool new)
{
	bool changed = (zpci_unique_uid != new);

	if (changed)
		zpci_dbg(1, "uid checking:%d\n", new);
	zpci_unique_uid = new;
}
36
zpci_err_clp(unsigned int rsp,int rc)37 static inline void zpci_err_clp(unsigned int rsp, int rc)
38 {
39 struct {
40 unsigned int rsp;
41 int rc;
42 } __packed data = {rsp, rc};
43
44 zpci_err_hex(&data, sizeof(data));
45 }
46
47 /*
48 * Call Logical Processor with c=1, lps=0 and command 1
49 * to get the bit mask of installed logical processors
50 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	/* preset cc = 3 so an exception handled via EX_TABLE reads as failure */
	int cc = 3;

	asm volatile (
		" .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n"	/* CLP, c=1, command 1 */
		"0: ipm %[cc]\n"	/* capture the condition code */
		" srl %[cc],28\n"
		"1:\n"
		/* on an exception, jump past ipm/srl; cc keeps its preset 3 */
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
		: "cc");
	*ilp = mask;	/* bit mask of installed logical processors */
	return cc;	/* 0 on success, non-zero condition code otherwise */
}
67
68 /*
69 * Call Logical Processor with c=0, the give constant lps and an lpcb request.
70 */
static __always_inline int clp_req(void *data, unsigned int lps)
{
	/* treat the request/response block as one opaque CLP_BLK_SIZE buffer */
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	/* preset cc = 3 so an exception handled via EX_TABLE reads as failure */
	int cc = 3;

	asm volatile (
		" .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0: ipm %[cc]\n"	/* capture the condition code */
		" srl %[cc],28\n"
		"1:\n"
		/* on an exception, jump past ipm/srl; cc keeps its preset 3 */
		EX_TABLE(0b, 1b)
		/* "+m" (*req): the whole block is both read and written by CLP */
		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req), [lps] "i" (lps)
		: "cc");
	return cc;	/* 0 on success, non-zero condition code otherwise */
}
88
/* Allocate a page-aligned CLP_BLK_SIZE request/response block. */
static void *clp_alloc_block(gfp_t gfp_mask)
{
	unsigned long pages;

	pages = __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
	return (void *) pages;
}
93
clp_free_block(void * ptr)94 static void clp_free_block(void *ptr)
95 {
96 free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
97 }
98
clp_store_query_pci_fngrp(struct zpci_dev * zdev,struct clp_rsp_query_pci_grp * response)99 static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
100 struct clp_rsp_query_pci_grp *response)
101 {
102 zdev->tlb_refresh = response->refresh;
103 zdev->dma_mask = response->dasm;
104 zdev->msi_addr = response->msia;
105 zdev->max_msi = response->noi;
106 zdev->fmb_update = response->mui;
107 zdev->version = response->version;
108
109 switch (response->version) {
110 case 1:
111 zdev->max_bus_speed = PCIE_SPEED_5_0GT;
112 break;
113 default:
114 zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
115 break;
116 }
117 }
118
/*
 * Query the PCI function group @pfgid and store the result in @zdev.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->request.pfgid = pfgid;
	rrb->response.hdr.len = sizeof(rrb->response);

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
		goto out;
	}
	clp_store_query_pci_fngrp(zdev, &rrb->response);
out:
	clp_free_block(rrb);
	return rc;
}
145
clp_store_query_pci_fn(struct zpci_dev * zdev,struct clp_rsp_query_pci * response)146 static int clp_store_query_pci_fn(struct zpci_dev *zdev,
147 struct clp_rsp_query_pci *response)
148 {
149 int i;
150
151 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
152 zdev->bars[i].val = le32_to_cpu(response->bar[i]);
153 zdev->bars[i].size = response->bar_size[i];
154 }
155 zdev->start_dma = response->sdma;
156 zdev->end_dma = response->edma;
157 zdev->pchid = response->pchid;
158 zdev->pfgid = response->pfgid;
159 zdev->pft = response->pft;
160 zdev->vfn = response->vfn;
161 zdev->port = response->port;
162 zdev->uid = response->uid;
163 zdev->fmb_length = sizeof(u32) * response->fmb_len;
164 zdev->rid_available = response->rid_avail;
165 zdev->is_physfn = response->is_physfn;
166 if (!s390_pci_no_rid && zdev->rid_available)
167 zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
168
169 memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
170 if (response->util_str_avail) {
171 memcpy(zdev->util_str, response->util_str,
172 sizeof(zdev->util_str));
173 zdev->util_str_avail = 1;
174 }
175 zdev->mio_capable = response->mio_addr_avail;
176 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
177 if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
178 continue;
179
180 zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
181 zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
182 }
183 return 0;
184 }
185
clp_query_pci_fn(struct zpci_dev * zdev)186 int clp_query_pci_fn(struct zpci_dev *zdev)
187 {
188 struct clp_req_rsp_query_pci *rrb;
189 int rc;
190
191 rrb = clp_alloc_block(GFP_KERNEL);
192 if (!rrb)
193 return -ENOMEM;
194
195 memset(rrb, 0, sizeof(*rrb));
196 rrb->request.hdr.len = sizeof(rrb->request);
197 rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
198 rrb->response.hdr.len = sizeof(rrb->response);
199 rrb->request.fh = zdev->fh;
200
201 rc = clp_req(rrb, CLP_LPS_PCI);
202 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
203 rc = clp_store_query_pci_fn(zdev, &rrb->response);
204 if (rc)
205 goto out;
206 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
207 } else {
208 zpci_err("Q PCI FN:\n");
209 zpci_err_clp(rrb->response.hdr.rsp, rc);
210 rc = -EIO;
211 }
212 out:
213 clp_free_block(rrb);
214 return rc;
215 }
216
217 static int clp_refresh_fh(u32 fid);
218 /**
219 * clp_set_pci_fn() - Execute a command on a PCI function
220 * @zdev: Function that will be affected
221 * @nr_dma_as: DMA address space number
222 * @command: The command code to execute
223 *
224 * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
225 * > 0 for non-success platform responses
226 */
static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;	/* up to 100 retries while the function is busy */

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		/* the block must be rebuilt from scratch for every attempt */
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = zdev->fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);	/* back off before retrying */
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		/* success: adopt the (possibly changed) function handle */
		zdev->fh = rrb->response.fh;
	} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY) {
		/* Function is already in desired state - update handle */
		rc = clp_refresh_fh(zdev->fid);
	} else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		if (!rc)
			rc = rrb->response.hdr.rsp;	/* > 0: platform response code */
	}
	clp_free_block(rrb);
	return rc;
}
268
clp_setup_writeback_mio(void)269 int clp_setup_writeback_mio(void)
270 {
271 struct clp_req_rsp_slpc_pci *rrb;
272 u8 wb_bit_pos;
273 int rc;
274
275 rrb = clp_alloc_block(GFP_KERNEL);
276 if (!rrb)
277 return -ENOMEM;
278
279 memset(rrb, 0, sizeof(*rrb));
280 rrb->request.hdr.len = sizeof(rrb->request);
281 rrb->request.hdr.cmd = CLP_SLPC;
282 rrb->response.hdr.len = sizeof(rrb->response);
283
284 rc = clp_req(rrb, CLP_LPS_PCI);
285 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
286 if (rrb->response.vwb) {
287 wb_bit_pos = rrb->response.mio_wb;
288 set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
289 zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
290 } else {
291 zpci_dbg(3, "wb bit: n.a.\n");
292 }
293
294 } else {
295 zpci_err("SLPC PCI:\n");
296 zpci_err_clp(rrb->response.hdr.rsp, rc);
297 rc = -EIO;
298 }
299 clp_free_block(rrb);
300 return rc;
301 }
302
/*
 * Enable the PCI function and, when MIO is usable, enable MIO as well.
 * If the MIO enable fails the function is disabled again so the caller
 * never sees a half-enabled device.
 */
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	int rc;

	rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	if (rc)
		return rc;
	if (!zpci_use_mio(zdev))
		return 0;

	rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
	zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
		 zdev->fid, zdev->fh, rc);
	if (rc)
		clp_disable_fh(zdev);
	return rc;
}
318
clp_disable_fh(struct zpci_dev * zdev)319 int clp_disable_fh(struct zpci_dev *zdev)
320 {
321 int rc;
322
323 if (!zdev_enabled(zdev))
324 return 0;
325
326 rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
327 zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
328 return rc;
329 }
330
clp_list_pci(struct clp_req_rsp_list_pci * rrb,void * data,void (* cb)(struct clp_fh_list_entry *,void *))331 static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
332 void (*cb)(struct clp_fh_list_entry *, void *))
333 {
334 u64 resume_token = 0;
335 int entries, i, rc;
336
337 do {
338 memset(rrb, 0, sizeof(*rrb));
339 rrb->request.hdr.len = sizeof(rrb->request);
340 rrb->request.hdr.cmd = CLP_LIST_PCI;
341 /* store as many entries as possible */
342 rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
343 rrb->request.resume_token = resume_token;
344
345 /* Get PCI function handle list */
346 rc = clp_req(rrb, CLP_LPS_PCI);
347 if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
348 zpci_err("List PCI FN:\n");
349 zpci_err_clp(rrb->response.hdr.rsp, rc);
350 rc = -EIO;
351 goto out;
352 }
353
354 update_uid_checking(rrb->response.uid_checking);
355 WARN_ON_ONCE(rrb->response.entry_size !=
356 sizeof(struct clp_fh_list_entry));
357
358 entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
359 rrb->response.entry_size;
360
361 resume_token = rrb->response.resume_token;
362 for (i = 0; i < entries; i++)
363 cb(&rrb->response.fh_list[i], data);
364 } while (resume_token);
365 out:
366 return rc;
367 }
368
__clp_add(struct clp_fh_list_entry * entry,void * data)369 static void __clp_add(struct clp_fh_list_entry *entry, void *data)
370 {
371 struct zpci_dev *zdev;
372
373 if (!entry->vendor_id)
374 return;
375
376 zdev = get_zdev_by_fid(entry->fid);
377 if (zdev) {
378 zpci_zdev_put(zdev);
379 return;
380 }
381 zpci_create_device(entry->fid, entry->fh, entry->config_state);
382 }
383
clp_scan_pci_devices(void)384 int clp_scan_pci_devices(void)
385 {
386 struct clp_req_rsp_list_pci *rrb;
387 int rc;
388
389 rrb = clp_alloc_block(GFP_KERNEL);
390 if (!rrb)
391 return -ENOMEM;
392
393 rc = clp_list_pci(rrb, NULL, __clp_add);
394
395 clp_free_block(rrb);
396 return rc;
397 }
398
__clp_refresh_fh(struct clp_fh_list_entry * entry,void * data)399 static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
400 {
401 struct zpci_dev *zdev;
402 u32 fid = *((u32 *)data);
403
404 if (!entry->vendor_id || fid != entry->fid)
405 return;
406
407 zdev = get_zdev_by_fid(fid);
408 if (!zdev)
409 return;
410
411 zdev->fh = entry->fh;
412 }
413
414 /*
415 * Refresh the function handle of the function matching @fid
416 */
static int clp_refresh_fh(u32 fid)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc = -ENOMEM;

	/* GFP_NOWAIT: allocate without sleeping */
	rrb = clp_alloc_block(GFP_NOWAIT);
	if (rrb) {
		rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
		clp_free_block(rrb);
	}
	return rc;
}
431
/* Lookup context for __clp_get_state(): FID to match and the state found. */
struct clp_state_data {
	u32 fid;		/* function ID to look up */
	enum zpci_state state;	/* configuration state of the matching entry */
};
436
__clp_get_state(struct clp_fh_list_entry * entry,void * data)437 static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
438 {
439 struct clp_state_data *sd = data;
440
441 if (entry->fid != sd->fid)
442 return;
443
444 sd->state = entry->config_state;
445 }
446
/*
 * Look up the configuration state of the function identified by @fid.
 * Defaults to ZPCI_FN_STATE_RESERVED when the function is not listed.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
int clp_get_state(u32 fid, enum zpci_state *state)
{
	struct clp_state_data sd = { .fid = fid, .state = ZPCI_FN_STATE_RESERVED };
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	/* GFP_ATOMIC: may be called from non-sleepable context */
	rrb = clp_alloc_block(GFP_ATOMIC);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, &sd, __clp_get_state);
	clp_free_block(rrb);
	if (rc)
		return rc;
	*state = sd.state;
	return 0;
}
464
clp_base_slpc(struct clp_req * req,struct clp_req_rsp_slpc * lpcb)465 static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
466 {
467 unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
468
469 if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
470 lpcb->response.hdr.len > limit)
471 return -EINVAL;
472 return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
473 }
474
clp_base_command(struct clp_req * req,struct clp_req_hdr * lpcb)475 static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
476 {
477 switch (lpcb->cmd) {
478 case 0x0001: /* store logical-processor characteristics */
479 return clp_base_slpc(req, (void *) lpcb);
480 default:
481 return -EINVAL;
482 }
483 }
484
clp_pci_slpc(struct clp_req * req,struct clp_req_rsp_slpc_pci * lpcb)485 static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
486 {
487 unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
488
489 if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
490 lpcb->response.hdr.len > limit)
491 return -EINVAL;
492 return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
493 }
494
clp_pci_list(struct clp_req * req,struct clp_req_rsp_list_pci * lpcb)495 static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
496 {
497 unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
498
499 if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
500 lpcb->response.hdr.len > limit)
501 return -EINVAL;
502 if (lpcb->request.reserved2 != 0)
503 return -EINVAL;
504 return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
505 }
506
clp_pci_query(struct clp_req * req,struct clp_req_rsp_query_pci * lpcb)507 static int clp_pci_query(struct clp_req *req,
508 struct clp_req_rsp_query_pci *lpcb)
509 {
510 unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
511
512 if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
513 lpcb->response.hdr.len > limit)
514 return -EINVAL;
515 if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
516 return -EINVAL;
517 return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
518 }
519
clp_pci_query_grp(struct clp_req * req,struct clp_req_rsp_query_pci_grp * lpcb)520 static int clp_pci_query_grp(struct clp_req *req,
521 struct clp_req_rsp_query_pci_grp *lpcb)
522 {
523 unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
524
525 if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
526 lpcb->response.hdr.len > limit)
527 return -EINVAL;
528 if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
529 lpcb->request.reserved4 != 0)
530 return -EINVAL;
531 return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
532 }
533
clp_pci_command(struct clp_req * req,struct clp_req_hdr * lpcb)534 static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
535 {
536 switch (lpcb->cmd) {
537 case 0x0001: /* store logical-processor characteristics */
538 return clp_pci_slpc(req, (void *) lpcb);
539 case 0x0002: /* list PCI functions */
540 return clp_pci_list(req, (void *) lpcb);
541 case 0x0003: /* query PCI function */
542 return clp_pci_query(req, (void *) lpcb);
543 case 0x0004: /* query PCI function group */
544 return clp_pci_query_grp(req, (void *) lpcb);
545 default:
546 return -EINVAL;
547 }
548 }
549
static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	/* only the base (0) and PCI (2) logical processors are supported */
	rc = -EINVAL;
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	/* copy the caller's full request/response page into the kernel block */
	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	/* only format 0 with zeroed reserved fields is accepted */
	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	}
	if (rc)
		goto out_free;

	/* copy the response page back to the caller */
	rc = -EFAULT;
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}
596
clp_immediate_command(struct clp_req * req)597 static int clp_immediate_command(struct clp_req *req)
598 {
599 void __user *uptr;
600 unsigned long ilp;
601 int exists;
602
603 if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
604 return -EINVAL;
605
606 uptr = (void __force __user *)(unsigned long) req->data_p;
607 if (req->cmd == 0) {
608 /* Command code 0: test for a specific processor */
609 exists = test_bit_inv(req->lps, &ilp);
610 return put_user(exists, (int __user *) uptr);
611 }
612 /* Command code 1: return bit mask of installed processors */
613 return put_user(ilp, (unsigned long __user *) uptr);
614 }
615
clp_misc_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)616 static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
617 unsigned long arg)
618 {
619 struct clp_req req;
620 void __user *argp;
621
622 if (cmd != CLP_SYNC)
623 return -EINVAL;
624
625 argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
626 if (copy_from_user(&req, argp, sizeof(req)))
627 return -EFAULT;
628 if (req.r != 0)
629 return -EINVAL;
630 return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
631 }
632
/* No per-open state exists, so release has nothing to tear down. */
static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}
637
/* File operations of the /dev/clp misc device; the same ioctl handler
 * serves native and compat callers (it resolves the pointer itself). */
static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	.compat_ioctl = clp_misc_ioctl,
	.llseek = no_llseek,
};
646
/* Misc character device "/dev/clp" with a dynamically assigned minor. */
static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};
652
/* Register the /dev/clp misc device at device-initcall time. */
static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);
659