/*
 * Copyright (C) 2012 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev: vio device struct
 * @w1: first word
 * @w2: second word
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 * @chip: tpm chip struct
 * @buf: buffer to read
 * @count: size of buffer
 *
 * Return value:
 * Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	u16 len;
	int sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

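	/* Sleep until the response interrupt clears tpm_processing_cmd. */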
	sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * tpm_ibmvtpm_send - Send tpm request
 * @chip: tpm chip struct
 * @buf: buffer contains data to send
 * @count: size of buffer
 *
 * Return value:
 * Number of bytes sent
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	struct ibmvtpm_crq crq;
	__be64 *word = (__be64 *)&crq;
	int rc, sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
			 "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_TPM_COMMAND;
	crq.len = cpu_to_be16(count);
	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = true;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
			      be64_to_cpu(word[1]));
	if (rc != H_SUCCESS) {
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		rc = 0;
		ibmvtpm->tpm_processing_cmd = false;
	} else
		rc = count;

	spin_unlock(&ibmvtpm->rtce_lock);
	return rc;
}

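/*
 * No-op: once a CRQ request has been handed to the hypervisor there is no
 * path in this driver to cancel it.
 */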
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}

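/* There is no hardware status to report; always return 0. */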
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 * - Note that this is vtpm version and not tpm version
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_VERSION;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev: vio device struct
 *
 * Return value:
 * 0
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

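	/* Keep retrying H_FREE_CRQ while the hypervisor reports it is busy. */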
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev: vio device struct
 *
 * Return value:
 * Number of bytes the driver needs to DMA map
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm;

	/*
	 * ibmvtpm initializes at probe time, so the data we are
	 * asking for may not be set yet. Estimate that 4K required
	 * for TCE-mapped buffer in addition to CRQ.
	 */
	if (chip)
		ibmvtpm = dev_get_drvdata(&chip->dev);
	else
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev: device struct
 *
 * Return value:
 * 0
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc = 0;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm: ibm vtpm struct
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev: device struct
 *
 * Return value:
 * 0
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

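/*
 * A req_complete mask/value of 0 makes every ->status() poll look complete
 * to the TPM core; actual completion is signalled through ibmvtpm->wq when
 * the response CRQ arrives.
 */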
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 * vtpm crq pointer
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq: crq to be processed
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 * Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
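			/*
			 * Runs in the CRQ interrupt handler, so the
			 * allocation must not sleep.
			 */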
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 * @irq: irq number to handle
 * @vtpm_instance: vtpm that received interrupt
 *
 * Returns:
 * IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev: vio device struct
 * @id: vio device id struct
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

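	/*
	 * Register the CRQ page with the hypervisor. H_RESOURCE means a
	 * previous registration is still active, so reset and re-register.
	 */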
	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

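	/*
	 * Kick off the CRQ handshake. The INIT response, vTPM version and
	 * RTCE buffer size all come back through the interrupt handler.
	 */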
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}

static struct vio_driver ibmvtpm_driver = {
	.id_table = tpm_ibmvtpm_device_table,
	.probe = tpm_ibmvtpm_probe,
	.remove = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name = tpm_ibmvtpm_driver_name,
	.pm = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module
 *
 * Return value:
 * 0 - Success
 * Non-zero - Failure
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Teardown ibm vtpm module
 *
 * Return value:
 * Nothing
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");