/*
 * IBM virtual TPM (vTPM) device driver for IBM POWER systems.
 *
 * The driver communicates with a hypervisor-hosted vTPM over the VIO
 * bus, using the Command/Response Queue (CRQ) interface.
 *
 * Author: adlai@us.ibm.com
 *
 * Licensed under the GNU General Public License (GPL).
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>	/* msleep() */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		first word of the CRQ entry
 * @w2:		second word of the CRQ entry
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}
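
/*
 * Usage sketch: callers build a 16-byte struct ibmvtpm_crq on the stack
 * and hand it to the hypervisor as two 64-bit words, for example:
 *
 *	struct ibmvtpm_crq crq;
 *	u64 *buf = (u64 *)&crq;
 *
 *	crq.valid = (u8)IBMVTPM_VALID_CMD;
 *	crq.msg = (u8)VTPM_GET_VERSION;
 *	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
 */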

/**
 * ibmvtpm_get_data - Retrieve ibm vtpm data
 * @dev:	device struct
 *
 * Return value:
 *	vtpm device struct, or NULL if no chip is registered
 */
static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip)
		return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
	return NULL;
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 * @chip:	tpm chip struct
 * @buf:	buffer to read
 * @count:	size of buffer
 *
 * Return value:
 *	Number of bytes read, or a negative errno
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm;
	u16 len;
	int sig;

	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zu, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy(buf, ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * tpm_ibmvtpm_send - Send tpm request
 * @chip:	tpm chip struct
 * @buf:	buffer containing the data to send
 * @count:	size of buffer
 *
 * Return value:
 *	Number of bytes sent, or a negative errno
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct ibmvtpm_crq crq;
	u64 *word = (u64 *)&crq;
	int rc;

	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zu, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy(ibmvtpm->rtce_buf, buf, count);
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_TPM_COMMAND;
	crq.len = (u16)count;
	crq.data = ibmvtpm->rtce_dma_handle;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
	if (rc != H_SUCCESS) {
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		rc = 0;
	} else
		rc = count;

	spin_unlock(&ibmvtpm->rtce_lock);
	return rc;
}
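
/*
 * Note that tpm_ibmvtpm_send() only queues the command: the response
 * arrives asynchronously as a VTPM_TPM_COMMAND_RES entry on the CRQ.
 * ibmvtpm_crq_process() records the response length in res_len and
 * wakes ibmvtpm->wq, at which point tpm_ibmvtpm_recv() copies the
 * reply out of the rtce buffer.
 */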

static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	/* The CRQ transport has no cancel operation */
	return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	/* No status register; completion is driven by the CRQ response */
	return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce buffer size
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *)&crq;
	int rc;

	/* Zero the entry so unset fields are not stack garbage */
	memset(&crq, 0, sizeof(crq));
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *)&crq;
	int rc;

	/* Zero the entry so unset fields are not stack garbage */
	memset(&crq, 0, sizeof(crq));
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_VERSION;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ init complete message
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ init message
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
	int rc = 0;

	free_irq(vdev->irq, ibmvtpm);

	/* Free the CRQ, retrying while the hypervisor reports busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	tpm_remove_hardware(ibmvtpm->dev);

	kfree(ibmvtpm);

	return 0;
}
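
/*
 * Teardown above runs roughly in reverse probe order: release the irq,
 * free the CRQ (retrying while the hypervisor is busy), and only then
 * tear down the DMA mappings, so the hypervisor can no longer post
 * entries to pages that are being unmapped and freed.
 */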

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:	vio device struct
 *
 * Return value:
 *	Number of bytes the driver needs to DMA map at the same time
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:	device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *)&crq;
	int rc = 0;

	/* Zero the entry so unset fields are not stack garbage */
	memset(&crq, 0, sizeof(crq));
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	/* Free the old CRQ, retrying while the hypervisor reports busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev:	device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};
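
/*
 * cancel and status are effectively no-ops for this transport: with
 * req_complete_mask and req_complete_val both zero, the TPM core sees
 * every command as complete immediately, and actual completion is
 * gated by the wait queue inside tpm_ibmvtpm_recv().
 */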

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	The next valid crq in the queue, or NULL if none is pending
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/* Ensure the read of the valid bit occurs before any
		 * other field of the crq is read */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 *
 * Return value:
 *	Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (crq->len == 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = crq->len;
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_KERNEL);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = crq->data;
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = crq->len;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Return value:
 *	IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *)vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* The while loop is needed for initial setup as well: the
	 * responses to get version and get rtce buffer size are
	 * drained through this same handler.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}
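
/*
 * Barrier pairing with ibmvtpm_crq_get_next(): the reader checks the
 * valid byte and issues smp_rmb() before the rest of the entry is
 * used; the handler above clears crq->valid once the entry has been
 * consumed and follows the store with smp_wmb() so the slot is seen
 * as free before any later queue updates.
 */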

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
	if (!chip) {
		dev_err(dev, "tpm_register_hardware failed\n");
		return -ENODEV;
	}

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto free_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;
	TPM_VPRIV(chip) = (void *)ibmvtpm;

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto free_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto free_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto free_irq_cleanup;

	return rc;
free_irq_cleanup:
	/* Release the irq before freeing the CRQ it services */
	free_irq(vio_dev->irq, ibmvtpm);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	tpm_remove_hardware(dev);

	return rc;
}
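
/*
 * Probe-time handshake, in order: H_REG_CRQ registers the queue page,
 * INIT_CRQ_CMD / INIT_CRQ_COMP_CMD complete the CRQ initialization
 * exchange, then VTPM_GET_VERSION and VTPM_GET_RTCE_BUFFER_SIZE are
 * sent. Their responses come back through ibmvtpm_interrupt() and
 * ibmvtpm_crq_process(), which allocates and DMA-maps the rtce buffer
 * the send/recv paths depend on.
 */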
651
652static struct vio_driver ibmvtpm_driver = {
653 .id_table = tpm_ibmvtpm_device_table,
654 .probe = tpm_ibmvtpm_probe,
655 .remove = tpm_ibmvtpm_remove,
656 .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
657 .name = tpm_ibmvtpm_driver_name,
658 .pm = &tpm_ibmvtpm_pm_ops,
659};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module
 *
 * Return value:
 *	Nothing
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");