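/*
 * vTPM proxy driver: exposes a /dev/vtpmx control device from which a
 * userspace TPM emulator can create paired /dev/tpm%d devices.
 *
 * Author: Stefan Berger <stefanb@us.ibm.com>
 */
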
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/miscdevice.h>
#include <linux/vtpm_proxy.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/poll.h>
#include <linux/compat.h>

#include "tpm.h"

#define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0)

struct proxy_dev {
        struct tpm_chip *chip;

        u32 flags;                      /* flags passed in from userspace */

        wait_queue_head_t wq;

        struct mutex buf_lock;          /* protect buffer and state */

        long state;                     /* internal state */
#define STATE_OPENED_FLAG        BIT(0)
#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
#define STATE_REGISTERED_FLAG    BIT(2)
#define STATE_DRIVER_COMMAND     BIT(3) /* sending a driver specific command */

        size_t req_len;                 /* length of queued TPM request */
        size_t resp_len;                /* length of queued TPM response */
        u8 buffer[TPM_BUFSIZE];         /* request/response buffer */

        struct work_struct work;        /* task that registers the chip */
};

/* all supported flags */
#define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2)

static struct workqueue_struct *workqueue;

static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);
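
/*
 * Functions for the 'server side' of the anonymous file created by
 * VTPM_PROXY_IOC_NEW_DEV: the emulator reads TPM requests from it and
 * writes TPM responses back.
 */

/*
 * vtpm_proxy_fops_read - read a TPM request on the 'server side'
 * @filp: file pointer
 * @buf: read buffer
 * @count: number of bytes to read
 * @off: offset
 *
 * Return: number of bytes read or a negative error code
 */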
static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *off)
{
        struct proxy_dev *proxy_dev = filp->private_data;
        size_t len;
        int sig, rc;

        sig = wait_event_interruptible(proxy_dev->wq,
                                       proxy_dev->req_len != 0 ||
                                       !(proxy_dev->state & STATE_OPENED_FLAG));
        if (sig)
                return -EINTR;

        mutex_lock(&proxy_dev->buf_lock);

        if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
                mutex_unlock(&proxy_dev->buf_lock);
                return -EPIPE;
        }

        len = proxy_dev->req_len;

        if (count < len) {
                mutex_unlock(&proxy_dev->buf_lock);
                pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
                         count, len);
                return -EIO;
        }

        rc = copy_to_user(buf, proxy_dev->buffer, len);
        memset(proxy_dev->buffer, 0, len);
        proxy_dev->req_len = 0;

        if (!rc)
                proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;

        mutex_unlock(&proxy_dev->buf_lock);

        if (rc)
                return -EFAULT;

        return len;
}
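
/*
 * vtpm_proxy_fops_write - write a TPM response on the 'server side'
 * @filp: file pointer
 * @buf: write buffer
 * @count: number of bytes to write
 * @off: offset
 *
 * Return: number of bytes written or a negative error code
 */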
static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
                                     size_t count, loff_t *off)
{
        struct proxy_dev *proxy_dev = filp->private_data;

        mutex_lock(&proxy_dev->buf_lock);

        if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
                mutex_unlock(&proxy_dev->buf_lock);
                return -EPIPE;
        }

        if (count > sizeof(proxy_dev->buffer) ||
            !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
                mutex_unlock(&proxy_dev->buf_lock);
                return -EIO;
        }

        proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;

        proxy_dev->req_len = 0;

        if (copy_from_user(proxy_dev->buffer, buf, count)) {
                mutex_unlock(&proxy_dev->buf_lock);
                return -EFAULT;
        }

        proxy_dev->resp_len = count;

        mutex_unlock(&proxy_dev->buf_lock);

        wake_up_interruptible(&proxy_dev->wq);

        return count;
}
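
/*
 * vtpm_proxy_fops_poll - poll status on the 'server side'
 * @filp: file pointer
 * @wait: poll table
 *
 * Return: poll flags; EPOLLIN is set once a request is ready to be read,
 *         EPOLLHUP once the device has been closed
 */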
static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
        struct proxy_dev *proxy_dev = filp->private_data;
        __poll_t ret;

        poll_wait(filp, &proxy_dev->wq, wait);

        ret = EPOLLOUT;

        mutex_lock(&proxy_dev->buf_lock);

        if (proxy_dev->req_len)
                ret |= EPOLLIN | EPOLLRDNORM;

        if (!(proxy_dev->state & STATE_OPENED_FLAG))
                ret |= EPOLLHUP;

        mutex_unlock(&proxy_dev->buf_lock);

        return ret;
}
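
/*
 * vtpm_proxy_fops_open - called when setting up the anonymous file descriptor
 * @filp: file pointer
 */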
static void vtpm_proxy_fops_open(struct file *filp)
{
        struct proxy_dev *proxy_dev = filp->private_data;

        proxy_dev->state |= STATE_OPENED_FLAG;
}
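
/*
 * vtpm_proxy_fops_undo_open - counter-part to vtpm_proxy_fops_open
 * @proxy_dev: tpm proxy device
 *
 * Marks the device as no longer opened and wakes up all waiters.
 */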
static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
{
        mutex_lock(&proxy_dev->buf_lock);

        proxy_dev->state &= ~STATE_OPENED_FLAG;

        mutex_unlock(&proxy_dev->buf_lock);

        /* no more TPM responses -- wake up anyone waiting on the queue */
        wake_up_interruptible(&proxy_dev->wq);
}
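
/*
 * vtpm_proxy_fops_release - called on the final close of the file descriptor
 * @inode: inode
 * @filp: file pointer
 *
 * Return: always 0
 */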
static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
{
        struct proxy_dev *proxy_dev = filp->private_data;

        filp->private_data = NULL;

        vtpm_proxy_delete_device(proxy_dev);

        return 0;
}

static const struct file_operations vtpm_proxy_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = vtpm_proxy_fops_read,
        .write = vtpm_proxy_fops_write,
        .poll = vtpm_proxy_fops_poll,
        .release = vtpm_proxy_fops_release,
};
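
/*
 * Functions invoked by the TPM core ('client side') to exchange commands
 * and responses with the emulator through the shared buffer.
 */

/*
 * vtpm_proxy_tpm_op_recv - called when a response is to be received
 * @chip: tpm chip
 * @buf: receive buffer
 * @count: size of the receive buffer
 *
 * Return: number of bytes received or a negative error code
 */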
static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
        size_t len;

        /* the server-side process may be gone */
        mutex_lock(&proxy_dev->buf_lock);

        if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
                mutex_unlock(&proxy_dev->buf_lock);
                return -EPIPE;
        }

        len = proxy_dev->resp_len;
        if (count < len) {
                dev_err(&chip->dev,
                        "Invalid size in recv: count=%zd, resp_len=%zd\n",
                        count, len);
                len = -EIO;
                goto out;
        }

        memcpy(buf, proxy_dev->buffer, len);
        proxy_dev->resp_len = 0;

out:
        mutex_unlock(&proxy_dev->buf_lock);

        return len;
}

static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
                                        u8 *buf, size_t count)
{
        struct tpm_header *hdr = (struct tpm_header *)buf;

        if (count < sizeof(struct tpm_header))
                return 0;

        if (chip->flags & TPM_CHIP_FLAG_TPM2) {
                switch (be32_to_cpu(hdr->ordinal)) {
                case TPM2_CC_SET_LOCALITY:
                        return 1;
                }
        } else {
                switch (be32_to_cpu(hdr->ordinal)) {
                case TPM_ORD_SET_LOCALITY:
                        return 1;
                }
        }
        return 0;
}
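
/*
 * vtpm_proxy_tpm_op_send - called when a TPM command is to be sent
 * @chip: tpm chip
 * @buf: send buffer
 * @count: number of bytes to send
 *
 * Return: 0 on success or a negative error code
 */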
static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

        if (count > sizeof(proxy_dev->buffer)) {
                dev_err(&chip->dev,
                        "Invalid size in send: count=%zd, buffer size=%zd\n",
                        count, sizeof(proxy_dev->buffer));
                return -EIO;
        }

        if (!(proxy_dev->state & STATE_DRIVER_COMMAND) &&
            vtpm_proxy_is_driver_command(chip, buf, count))
                return -EFAULT;

        mutex_lock(&proxy_dev->buf_lock);

        if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
                mutex_unlock(&proxy_dev->buf_lock);
                return -EPIPE;
        }

        proxy_dev->resp_len = 0;

        proxy_dev->req_len = count;
        memcpy(proxy_dev->buffer, buf, count);

        proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;

        mutex_unlock(&proxy_dev->buf_lock);

        wake_up_interruptible(&proxy_dev->wq);

        return 0;
}

static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
{
        /* not supported */
}

static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
{
        struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

        if (proxy_dev->resp_len)
                return VTPM_PROXY_REQ_COMPLETE_FLAG;

        return 0;
}

static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
        bool ret;

        mutex_lock(&proxy_dev->buf_lock);

        ret = !(proxy_dev->state & STATE_OPENED_FLAG);

        mutex_unlock(&proxy_dev->buf_lock);

        return ret;
}

static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
{
        struct tpm_buf buf;
        int rc;
        const struct tpm_header *header;
        struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);

        if (chip->flags & TPM_CHIP_FLAG_TPM2)
                rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS,
                                  TPM2_CC_SET_LOCALITY);
        else
                rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND,
                                  TPM_ORD_SET_LOCALITY);
        if (rc)
                return rc;
        tpm_buf_append_u8(&buf, locality);

        proxy_dev->state |= STATE_DRIVER_COMMAND;

        rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality");

        proxy_dev->state &= ~STATE_DRIVER_COMMAND;

        if (rc < 0) {
                locality = rc;
                goto out;
        }

        header = (const struct tpm_header *)buf.data;
        rc = be32_to_cpu(header->return_code);
        if (rc)
                locality = -1;

out:
        tpm_buf_destroy(&buf);

        return locality;
}

static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
        .flags = TPM_OPS_AUTO_STARTUP,
        .recv = vtpm_proxy_tpm_op_recv,
        .send = vtpm_proxy_tpm_op_send,
        .cancel = vtpm_proxy_tpm_op_cancel,
        .status = vtpm_proxy_tpm_op_status,
        .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
        .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
        .req_canceled = vtpm_proxy_tpm_req_canceled,
        .request_locality = vtpm_proxy_request_locality,
};
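
/*
 * vtpm_proxy_work - register the chip with the TPM core
 *
 * Registration runs from a workqueue rather than from the creating ioctl:
 * with TPM_OPS_AUTO_STARTUP the core sends commands during registration
 * that the emulator can only answer once it has received its file
 * descriptor back from the ioctl.
 */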
static void vtpm_proxy_work(struct work_struct *work)
{
        struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
                                                   work);
        int rc;

        rc = tpm_chip_register(proxy_dev->chip);
        if (rc)
                vtpm_proxy_fops_undo_open(proxy_dev);
        else
                proxy_dev->state |= STATE_REGISTERED_FLAG;
}
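
/*
 * vtpm_proxy_work_stop - make sure the registration work has finished
 * @proxy_dev: tpm proxy device
 */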
static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
{
        vtpm_proxy_fops_undo_open(proxy_dev);
        flush_work(&proxy_dev->work);
}
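
/*
 * vtpm_proxy_work_start - schedule the chip registration work
 * @proxy_dev: tpm proxy device
 */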
static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
{
        queue_work(workqueue, &proxy_dev->work);
}
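
/*
 * vtpm_proxy_create_proxy_dev - allocate a proxy device and its tpm_chip
 *
 * Return: new proxy device or an ERR_PTR on failure
 */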
static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
{
        struct proxy_dev *proxy_dev;
        struct tpm_chip *chip;
        int err;

        proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
        if (proxy_dev == NULL)
                return ERR_PTR(-ENOMEM);

        init_waitqueue_head(&proxy_dev->wq);
        mutex_init(&proxy_dev->buf_lock);
        INIT_WORK(&proxy_dev->work, vtpm_proxy_work);

        chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
        if (IS_ERR(chip)) {
                err = PTR_ERR(chip);
                goto err_proxy_dev_free;
        }
        dev_set_drvdata(&chip->dev, proxy_dev);

        proxy_dev->chip = chip;

        return proxy_dev;

err_proxy_dev_free:
        kfree(proxy_dev);

        return ERR_PTR(err);
}
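
/*
 * vtpm_proxy_delete_proxy_dev - counter-part to vtpm_proxy_create_proxy_dev
 * @proxy_dev: tpm proxy device
 */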
static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
{
        put_device(&proxy_dev->chip->dev);
        kfree(proxy_dev);
}
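
/*
 * vtpm_proxy_create_device - create a new vTPM proxy device pair
 * @vtpm_new_dev: parameters for the new device; fd, major, minor and
 *                tpm_num are filled in on success
 *
 * Return: the anonymous 'server side' file or an ERR_PTR on failure
 */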
static struct file *vtpm_proxy_create_device(
                                 struct vtpm_proxy_new_dev *vtpm_new_dev)
{
        struct proxy_dev *proxy_dev;
        int rc, fd;
        struct file *file;

        if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
                return ERR_PTR(-EOPNOTSUPP);

        proxy_dev = vtpm_proxy_create_proxy_dev();
        if (IS_ERR(proxy_dev))
                return ERR_CAST(proxy_dev);

        proxy_dev->flags = vtpm_new_dev->flags;

        /* set up an anonymous file for the server side */
        fd = get_unused_fd_flags(O_RDWR);
        if (fd < 0) {
                rc = fd;
                goto err_delete_proxy_dev;
        }

        file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
                                  O_RDWR);
        if (IS_ERR(file)) {
                rc = PTR_ERR(file);
                goto err_put_unused_fd;
        }

        /* simulate an open() on the server side */
        vtpm_proxy_fops_open(file);

        if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
                proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;

        vtpm_proxy_work_start(proxy_dev);

        vtpm_new_dev->fd = fd;
        vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
        vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
        vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;

        return file;

err_put_unused_fd:
        put_unused_fd(fd);

err_delete_proxy_dev:
        vtpm_proxy_delete_proxy_dev(proxy_dev);

        return ERR_PTR(rc);
}
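
/*
 * vtpm_proxy_delete_device - counter-part to vtpm_proxy_create_device
 * @proxy_dev: tpm proxy device
 */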
static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
{
        vtpm_proxy_work_stop(proxy_dev);

        /*
         * Let any client blocked on the 'server side' know that it is
         * shutting down before trying to unregister the chip, which may
         * wait for outstanding transmissions to finish.
         */
        vtpm_proxy_fops_undo_open(proxy_dev);

        if (proxy_dev->state & STATE_REGISTERED_FLAG)
                tpm_chip_unregister(proxy_dev->chip);

        vtpm_proxy_delete_proxy_dev(proxy_dev);
}
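
/*
 * Code related to the control device /dev/vtpmx.
 */

/*
 * vtpmx_ioc_new_dev - handler for the VTPM_PROXY_IOC_NEW_DEV ioctl
 * @file: /dev/vtpmx file pointer
 * @ioctl: ioctl number
 * @arg: pointer to a struct vtpm_proxy_new_dev in userspace
 *
 * Creates a new TPM device backed by a userspace emulator and hands the
 * 'server side' file descriptor back to the caller.
 *
 * Illustrative userspace sketch (an assumption about a typical caller,
 * not part of this driver); error handling omitted:
 *
 *	struct vtpm_proxy_new_dev new_dev = {
 *		.flags = VTPM_PROXY_FLAG_TPM2,
 *	};
 *	int ctrl = open("/dev/vtpmx", O_RDWR);
 *
 *	ioctl(ctrl, VTPM_PROXY_IOC_NEW_DEV, &new_dev);
 *
 * On return, new_dev.fd is the emulator's end (read TPM requests, write
 * TPM responses) and new_dev.tpm_num, new_dev.major and new_dev.minor
 * identify the created /dev/tpm%d character device.
 *
 * Return: 0 on success or a negative error code
 */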
static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
                              unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
        struct vtpm_proxy_new_dev vtpm_new_dev;
        struct file *vtpm_file;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vtpm_new_dev_p = argp;

        if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
                           sizeof(vtpm_new_dev)))
                return -EFAULT;

        vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
        if (IS_ERR(vtpm_file))
                return PTR_ERR(vtpm_file);

        if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
                         sizeof(vtpm_new_dev))) {
                put_unused_fd(vtpm_new_dev.fd);
                fput(vtpm_file);
                return -EFAULT;
        }

        fd_install(vtpm_new_dev.fd, vtpm_file);
        return 0;
}
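
/*
 * vtpmx_fops_ioctl - ioctl dispatcher for /dev/vtpmx
 * @f: file pointer
 * @ioctl: ioctl number
 * @arg: ioctl argument
 *
 * Return: 0 on success or a negative error code
 */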
static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
{
        switch (ioctl) {
        case VTPM_PROXY_IOC_NEW_DEV:
                return vtpmx_ioc_new_dev(f, ioctl, arg);
        default:
                return -ENOIOCTLCMD;
        }
}

static const struct file_operations vtpmx_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = vtpmx_fops_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
};

static struct miscdevice vtpmx_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "vtpmx",
        .fops = &vtpmx_fops,
};

static int vtpmx_init(void)
{
        return misc_register(&vtpmx_miscdev);
}

static void vtpmx_cleanup(void)
{
        misc_deregister(&vtpmx_miscdev);
}

static int __init vtpm_module_init(void)
{
        int rc;

        rc = vtpmx_init();
        if (rc) {
                pr_err("couldn't create vtpmx device\n");
                return rc;
        }

        workqueue = create_workqueue("tpm-vtpm");
        if (!workqueue) {
                pr_err("couldn't create workqueue\n");
                rc = -ENOMEM;
                goto err_vtpmx_cleanup;
        }

        return 0;

err_vtpmx_cleanup:
        vtpmx_cleanup();

        return rc;
}

static void __exit vtpm_module_exit(void)
{
        destroy_workqueue(workqueue);
        vtpmx_cleanup();
}

module_init(vtpm_module_init);
module_exit(vtpm_module_exit);

MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
MODULE_DESCRIPTION("vTPM Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");