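/*
 * Device driver for TPM 1.2 chips that expose the memory-mapped register
 * interface defined by the TCG TPM Interface Specification (TIS).
 */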
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
        TPM_ACCESS_VALID = 0x80,
        TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
        TPM_ACCESS_REQUEST_PENDING = 0x04,
        TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
        TPM_STS_VALID = 0x80,
        TPM_STS_COMMAND_READY = 0x40,
        TPM_STS_GO = 0x20,
        TPM_STS_DATA_AVAIL = 0x10,
        TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
        TPM_GLOBAL_INT_ENABLE = 0x80000000,
        TPM_INTF_BURST_COUNT_STATIC = 0x100,
        TPM_INTF_CMD_READY_INT = 0x080,
        TPM_INTF_INT_EDGE_FALLING = 0x040,
        TPM_INTF_INT_EDGE_RISING = 0x020,
        TPM_INTF_INT_LEVEL_LOW = 0x010,
        TPM_INTF_INT_LEVEL_HIGH = 0x008,
        TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
        TPM_INTF_STS_VALID_INT = 0x002,
        TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
        TIS_MEM_BASE = 0xFED40000,
        TIS_MEM_LEN = 0x5000,
        TIS_SHORT_TIMEOUT = 750,        /* ms */
        TIS_LONG_TIMEOUT = 2000,        /* ms */
};

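/*
 * TIS register offsets.  Each locality occupies its own 4 KiB page of the
 * memory-mapped register space, so a register address is formed by shifting
 * the locality number into bits 12-15 and OR-ing in the register offset.
 */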
#define TPM_ACCESS(l)                   (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)               (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)               (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)               (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)                (0x0014 | ((l) << 12))
#define TPM_STS(l)                      (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)                (0x0024 | ((l) << 12))

#define TPM_DID_VID(l)                  (0x0F00 | ((l) << 12))
#define TPM_RID(l)                      (0x0F04 | ((l) << 12))

static LIST_HEAD(tis_chips);
static DEFINE_MUTEX(tis_lock);

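/*
 * Detect Intel integrated TPMs (iTPM) by their ACPI hardware ID so that the
 * TPM_STS_DATA_EXPECT workaround can be enabled automatically.
 */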
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
        struct acpi_device *acpi = pnp_acpi_device(dev);
        struct acpi_hardware_id *id;

        list_for_each_entry(id, &acpi->pnp.ids, list) {
                if (!strcmp("INTC0102", id->id))
                        return 1;
        }

        return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
        return 0;
}
#endif

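/*
 * Returns the locality number if it is currently active (both the valid and
 * activeLocality bits are set in TPM_ACCESS), or -1 otherwise.
 */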
static int check_locality(struct tpm_chip *chip, int l)
{
        if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
             (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
                return chip->vendor.locality = l;

        return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
        if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
                      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
            (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
                iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
                         chip->vendor.iobase + TPM_ACCESS(l));
}

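/*
 * Request the given locality and wait (interrupt- or poll-driven) until the
 * TPM grants it.  Returns the locality number on success or -1 on timeout.
 */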
static int request_locality(struct tpm_chip *chip, int l)
{
        unsigned long stop, timeout;
        long rc;

        if (check_locality(chip, l) >= 0)
                return l;

        iowrite8(TPM_ACCESS_REQUEST_USE,
                 chip->vendor.iobase + TPM_ACCESS(l));

        stop = jiffies + chip->vendor.timeout_a;

        if (chip->vendor.irq) {
again:
                timeout = stop - jiffies;
                if ((long)timeout <= 0)
                        return -1;
                rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
                                                      (check_locality
                                                       (chip, l) >= 0),
                                                      timeout);
                if (rc > 0)
                        return l;
                if (rc == -ERESTARTSYS && freezing(current)) {
                        clear_thread_flag(TIF_SIGPENDING);
                        goto again;
                }
        } else {
                /* poll until the locality becomes active */
                do {
                        if (check_locality(chip, l) >= 0)
                                return l;
                        msleep(TPM_TIMEOUT);
                } while (time_before(jiffies, stop));
        }
        return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
        return ioread8(chip->vendor.iobase +
                       TPM_STS(chip->vendor.locality));
}

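/* Writing commandReady aborts any command in progress and readies the TPM. */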
static void tpm_tis_ready(struct tpm_chip *chip)
{
        iowrite8(TPM_STS_COMMAND_READY,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

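/*
 * Read the 16-bit burst count from TPM_STS, i.e. how many bytes the FIFO can
 * accept or supply without further handshaking.  Polls until it is non-zero
 * or timeout_d expires.
 */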
static int get_burstcount(struct tpm_chip *chip)
{
        unsigned long stop;
        int burstcnt;

        stop = jiffies + chip->vendor.timeout_d;
        do {
                burstcnt = ioread8(chip->vendor.iobase +
                                   TPM_STS(chip->vendor.locality) + 1);
                burstcnt += ioread8(chip->vendor.iobase +
                                    TPM_STS(chip->vendor.locality) +
                                    2) << 8;
                if (burstcnt)
                        return burstcnt;
                msleep(TPM_TIMEOUT);
        } while (time_before(jiffies, stop));
        return -EBUSY;
}

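/* Drain up to 'count' bytes from the data FIFO into 'buf', burst by burst. */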
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0, burstcnt;

        while (size < count &&
               wait_for_tpm_stat(chip,
                                 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                                 chip->vendor.timeout_c,
                                 &chip->vendor.read_queue)
               == 0) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && size < count; burstcnt--)
                        buf[size++] = ioread8(chip->vendor.iobase +
                                              TPM_DATA_FIFO(chip->vendor.
                                                            locality));
        }
        return size;
}

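/*
 * Receive a complete response: read the header first, take the total length
 * from its paramSize field, read the remainder, and check that no extra data
 * is left in the FIFO.
 */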
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        int size = 0;
        int expected, status;

        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }

        /* read the TPM_HEADER_SIZE header: tag, paramSize and return code */
        if ((size =
             recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
                dev_err(chip->dev, "Unable to read header\n");
                goto out;
        }

        expected = be32_to_cpu(*(__be32 *) (buf + 2));
        if (expected > count) {
                size = -EIO;
                goto out;
        }

        if ((size +=
             recv_data(chip, &buf[TPM_HEADER_SIZE],
                       expected - TPM_HEADER_SIZE)) < expected) {
                dev_err(chip->dev, "Unable to read remainder of result\n");
                size = -ETIME;
                goto out;
        }

        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if (status & TPM_STS_DATA_AVAIL) {
                dev_err(chip->dev, "Error left over data\n");
                size = -EIO;
                goto out;
        }

out:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

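/*
 * Push a command into the TPM's FIFO, respecting the burst count, but do not
 * start execution; the caller writes TPM_STS_GO afterwards.  The TPM should
 * keep asserting dataExpect until only the last byte remains.
 */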
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc, status, burstcnt;
        size_t count = 0;

        if (request_locality(chip, 0) < 0)
                return -EBUSY;

        status = tpm_tis_status(chip);
        if ((status & TPM_STS_COMMAND_READY) == 0) {
                tpm_tis_ready(chip);
                if (wait_for_tpm_stat
                    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
                     &chip->vendor.int_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }

        while (count < len - 1) {
                burstcnt = get_burstcount(chip);
                for (; burstcnt > 0 && count < len - 1; burstcnt--) {
                        iowrite8(buf[count], chip->vendor.iobase +
                                 TPM_DATA_FIFO(chip->vendor.locality));
                        count++;
                }

                wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                                  &chip->vendor.int_queue);
                status = tpm_tis_status(chip);
                if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
                        rc = -EIO;
                        goto out_err;
                }
        }

        /* write last byte */
        iowrite8(buf[count],
                 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
        wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
                          &chip->vendor.int_queue);
        status = tpm_tis_status(chip);
        if ((status & TPM_STS_DATA_EXPECT) != 0) {
                rc = -EIO;
                goto out_err;
        }

        return 0;

out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

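/*
 * Send a command and start its execution.  If interrupts are in use
 * (chip->vendor.irq is set), the data-available interrupt is waited for
 * here, so tpm.c does not have to poll for the result.
 */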
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
        int rc;
        u32 ordinal;

        rc = tpm_tis_send_data(chip, buf, len);
        if (rc < 0)
                return rc;

        /* go and do it */
        iowrite8(TPM_STS_GO,
                 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

        if (chip->vendor.irq) {
                ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
                if (wait_for_tpm_stat
                    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
                     tpm_calc_ordinal_duration(chip, ordinal),
                     &chip->vendor.read_queue) < 0) {
                        rc = -ETIME;
                        goto out_err;
                }
        }
        return len;
out_err:
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);
        return rc;
}

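/*
 * Probe for an iTPM with the STS_DATA_EXPECT quirk: first try queueing a
 * harmless TPM_GetTicks command with the workaround disabled and, if that
 * fails, retry with the workaround enabled.  Returns 1 if an iTPM was
 * detected, 0 if not, or a negative error code.
 */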
static int probe_itpm(struct tpm_chip *chip)
{
        int rc = 0;
        u8 cmd_getticks[] = {
                0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
                0x00, 0x00, 0x00, 0xf1
        };
        size_t len = sizeof(cmd_getticks);
        bool rem_itpm = itpm;
        u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

        /* probe only Intel TPMs */
        if (vendor != TPM_VID_INTEL)
                return 0;

        itpm = false;

        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0)
                goto out;

        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);

        itpm = true;

        rc = tpm_tis_send_data(chip, cmd_getticks, len);
        if (rc == 0) {
                dev_info(chip->dev, "Detected an iTPM.\n");
                rc = 1;
        } else
                rc = -EFAULT;

out:
        itpm = rem_itpm;
        tpm_tis_ready(chip);
        release_locality(chip, chip->vendor.locality, 0);

        return rc;
}

static const struct file_operations tis_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
                   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);

static struct attribute *tis_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
        &dev_attr_active.attr,
        &dev_attr_owned.attr,
        &dev_attr_temp_deactivated.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
        NULL,
};

static struct attribute_group tis_attr_grp = {
        .attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
        .status = tpm_tis_status,
        .recv = tpm_tis_recv,
        .send = tpm_tis_send,
        .cancel = tpm_tis_ready,
        .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
        .req_canceled = TPM_STS_COMMAND_READY,
        .attr_group = &tis_attr_grp,
        .miscdev = {
                .fops = &tis_ops,
        },
};

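/*
 * Minimal interrupt handler used only while probing for a working IRQ line:
 * it records which vector fired and acknowledges the interrupt.
 */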
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        chip->vendor.probed_irq = irq;

        /* acknowledge the interrupt by writing the status bits back */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

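/*
 * Main interrupt handler: wakes readers on dataAvail, re-checks the active
 * locality on localityChange, wakes status waiters on the other interrupt
 * sources, then acknowledges everything in TPM_INT_STATUS.
 */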
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
        struct tpm_chip *chip = dev_id;
        u32 interrupt;
        int i;

        interrupt = ioread32(chip->vendor.iobase +
                             TPM_INT_STATUS(chip->vendor.locality));

        if (interrupt == 0)
                return IRQ_NONE;

        if (interrupt & TPM_INTF_DATA_AVAIL_INT)
                wake_up_interruptible(&chip->vendor.read_queue);
        if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
                for (i = 0; i < 5; i++)
                        if (check_locality(chip, i) >= 0)
                                break;
        if (interrupt &
            (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
             TPM_INTF_CMD_READY_INT))
                wake_up_interruptible(&chip->vendor.int_queue);

        /* acknowledge the handled interrupts and flush the write */
        iowrite32(interrupt,
                  chip->vendor.iobase +
                  TPM_INT_STATUS(chip->vendor.locality));
        ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
        return IRQ_HANDLED;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

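/*
 * Map the TIS register window, claim locality 0, report the device, apply
 * the iTPM quirk if needed, read the interface capabilities, fetch timeouts,
 * run the self test and, if enabled, probe for and install an IRQ handler.
 */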
static int tpm_tis_init(struct device *dev, resource_size_t start,
                        resource_size_t len, unsigned int irq)
{
        u32 vendor, intfcaps, intmask;
        int rc, i, irq_s, irq_e, probe;
        struct tpm_chip *chip;

        chip = tpm_register_hardware(dev, &tpm_tis);
        if (!chip)
                return -ENODEV;

        chip->vendor.iobase = ioremap(start, len);
        if (!chip->vendor.iobase) {
                rc = -EIO;
                goto out_err;
        }

        /* default timeouts */
        chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
        chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
        chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

        if (request_locality(chip, 0) != 0) {
                rc = -ENODEV;
                goto out_err;
        }

        vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

        dev_info(dev,
                 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
                 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

        if (!itpm) {
                probe = probe_itpm(chip);
                if (probe < 0) {
                        rc = -ENODEV;
                        goto out_err;
                }
                itpm = !!probe;
        }

        if (itpm)
                dev_info(dev, "Intel iTPM workaround enabled\n");

        /* figure out the interface capabilities */
        intfcaps =
            ioread32(chip->vendor.iobase +
                     TPM_INTF_CAPS(chip->vendor.locality));
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
        if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
                dev_dbg(dev, "\tBurst Count Static\n");
        if (intfcaps & TPM_INTF_CMD_READY_INT)
                dev_dbg(dev, "\tCommand Ready Int Support\n");
        if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
                dev_dbg(dev, "\tInterrupt Edge Falling\n");
        if (intfcaps & TPM_INTF_INT_EDGE_RISING)
                dev_dbg(dev, "\tInterrupt Edge Rising\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
                dev_dbg(dev, "\tInterrupt Level Low\n");
        if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
                dev_dbg(dev, "\tInterrupt Level High\n");
        if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
                dev_dbg(dev, "\tLocality Change Int Support\n");
        if (intfcaps & TPM_INTF_STS_VALID_INT)
                dev_dbg(dev, "\tSts Valid Int Support\n");
        if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
                dev_dbg(dev, "\tData Avail Int Support\n");

        if (tpm_get_timeouts(chip)) {
                dev_err(dev, "Could not get TPM timeouts and durations\n");
                rc = -ENODEV;
                goto out_err;
        }

        if (tpm_do_selftest(chip)) {
                dev_err(dev, "TPM self test failed\n");
                rc = -ENODEV;
                goto out_err;
        }

        /* interrupt setup */
        init_waitqueue_head(&chip->vendor.read_queue);
        init_waitqueue_head(&chip->vendor.int_queue);

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT;

        iowrite32(intmask,
                  chip->vendor.iobase +
                  TPM_INT_ENABLE(chip->vendor.locality));
        if (interrupts)
                chip->vendor.irq = irq;
        if (interrupts && !chip->vendor.irq) {
                irq_s =
                    ioread8(chip->vendor.iobase +
                            TPM_INT_VECTOR(chip->vendor.locality));
                if (irq_s) {
                        irq_e = irq_s;
                } else {
                        irq_s = 3;
                        irq_e = 15;
                }

                for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
                        iowrite8(i, chip->vendor.iobase +
                                 TPM_INT_VECTOR(chip->vendor.locality));
                        if (request_irq
                            (i, tis_int_probe, IRQF_SHARED,
                             chip->vendor.miscdev.name, chip) != 0) {
                                dev_info(chip->dev,
                                         "Unable to request irq: %d for probe\n",
                                         i);
                                continue;
                        }

                        /* clear all existing interrupt flags */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* turn on interrupts */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));

                        chip->vendor.probed_irq = 0;

                        /* have the core issue a command so the TPM raises
                           an interrupt on this vector */
                        tpm_gen_interrupt(chip);

                        chip->vendor.irq = chip->vendor.probed_irq;

                        /* clear any remaining flags, turn interrupts back
                           off and release the probe IRQ before trying the
                           next vector */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        iowrite32(intmask,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                        free_irq(i, chip);
                }
        }
        if (chip->vendor.irq) {
                iowrite8(chip->vendor.irq,
                         chip->vendor.iobase +
                         TPM_INT_VECTOR(chip->vendor.locality));
                if (request_irq
                    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
                     chip->vendor.miscdev.name, chip) != 0) {
                        dev_info(chip->dev,
                                 "Unable to request irq: %d for use\n",
                                 chip->vendor.irq);
                        chip->vendor.irq = 0;
                } else {
                        /* clear all existing interrupt flags */
                        iowrite32(ioread32
                                  (chip->vendor.iobase +
                                   TPM_INT_STATUS(chip->vendor.locality)),
                                  chip->vendor.iobase +
                                  TPM_INT_STATUS(chip->vendor.locality));

                        /* turn on interrupts */
                        iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
                                  chip->vendor.iobase +
                                  TPM_INT_ENABLE(chip->vendor.locality));
                }
        }

        INIT_LIST_HEAD(&chip->vendor.list);
        mutex_lock(&tis_lock);
        list_add(&chip->vendor.list, &tis_chips);
        mutex_unlock(&tis_lock);

        return 0;
out_err:
        if (chip->vendor.iobase)
                iounmap(chip->vendor.iobase);
        tpm_remove_hardware(chip->dev);
        return rc;
}

#if defined(CONFIG_PNP) || defined(CONFIG_PM_SLEEP)
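/*
 * Re-program the interrupt vector and re-enable the interrupt sources on
 * resume, since the TPM may have lost that state while suspended.
 */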
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
        u32 intmask;

        iowrite8(chip->vendor.irq, chip->vendor.iobase +
                 TPM_INT_VECTOR(chip->vendor.locality));

        intmask =
            ioread32(chip->vendor.iobase +
                     TPM_INT_ENABLE(chip->vendor.locality));

        intmask |= TPM_INTF_CMD_READY_INT
            | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
            | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}
#endif

#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
                                      const struct pnp_device_id *pnp_id)
{
        resource_size_t start, len;
        unsigned int irq = 0;

        start = pnp_mem_start(pnp_dev, 0);
        len = pnp_mem_len(pnp_dev, 0);

        if (pnp_irq_valid(pnp_dev, 0))
                irq = pnp_irq(pnp_dev, 0);
        else
                interrupts = false;

        if (is_itpm(pnp_dev))
                itpm = true;

        return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
        return tpm_pm_suspend(&dev->dev);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);
        int ret;

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        ret = tpm_pm_resume(&dev->dev);
        if (!ret)
                tpm_do_selftest(chip);

        return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
        {"IFX0102", 0},         /* Infineon */
        {"BCM0101", 0},         /* Broadcom */
        {"BCM0102", 0},         /* Broadcom */
        {"NSC1200", 0},         /* National */
        {"ICO0102", 0},         /* Intel */
        /* add new entries above this line */
        {"", 0},                /* user-specified HID (module parameter) */
        {"", 0}                 /* terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
        struct tpm_chip *chip = pnp_get_drvdata(dev);

        tpm_dev_vendor_release(chip);

        kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
        .name = "tpm_tis",
        .id_table = tpm_pnp_tbl,
        .probe = tpm_tis_pnp_init,
        .suspend = tpm_tis_pnp_suspend,
        .resume = tpm_tis_pnp_resume,
        .remove = tpm_tis_pnp_remove,
};

/* index of the user-specified HID slot, just before the terminator */
#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
                    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif

#ifdef CONFIG_PM_SLEEP
static int tpm_tis_resume(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);

        if (chip->vendor.irq)
                tpm_tis_reenable_interrupts(chip);

        return tpm_pm_resume(dev);
}
#endif

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);

static struct platform_driver tis_drv = {
        .driver = {
                .name = "tpm_tis",
                .owner = THIS_MODULE,
                .pm = &tpm_tis_pm,
        },
};

static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");

static int __init init_tis(void)
{
        int rc;
#ifdef CONFIG_PNP
        if (!force)
                return pnp_register_driver(&tis_pnp_driver);
#endif

        rc = platform_driver_register(&tis_drv);
        if (rc < 0)
                return rc;
        pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
        if (IS_ERR(pdev)) {
                platform_driver_unregister(&tis_drv);
                return PTR_ERR(pdev);
        }
        rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
        if (rc != 0) {
                platform_device_unregister(pdev);
                platform_driver_unregister(&tis_drv);
        }
        return rc;
}

static void __exit cleanup_tis(void)
{
        struct tpm_vendor_specific *i, *j;
        struct tpm_chip *chip;

        mutex_lock(&tis_lock);
        list_for_each_entry_safe(i, j, &tis_chips, list) {
                chip = to_tpm_chip(i);
                tpm_remove_hardware(chip->dev);
                iowrite32(~TPM_GLOBAL_INT_ENABLE &
                          ioread32(chip->vendor.iobase +
                                   TPM_INT_ENABLE(chip->vendor.locality)),
                          chip->vendor.iobase +
                          TPM_INT_ENABLE(chip->vendor.locality));
                release_locality(chip, chip->vendor.locality, 1);
                if (chip->vendor.irq)
                        free_irq(chip->vendor.irq, chip);
                iounmap(i->iobase);
                list_del(&i->list);
        }
        mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
        if (!force) {
                pnp_unregister_driver(&tis_pnp_driver);
                return;
        }
#endif
        platform_device_unregister(pdev);
        platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
895