#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include "tpm.h"

#define TPM_HEADER_SIZE 10

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,
	TIS_LONG_TIMEOUT = 2000,
};
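/*
 * Note on the defaults above: 0xFED40000 is the MMIO base address the TCG
 * TIS specification assigns to the TPM register banks, each locality owns
 * a 4 KiB window, and five localities (0-4) account for the 0x5000 length.
 * The short/long timeouts are milliseconds; they are converted to jiffies
 * in tpm_tis_init() below.
 */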
67
68#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
69#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
70#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
71#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
72#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
73#define TPM_STS(l) (0x0018 | ((l) << 12))
74#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
75
76#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
77#define TPM_RID(l) (0x0F04 | ((l) << 12))
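/*
 * The macros above encode the locality number in bits 12-15 of the register
 * offset, i.e. the registers for locality l start at iobase + (l << 12).
 * A worked example, for illustration only: with the default TIS_MEM_BASE of
 * 0xFED40000, TPM_STS(0) maps to physical address 0xFED40018 and TPM_STS(2)
 * to 0xFED42018.
 */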

static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);

#ifdef CONFIG_ACPI
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
static int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif

static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	if (chip->vendor.irq) {
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      check_locality(chip, l) >= 0,
						      chip->vendor.timeout_a);
		if (rc > 0)
			return l;
	} else {
		stop = jiffies + chip->vendor.timeout_a;
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		} while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}

static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount to become non-zero */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) + 2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}
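/*
 * get_burstcount() reads the bytes at TPM_STS + 1 and TPM_STS + 2 because
 * the TIS status register is wider than one byte: the 16-bit burst count
 * (how many FIFO bytes can be moved before the next status poll) sits in
 * bits 8-23.  A sketch of the same read done as one 32-bit access, for
 * illustration only:
 *
 *	u32 sts = ioread32(chip->vendor.iobase +
 *			   TPM_STS(chip->vendor.locality));
 *	int burstcnt = (sts >> 8) & 0xFFFF;
 */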

static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue)
{
	unsigned long stop;
	long rc;
	u8 status;

	/* check current status */
	status = tpm_tis_status(chip);
	if ((status & mask) == mask)
		return 0;

	if (chip->vendor.irq) {
		rc = wait_event_interruptible_timeout(*queue,
						      (tpm_tis_status(chip) & mask) == mask,
						      timeout);
		if (rc > 0)
			return 0;
	} else {
		stop = jiffies + timeout;
		do {
			msleep(TPM_TIMEOUT);
			status = tpm_tis_status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}

static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;

	while (size < count &&
	       wait_for_stat(chip,
			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
			     chip->vendor.timeout_c,
			     &chip->vendor.read_queue) == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.locality));
	}
	return size;
}
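/*
 * The offsets used below come from the TPM 1.2 command/response header:
 * bytes 0-1 hold the tag, bytes 2-5 the big-endian total length (which is
 * why tpm_tis_recv() takes the expected size from buf + 2), and bytes 6-9
 * the ordinal or return code (which is why tpm_tis_send() reads the ordinal
 * from buf + 6).  TPM_HEADER_SIZE is the 10 bytes covering these fields.
 */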

static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
	size = recv_data(chip, buf, TPM_HEADER_SIZE);
	if (size < TPM_HEADER_SIZE) {
		dev_err(chip->dev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
			  expected - TPM_HEADER_SIZE);
	if (size < expected) {
		dev_err(chip->dev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
		      &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {
		dev_err(chip->dev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static int itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available, as the interrupt
 * is waited for here.
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;
	u32 ordinal;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_stat(chip, TPM_STS_COMMAND_READY,
				  chip->vendor.timeout_b,
				  &chip->vendor.int_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			      &chip->vendor.int_queue);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
		      &chip->vendor.int_queue);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
		if (wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				  tpm_calc_ordinal_duration(chip, ordinal),
				  &chip->vendor.read_queue) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;
out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

static const struct file_operations tis_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *tis_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	NULL,
};

static struct attribute_group tis_attr_grp = {
	.attrs = tis_attrs
};

static struct tpm_vendor_specific tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = TPM_STS_COMMAND_READY,
	.attr_group = &tis_attr_grp,
	.miscdev = {
		    .fops = &tis_ops,
	},
};
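/*
 * The mask/val fields above are meant for the common TPM core (tpm.c),
 * which compares the value returned by ->status() against
 * req_complete_mask/req_complete_val to detect a finished command and
 * against req_canceled to detect one aborted via ->cancel().  This is a
 * description of how the core is expected to use them, not something
 * enforced in this file.
 */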

static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.irq = irq;

	/* clear the interrupt status bits we just handled */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* clear the interrupt status bits we just handled */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i;
	struct tpm_chip *chip;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* interrupt setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		chip->vendor.irq =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));

		/* probe for a working IRQ: try each candidate in turn */
		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				 TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq(i, tis_int_probe, IRQF_SHARED,
					chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* clear all existing interrupt status bits */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* turn on interrupts */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			/* generate an interrupt by issuing a command */
			tpm_gen_interrupt(chip);

			/* turn interrupts back off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq(chip->vendor.irq, tis_int_handler, IRQF_SHARED,
				chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			chip->vendor.irq = 0;
		} else {
			/* clear all existing interrupt status bits */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* turn on interrupts */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	INIT_LIST_HEAD(&chip->vendor.list);
	spin_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	spin_unlock(&tis_lock);

	tpm_get_timeouts(chip);
	tpm_continue_selftest(chip);

	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}
#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
				      const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = 0;

	if (is_itpm(pnp_dev))
		itpm = 1;

	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);
	int ret;

	ret = tpm_pm_resume(&dev->dev);
	if (!ret)
		tpm_continue_selftest(chip);

	return ret;
}

static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new entries here */
	{"", 0},		/* User-specified HID (see "hid" parameter) */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.suspend = tpm_tis_pnp_suspend,
	.resume = tpm_tis_pnp_resume,
	.remove = tpm_tis_pnp_remove,
};

#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl) / sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif

static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}

static int tpm_tis_resume(struct platform_device *dev)
{
	return tpm_pm_resume(&dev->dev);
}

static struct platform_driver tis_drv = {
	.driver = {
		   .name = "tpm_tis",
		   .owner = THIS_MODULE,
		   },
	.suspend = tpm_tis_suspend,
	.resume = tpm_tis_resume,
};

static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
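/*
 * Summary of the module parameters defined in this file, with an example
 * invocation (values shown are placeholders, for illustration only):
 *
 *	modprobe tpm_tis interrupts=0 itpm=1 force=1 hid=ABCD1234
 *
 * "interrupts" enables interrupt-driven operation, "itpm" forces the Intel
 * iTPM workaround, "force" probes the default MMIO range instead of relying
 * on a PNP/ACPI entry, and "hid" (PNP builds only) adds one extra PNP HID
 * for the driver to match.
 */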
static int __init init_tis(void)
{
	int rc;
#ifdef CONFIG_PNP
	if (!force)
		return pnp_register_driver(&tis_pnp_driver);
#endif

	rc = platform_driver_register(&tis_drv);
	if (rc < 0)
		return rc;
	pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&tis_drv);
		return PTR_ERR(pdev);
	}
	rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
	if (rc != 0) {
		platform_device_unregister(pdev);
		platform_driver_unregister(&tis_drv);
	}
	return rc;
}

static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;

	spin_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	spin_unlock(&tis_lock);
#ifdef CONFIG_PNP
	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
		return;
	}
#endif
	platform_device_unregister(pdev);
	platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");