1/*
2 * Copyright (C) 2005, 2006 IBM Corporation
3 *
4 * Authors:
5 * Leendert van Doorn <leendert@watson.ibm.com>
6 * Kylene Hall <kjhall@us.ibm.com>
7 *
8 * Device driver for TCG/TCPA TPM (trusted platform module).
9 * Specifications at www.trustedcomputinggroup.org
10 *
11 * This device driver implements the TPM interface as defined in
12 * the TCG TPM Interface Spec version 1.2, revision 1.0.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation, version 2 of the
17 * License.
18 */
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/pnp.h>
23#include <linux/interrupt.h>
24#include <linux/wait.h>
25#include "tpm.h"
26
/* Fixed TPM command/response header: tag, paramsize, ordinal/result
 * (read first by tpm_tis_recv() before the variable-length body). */
#define TPM_HEADER_SIZE 10
28
/* Bit definitions for the per-locality TPM_ACCESS register. */
enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};
35
/* Bit definitions for the per-locality TPM_STS status register. */
enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};
43
/* Interrupt enable/status bits (TPM_INT_ENABLE, TPM_INT_STATUS) and
 * interface capability bits (TPM_INTF_CAPS). */
enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};
56
/* Defaults used when the caller passes no MMIO range or timeouts. */
enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};
63
/* Register offsets within the mapped MMIO window; each locality (l)
 * occupies a 4K page, so the locality number selects bits 12..15. */
#define	TPM_ACCESS(l)			(0x0000 | ((l) << 12))
#define	TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
#define	TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
#define	TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
#define	TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
#define	TPM_STS(l)			(0x0018 | ((l) << 12))
#define	TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))

#define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
#define	TPM_RID(l)			(0x0F04 | ((l) << 12))
74
/* All chips registered by this driver, protected by tis_lock;
 * walked at module unload in cleanup_tis(). */
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
77
78static int check_locality(struct tpm_chip *chip, int l)
79{
80	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
81	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
82	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
83		return chip->vendor.locality = l;
84
85	return -1;
86}
87
88static void release_locality(struct tpm_chip *chip, int l, int force)
89{
90	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
91		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
92	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
93		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
94			 chip->vendor.iobase + TPM_ACCESS(l));
95}
96
/*
 * Request use of locality @l and wait (up to timeout_a) for the chip
 * to grant it.  Uses the locality-change interrupt when one has been
 * discovered, otherwise polls.  Returns @l on success, -1 on timeout.
 */
static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop;
	long rc;

	/* already active? nothing to do */
	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	if (chip->vendor.irq) {
		/* woken by tis_int_handler() on a locality-change interrupt */
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      chip->vendor.timeout_a);
		if (rc > 0)
			return l;

	} else {
		/* wait for burstcount */
		stop = jiffies + chip->vendor.timeout_a;
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		}
		while (time_before(jiffies, stop));
	}
	return -1;
}
128
129static u8 tpm_tis_status(struct tpm_chip *chip)
130{
131	return ioread8(chip->vendor.iobase +
132		       TPM_STS(chip->vendor.locality));
133}
134
/* Write commandReady to TPM_STS; also used as the cancel operation. */
static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
141
/*
 * Poll the 16-bit burst count (bytes the FIFO can accept/supply
 * without waiting), assembled from the two bytes following TPM_STS.
 * Returns the burst count (> 0) or -EBUSY if it stays zero for the
 * whole timeout_d interval — callers must check for the error.
 */
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}
162
/*
 * Wait until all bits in @mask are set in TPM_STS, up to @timeout
 * jiffies.  When an IRQ has been discovered, sleep on @queue (woken
 * from tis_int_handler()); otherwise poll.  Returns 0 once the mask
 * matches, -ETIME on timeout.
 */
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue)
{
	unsigned long stop;
	long rc;
	u8 status;

	/* check current status */
	status = tpm_tis_status(chip);
	if ((status & mask) == mask)
		return 0;

	if (chip->vendor.irq) {
		rc = wait_event_interruptible_timeout(*queue,
						      ((tpm_tis_status
							(chip) & mask) ==
						       mask), timeout);
		if (rc > 0)
			return 0;
	} else {
		stop = jiffies + timeout;
		do {
			msleep(TPM_TIMEOUT);
			status = tpm_tis_status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
193
194static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
195{
196	int size = 0, burstcnt;
197	while (size < count &&
198	       wait_for_stat(chip,
199			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
200			     chip->vendor.timeout_c,
201			     &chip->vendor.read_queue)
202	       == 0) {
203		burstcnt = get_burstcount(chip);
204		for (; burstcnt > 0 && size < count; burstcnt--)
205			buf[size++] = ioread8(chip->vendor.iobase +
206					      TPM_DATA_FIFO(chip->vendor.
207							    locality));
208	}
209	return size;
210}
211
212static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
213{
214	int size = 0;
215	int expected, status;
216
217	if (count < TPM_HEADER_SIZE) {
218		size = -EIO;
219		goto out;
220	}
221
222	/* read first 10 bytes, including tag, paramsize, and result */
223	if ((size =
224	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
225		dev_err(chip->dev, "Unable to read header\n");
226		goto out;
227	}
228
229	expected = be32_to_cpu(*(__be32 *) (buf + 2));
230	if (expected > count) {
231		size = -EIO;
232		goto out;
233	}
234
235	if ((size +=
236	     recv_data(chip, &buf[TPM_HEADER_SIZE],
237		       expected - TPM_HEADER_SIZE)) < expected) {
238		dev_err(chip->dev, "Unable to read remainder of result\n");
239		size = -ETIME;
240		goto out;
241	}
242
243	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
244		      &chip->vendor.int_queue);
245	status = tpm_tis_status(chip);
246	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
247		dev_err(chip->dev, "Error left over data\n");
248		size = -EIO;
249		goto out;
250	}
251
252out:
253	tpm_tis_ready(chip);
254	release_locality(chip, chip->vendor.locality, 0);
255	return size;
256}
257
258/*
259 * If interrupts are used (signaled by an irq set in the vendor structure)
260 * tpm.c can skip polling for the data to be available as the interrupt is
261 * waited for here
262 */
263static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
264{
265	int rc, status, burstcnt;
266	size_t count = 0;
267	u32 ordinal;
268
269	if (request_locality(chip, 0) < 0)
270		return -EBUSY;
271
272	status = tpm_tis_status(chip);
273	if ((status & TPM_STS_COMMAND_READY) == 0) {
274		tpm_tis_ready(chip);
275		if (wait_for_stat
276		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
277		     &chip->vendor.int_queue) < 0) {
278			rc = -ETIME;
279			goto out_err;
280		}
281	}
282
283	while (count < len - 1) {
284		burstcnt = get_burstcount(chip);
285		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
286			iowrite8(buf[count], chip->vendor.iobase +
287				 TPM_DATA_FIFO(chip->vendor.locality));
288			count++;
289		}
290
291		wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
292			      &chip->vendor.int_queue);
293		status = tpm_tis_status(chip);
294		if ((status & TPM_STS_DATA_EXPECT) == 0) {
295			rc = -EIO;
296			goto out_err;
297		}
298	}
299
300	/* write last byte */
301	iowrite8(buf[count],
302		 chip->vendor.iobase +
303		 TPM_DATA_FIFO(chip->vendor.locality));
304	wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
305		      &chip->vendor.int_queue);
306	status = tpm_tis_status(chip);
307	if ((status & TPM_STS_DATA_EXPECT) != 0) {
308		rc = -EIO;
309		goto out_err;
310	}
311
312	/* go and do it */
313	iowrite8(TPM_STS_GO,
314		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
315
316	if (chip->vendor.irq) {
317		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
318		if (wait_for_stat
319		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
320		     tpm_calc_ordinal_duration(chip, ordinal),
321		     &chip->vendor.read_queue) < 0) {
322			rc = -ETIME;
323			goto out_err;
324		}
325	}
326	return len;
327out_err:
328	tpm_tis_ready(chip);
329	release_locality(chip, chip->vendor.locality, 0);
330	return rc;
331}
332
/* Character-device operations; the actual work is done by the common
 * tpm_* helpers from tpm.h. */
static const struct file_operations tis_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
341
/* sysfs attributes, all backed by the common tpm_show_*/tpm_store_*
 * helpers; read-only except "cancel". */
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		   NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
351
/* NULL-terminated attribute list for the sysfs group below. */
static struct attribute *tis_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr, NULL,
};
362
/* sysfs group registered via tpm_tis.attr_group. */
static struct attribute_group tis_attr_grp = {
	.attrs = tis_attrs
};
366
/* Vendor-specific operations handed to tpm_register_hardware();
 * cancel reuses tpm_tis_ready() since commandReady aborts a command. */
static struct tpm_vendor_specific tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = TPM_STS_COMMAND_READY,
	.attr_group = &tis_attr_grp,
	.miscdev = {
		    .fops = &tis_ops,},
};
379
/*
 * Probe-time interrupt handler: if this vector fired with a non-zero
 * interrupt status, record it in chip->vendor.irq so tpm_tis_init()
 * knows which candidate IRQ the chip actually uses.
 */
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	/* shared line: not ours if no status bit is set */
	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}
399
/*
 * Run-time interrupt handler: wake whichever waiter corresponds to
 * the asserted interrupt cause(s), then acknowledge them.
 */
static irqreturn_t tis_int_handler(int irq, void *dev_id)
{
	struct tpm_chip *chip = (struct tpm_chip *) dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	/* shared line: not ours if no status bit is set */
	if (interrupt == 0)
		return IRQ_NONE;

	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		/* re-resolve which of the 5 localities is now active */
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	/* read back — presumably to flush the posted write before
	 * returning; NOTE(review): confirm against TIS spec */
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}
430
/* Module parameter: enable IRQ discovery/use (default on).
 * NOTE(review): variable is int but declared as a bool param —
 * works on this kernel generation, but newer module_param(bool)
 * expects a bool variable; verify against the target kernel. */
static int interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
434
/*
 * Probe and initialize one TIS TPM.
 *
 * Maps the MMIO window at @start/@len (TIS defaults applied when
 * zero), registers the chip with the TPM core, sets default timeouts,
 * dumps the interface capabilities, claims locality 0, and — when the
 * "interrupts" parameter is set — discovers and installs an IRQ
 * handler.  Returns 0 on success or a negative errno; on failure the
 * mapping and the hardware registration are undone.
 */
static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len)
{
	u32 vendor, intfcaps, intmask;
	int rc, i;
	struct tpm_chip *chip;

	if (!start)
		start = TIS_MEM_BASE;
	if (!len)
		len = TIS_MEM_LEN;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* device/vendor ID from locality 0's register page */
	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* claim locality 0 (request_locality() returns the granted
	 * locality on success, so 0 here) */
	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts) {
		/* start with whatever vector the chip already advertises */
		chip->vendor.irq =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));

		/* No vector yet: try candidates 3..15.  For each, program
		 * the vector register, install the probe handler, have the
		 * TPM generate an interrupt, and let tis_int_probe() record
		 * the vector that actually fired in chip->vendor.irq. */
		for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				    TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq
			    (i, tis_int_probe, IRQF_SHARED,
			     chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			/* Generate Interrupts */
			tpm_gen_interrupt(chip);

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	/* A working vector was found (or pre-programmed): install the
	 * real handler and enable interrupts globally. */
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq
		    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			/* fall back to polling mode */
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	/* track the chip for cleanup_tis() */
	INIT_LIST_HEAD(&chip->vendor.list);
	spin_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	spin_unlock(&tis_lock);

	tpm_get_timeouts(chip);
	tpm_continue_selftest(chip);

	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}
593
594static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
595				      const struct pnp_device_id *pnp_id)
596{
597	resource_size_t start, len;
598	start = pnp_mem_start(pnp_dev, 0);
599	len = pnp_mem_len(pnp_dev, 0);
600
601	return tpm_tis_init(&pnp_dev->dev, start, len);
602}
603
/* PNP suspend hook: delegate to the common TPM PM code. */
static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
	return tpm_pm_suspend(&dev->dev, msg);
}
608
/* PNP resume hook: delegate to the common TPM PM code. */
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
	return tpm_pm_resume(&dev->dev);
}
613
/* PNP IDs this driver binds to; the empty slot before the terminator
 * is filled in via the "hid" module parameter (see below). */
static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
624
/* PNP driver used in the normal (non-forced) probe path. */
static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.suspend = tpm_tis_pnp_suspend,
	.resume = tpm_tis_pnp_resume,
};
632
633#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
634module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
635		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
636MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
637
/* Platform driver used only when the "force" parameter bypasses PNP. */
static struct device_driver tis_drv = {
	.name = "tpm_tis",
	.bus = &platform_bus_type,
	.owner = THIS_MODULE,
	.suspend = tpm_pm_suspend,
	.resume = tpm_pm_resume,
};
645
/* Platform device created when probing is forced (see init_tis()). */
static struct platform_device *pdev;

static int force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
651static int __init init_tis(void)
652{
653	int rc;
654
655	if (force) {
656		rc = driver_register(&tis_drv);
657		if (rc < 0)
658			return rc;
659		if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
660			return PTR_ERR(pdev);
661		if((rc=tpm_tis_init(&pdev->dev, 0, 0)) != 0) {
662			platform_device_unregister(pdev);
663			driver_unregister(&tis_drv);
664		}
665		return rc;
666	}
667
668	return pnp_register_driver(&tis_pnp_driver);
669}
670
/*
 * Module exit: for every registered chip, mask the global interrupt
 * enable, force-release its locality, free its IRQ and mapping, and
 * unregister it from the TPM core; then tear down whichever driver
 * registration init_tis() performed.
 */
static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;
	spin_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		/* clear TPM_GLOBAL_INT_ENABLE, keeping the other bits */
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.
						  locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		/* force == 1: release unconditionally */
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
		tpm_remove_hardware(chip->dev);
	}
	spin_unlock(&tis_lock);
	if (force) {
		platform_device_unregister(pdev);
		driver_unregister(&tis_drv);
	} else
		pnp_unregister_driver(&tis_pnp_driver);
}
698
/* Module registration and metadata. */
module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
705