// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2020 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/dma-mapping.h>
14*4882a593Smuzhiyun #include <linux/dmapool.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <asm/vio.h>
17*4882a593Smuzhiyun #include <asm/irq.h>
18*4882a593Smuzhiyun #include <linux/types.h>
19*4882a593Smuzhiyun #include <linux/list.h>
20*4882a593Smuzhiyun #include <linux/spinlock.h>
21*4882a593Smuzhiyun #include <linux/interrupt.h>
22*4882a593Smuzhiyun #include <linux/wait.h>
23*4882a593Smuzhiyun #include <asm/prom.h>
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include "tpm.h"
26*4882a593Smuzhiyun #include "tpm_ibmvtpm.h"
27*4882a593Smuzhiyun
/* Driver name reported on the VIO bus and used when requesting the IRQ. */
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

/*
 * VIO device IDs this driver binds to: the original vTPM and the
 * "IBM,vtpm20" variant.  The compat string is checked again in
 * tpm_ibmvtpm_probe() to decide whether to set TPM_CHIP_FLAG_TPM2.
 */
static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "IBM,vtpm", "IBM,vtpm20"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
36*4882a593Smuzhiyun
/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *	0 (H_SUCCESS) - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
	/* The second CRQ word is reserved and is always sent as zero. */
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun /**
52*4882a593Smuzhiyun * ibmvtpm_send_crq() - Send a CRQ request
53*4882a593Smuzhiyun *
54*4882a593Smuzhiyun * @vdev: vio device struct
55*4882a593Smuzhiyun * @valid: Valid field
56*4882a593Smuzhiyun * @msg: Type field
57*4882a593Smuzhiyun * @len: Length field
58*4882a593Smuzhiyun * @data: Data field
59*4882a593Smuzhiyun *
60*4882a593Smuzhiyun * The ibmvtpm crq is defined as follows:
61*4882a593Smuzhiyun *
62*4882a593Smuzhiyun * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
63*4882a593Smuzhiyun * -----------------------------------------------------------------------
64*4882a593Smuzhiyun * Word0 | Valid | Type | Length | Data
65*4882a593Smuzhiyun * -----------------------------------------------------------------------
66*4882a593Smuzhiyun * Word1 | Reserved
67*4882a593Smuzhiyun * -----------------------------------------------------------------------
68*4882a593Smuzhiyun *
69*4882a593Smuzhiyun * Which matches the following structure (on bigendian host):
70*4882a593Smuzhiyun *
71*4882a593Smuzhiyun * struct ibmvtpm_crq {
72*4882a593Smuzhiyun * u8 valid;
73*4882a593Smuzhiyun * u8 msg;
74*4882a593Smuzhiyun * __be16 len;
75*4882a593Smuzhiyun * __be32 data;
76*4882a593Smuzhiyun * __be64 reserved;
77*4882a593Smuzhiyun * } __attribute__((packed, aligned(8)));
78*4882a593Smuzhiyun *
79*4882a593Smuzhiyun * However, the value is passed in a register so just compute the numeric value
80*4882a593Smuzhiyun * to load into the register avoiding byteswap altogether. Endian only affects
81*4882a593Smuzhiyun * memory loads and stores - registers are internally represented the same.
82*4882a593Smuzhiyun *
83*4882a593Smuzhiyun * Return:
84*4882a593Smuzhiyun * 0 (H_SUCCESS) - Success
85*4882a593Smuzhiyun * Non-zero - Failure
86*4882a593Smuzhiyun */
ibmvtpm_send_crq(struct vio_dev * vdev,u8 valid,u8 msg,u16 len,u32 data)87*4882a593Smuzhiyun static int ibmvtpm_send_crq(struct vio_dev *vdev,
88*4882a593Smuzhiyun u8 valid, u8 msg, u16 len, u32 data)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
91*4882a593Smuzhiyun (u64)data;
92*4882a593Smuzhiyun return ibmvtpm_send_crq_word(vdev, w1);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun /**
96*4882a593Smuzhiyun * tpm_ibmvtpm_recv - Receive data after send
97*4882a593Smuzhiyun *
98*4882a593Smuzhiyun * @chip: tpm chip struct
99*4882a593Smuzhiyun * @buf: buffer to read
100*4882a593Smuzhiyun * @count: size of buffer
101*4882a593Smuzhiyun *
102*4882a593Smuzhiyun * Return:
103*4882a593Smuzhiyun * Number of bytes read
104*4882a593Smuzhiyun */
tpm_ibmvtpm_recv(struct tpm_chip * chip,u8 * buf,size_t count)105*4882a593Smuzhiyun static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
108*4882a593Smuzhiyun u16 len;
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun if (!ibmvtpm->rtce_buf) {
111*4882a593Smuzhiyun dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
112*4882a593Smuzhiyun return 0;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun len = ibmvtpm->res_len;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun if (count < len) {
118*4882a593Smuzhiyun dev_err(ibmvtpm->dev,
119*4882a593Smuzhiyun "Invalid size in recv: count=%zd, crq_size=%d\n",
120*4882a593Smuzhiyun count, len);
121*4882a593Smuzhiyun return -EIO;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun spin_lock(&ibmvtpm->rtce_lock);
125*4882a593Smuzhiyun memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
126*4882a593Smuzhiyun memset(ibmvtpm->rtce_buf, 0, len);
127*4882a593Smuzhiyun ibmvtpm->res_len = 0;
128*4882a593Smuzhiyun spin_unlock(&ibmvtpm->rtce_lock);
129*4882a593Smuzhiyun return len;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun /**
133*4882a593Smuzhiyun * ibmvtpm_crq_send_init - Send a CRQ initialize message
134*4882a593Smuzhiyun * @ibmvtpm: vtpm device struct
135*4882a593Smuzhiyun *
136*4882a593Smuzhiyun * Return:
137*4882a593Smuzhiyun * 0 on success.
138*4882a593Smuzhiyun * Non-zero on failure.
139*4882a593Smuzhiyun */
ibmvtpm_crq_send_init(struct ibmvtpm_dev * ibmvtpm)140*4882a593Smuzhiyun static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun int rc;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
145*4882a593Smuzhiyun if (rc != H_SUCCESS)
146*4882a593Smuzhiyun dev_err(ibmvtpm->dev,
147*4882a593Smuzhiyun "%s failed rc=%d\n", __func__, rc);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun return rc;
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun
/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:	device struct
 *
 * Return: 0 on success, non-zero on failure (the original comment said
 * "Always 0", but every failing step's rc is in fact returned).
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/*
	 * Re-enable the CRQ.  The hypervisor may report the operation as
	 * busy or in progress; back off 100 ms and retry until it settles.
	 */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	/* Restart the CRQ init handshake; completion arrives via interrupt. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}
189*4882a593Smuzhiyun
/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer contains data to send
 * @count:	size of buffer
 *
 * Copies the command into the RTCE buffer and posts a CRQ message telling
 * the hypervisor to process it; the response is delivered asynchronously
 * via ibmvtpm_interrupt().
 *
 * Return:
 *	0 on success (including when the H_SEND_CRQ hcall fails after the
 *	one retry — that failure is only logged),
 *	-errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	bool retry = true;
	int rc, sig;

	/* Not usable until the RTCE buffer was negotiated during probe. */
	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
		         "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = 1;

again:
	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		/*
		 * H_CLOSED can be returned after LPM resume. Call
		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
		 * ibmvtpm_send_crq() once before failing.
		 *
		 * NOTE(review): tpm_ibmvtpm_resume() can msleep() and is
		 * invoked here while rtce_lock (a spinlock) is held —
		 * confirm this path cannot sleep in atomic context.
		 */
		if (rc == H_CLOSED && retry) {
			tpm_ibmvtpm_resume(ibmvtpm->dev);
			retry = false;
			goto again;
		}
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		ibmvtpm->tpm_processing_cmd = 0;
	}

	spin_unlock(&ibmvtpm->rtce_lock);
	return 0;
}
259*4882a593Smuzhiyun
/* No-op: the vTPM CRQ transport offers no way to cancel an in-flight command. */
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
}
264*4882a593Smuzhiyun
tpm_ibmvtpm_status(struct tpm_chip * chip)265*4882a593Smuzhiyun static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun return ibmvtpm->tpm_processing_cmd;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun /**
273*4882a593Smuzhiyun * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
274*4882a593Smuzhiyun *
275*4882a593Smuzhiyun * @ibmvtpm: vtpm device struct
276*4882a593Smuzhiyun *
277*4882a593Smuzhiyun * Return:
278*4882a593Smuzhiyun * 0 on success.
279*4882a593Smuzhiyun * Non-zero on failure.
280*4882a593Smuzhiyun */
ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev * ibmvtpm)281*4882a593Smuzhiyun static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun int rc;
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun rc = ibmvtpm_send_crq(ibmvtpm->vdev,
286*4882a593Smuzhiyun IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
287*4882a593Smuzhiyun if (rc != H_SUCCESS)
288*4882a593Smuzhiyun dev_err(ibmvtpm->dev,
289*4882a593Smuzhiyun "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun return rc;
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun /**
295*4882a593Smuzhiyun * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
296*4882a593Smuzhiyun * - Note that this is vtpm version and not tpm version
297*4882a593Smuzhiyun *
298*4882a593Smuzhiyun * @ibmvtpm: vtpm device struct
299*4882a593Smuzhiyun *
300*4882a593Smuzhiyun * Return:
301*4882a593Smuzhiyun * 0 on success.
302*4882a593Smuzhiyun * Non-zero on failure.
303*4882a593Smuzhiyun */
ibmvtpm_crq_get_version(struct ibmvtpm_dev * ibmvtpm)304*4882a593Smuzhiyun static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun int rc;
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun rc = ibmvtpm_send_crq(ibmvtpm->vdev,
309*4882a593Smuzhiyun IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
310*4882a593Smuzhiyun if (rc != H_SUCCESS)
311*4882a593Smuzhiyun dev_err(ibmvtpm->dev,
312*4882a593Smuzhiyun "ibmvtpm_crq_get_version failed rc=%d\n", rc);
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun return rc;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun /**
318*4882a593Smuzhiyun * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
319*4882a593Smuzhiyun * @ibmvtpm: vtpm device struct
320*4882a593Smuzhiyun *
321*4882a593Smuzhiyun * Return:
322*4882a593Smuzhiyun * 0 on success.
323*4882a593Smuzhiyun * Non-zero on failure.
324*4882a593Smuzhiyun */
ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev * ibmvtpm)325*4882a593Smuzhiyun static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun int rc;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
330*4882a593Smuzhiyun if (rc != H_SUCCESS)
331*4882a593Smuzhiyun dev_err(ibmvtpm->dev,
332*4882a593Smuzhiyun "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun return rc;
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun
/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Tears down in reverse order of probe: unregister the chip, release the
 * IRQ, free the hypervisor CRQ, then unmap and free the buffers.
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

	/* Free the CRQ; back off 100 ms and retry while the hypervisor is busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	/* rtce_buf is only allocated once the size response arrived in probe. */
	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun /**
377*4882a593Smuzhiyun * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
378*4882a593Smuzhiyun * @vdev: vio device struct
379*4882a593Smuzhiyun *
380*4882a593Smuzhiyun * Return:
381*4882a593Smuzhiyun * Number of bytes the driver needs to DMA map.
382*4882a593Smuzhiyun */
tpm_ibmvtpm_get_desired_dma(struct vio_dev * vdev)383*4882a593Smuzhiyun static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
384*4882a593Smuzhiyun {
385*4882a593Smuzhiyun struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
386*4882a593Smuzhiyun struct ibmvtpm_dev *ibmvtpm;
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun /*
389*4882a593Smuzhiyun * ibmvtpm initializes at probe time, so the data we are
390*4882a593Smuzhiyun * asking for may not be set yet. Estimate that 4K required
391*4882a593Smuzhiyun * for TCE-mapped buffer in addition to CRQ.
392*4882a593Smuzhiyun */
393*4882a593Smuzhiyun if (chip)
394*4882a593Smuzhiyun ibmvtpm = dev_get_drvdata(&chip->dev);
395*4882a593Smuzhiyun else
396*4882a593Smuzhiyun return CRQ_RES_BUF_SIZE + PAGE_SIZE;
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun /**
402*4882a593Smuzhiyun * tpm_ibmvtpm_suspend - Suspend
403*4882a593Smuzhiyun * @dev: device struct
404*4882a593Smuzhiyun *
405*4882a593Smuzhiyun * Return: Always 0.
406*4882a593Smuzhiyun */
tpm_ibmvtpm_suspend(struct device * dev)407*4882a593Smuzhiyun static int tpm_ibmvtpm_suspend(struct device *dev)
408*4882a593Smuzhiyun {
409*4882a593Smuzhiyun struct tpm_chip *chip = dev_get_drvdata(dev);
410*4882a593Smuzhiyun struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
411*4882a593Smuzhiyun int rc = 0;
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun rc = ibmvtpm_send_crq(ibmvtpm->vdev,
414*4882a593Smuzhiyun IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
415*4882a593Smuzhiyun if (rc != H_SUCCESS)
416*4882a593Smuzhiyun dev_err(ibmvtpm->dev,
417*4882a593Smuzhiyun "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun return rc;
420*4882a593Smuzhiyun }
421*4882a593Smuzhiyun
/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:	ibm vtpm struct
 *
 * Frees any CRQ still registered with the hypervisor, zeroes the queue
 * page, resets the ring index, and registers the queue again.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	/* Back off 100 ms and retry while the hypervisor reports busy. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
448*4882a593Smuzhiyun
tpm_ibmvtpm_req_canceled(struct tpm_chip * chip,u8 status)449*4882a593Smuzhiyun static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
450*4882a593Smuzhiyun {
451*4882a593Smuzhiyun return (status == 0);
452*4882a593Smuzhiyun }
453*4882a593Smuzhiyun
/*
 * TPM chip operations.  tpm_ibmvtpm_status() returns tpm_processing_cmd
 * (set to 1 while a command is in flight, cleared to 0 on completion), so
 * with req_complete_mask = 1 and req_complete_val = 0 the TPM core treats
 * a cleared flag as command completion.
 */
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 1,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

/* Power-management callbacks wired into the vio_driver below. */
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
468*4882a593Smuzhiyun
/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	/*
	 * VTPM_MSG_RES in the valid byte marks a slot holding a response;
	 * consume it by advancing (and wrapping) the ring index.
	 */
	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/*
		 * Read barrier so the valid-byte check is ordered before the
		 * payload reads done by the caller — presumably paired with
		 * the smp_wmb() in ibmvtpm_interrupt(); confirm pairing.
		 */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}
489*4882a593Smuzhiyun
/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 *
 * Called from ibmvtpm_interrupt(), i.e. interrupt context — hence the
 * GFP_ATOMIC allocation below.
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		/* CRQ initialization handshake messages. */
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		/* Responses to commands this driver sent earlier. */
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			/* len is u16, so "<= 0" only rejects a zero size. */
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			/* Clear the busy flag and release a waiting sender. */
			ibmvtpm->tpm_processing_cmd = 0;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}
561*4882a593Smuzhiyun
/**
 * ibmvtpm_interrupt - Interrupt handler
 *
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		/* crq_queue.wq is waited on in probe for the rtce_buf setup. */
		wake_up_interruptible(&ibmvtpm->crq_queue.wq);
		/*
		 * Hand the ring slot back by clearing the valid byte; the
		 * write barrier orders that store before later stores —
		 * presumably pairing with smp_rmb() in
		 * ibmvtpm_crq_get_next(); confirm intended pairing.
		 */
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}
589*4882a593Smuzhiyun
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Allocates the per-device state and the CRQ ring page, registers the CRQ
 * with the hypervisor, wires up the interrupt, runs the init/version/
 * RTCE-size handshake (responses arrive via ibmvtpm_interrupt()), and
 * finally registers the TPM chip.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	/* One zeroed page holds the CRQ ring shared with the hypervisor. */
	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	init_waitqueue_head(&crq_q->wq);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	/*
	 * H_RESOURCE means a CRQ is already registered (e.g. left over from
	 * a previous kernel); free it and register again via the reset path.
	 */
	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	/*
	 * NOTE(review): the error paths below jump to init_irq_cleanup but
	 * never free_irq() once request_irq() has succeeded — confirm
	 * whether VIO teardown covers this.
	 */
	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	/* Wait up to 1 s for the interrupt handler to allocate rtce_buf. */
	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
				ibmvtpm->rtce_buf != NULL,
				HZ)) {
		rc = -ENODEV;
		dev_err(dev, "CRQ response timed out\n");
		goto init_irq_cleanup;
	}


	if (!strcmp(id->compat, "IBM,vtpm20"))
		chip->flags |= TPM_CHIP_FLAG_TPM2;

	rc = tpm_get_timeouts(chip);
	if (rc)
		goto init_irq_cleanup;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		rc = tpm2_get_cc_attrs_tbl(chip);
		if (rc)
			goto init_irq_cleanup;
	}

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}
722*4882a593Smuzhiyun
/* VIO bus glue: binds the device table to the probe/remove/PM callbacks. */
static struct vio_driver ibmvtpm_driver = {
	.id_table = tpm_ibmvtpm_device_table,
	.probe = tpm_ibmvtpm_probe,
	.remove = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name = tpm_ibmvtpm_driver_name,
	.pm = &tpm_ibmvtpm_pm_ops,
};
731*4882a593Smuzhiyun
/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	/* Registration triggers tpm_ibmvtpm_probe() for matching devices. */
	return vio_register_driver(&ibmvtpm_driver);
}
744*4882a593Smuzhiyun
/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
	/* Unbinding invokes tpm_ibmvtpm_remove() for each bound device. */
	vio_unregister_driver(&ibmvtpm_driver);
}
752*4882a593Smuzhiyun
/* Module entry/exit points and metadata. */
module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
760