xref: /OK3568_Linux_fs/kernel/drivers/char/tpm/xen-tpmfront.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Implementation of the Xen vTPM device frontend
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Author:  Daniel De Graaf <dgdegra@tycho.nsa.gov>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun #include <linux/errno.h>
8*4882a593Smuzhiyun #include <linux/err.h>
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/freezer.h>
11*4882a593Smuzhiyun #include <xen/xen.h>
12*4882a593Smuzhiyun #include <xen/events.h>
13*4882a593Smuzhiyun #include <xen/interface/io/tpmif.h>
14*4882a593Smuzhiyun #include <xen/grant_table.h>
15*4882a593Smuzhiyun #include <xen/xenbus.h>
16*4882a593Smuzhiyun #include <xen/page.h>
17*4882a593Smuzhiyun #include "tpm.h"
18*4882a593Smuzhiyun #include <xen/platform_pci.h>
19*4882a593Smuzhiyun 
/* Per-device state of the vTPM frontend. */
struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	/* Page shared with the backend domain; carries command and response. */
	struct vtpm_shared_page *shr;

	unsigned int evtchn;		/* event channel to/from the backend */
	int ring_ref;			/* grant reference covering @shr */
	domid_t backend_id;
	int irq;			/* irq bound to @evtchn, 0 if unbound */
	wait_queue_head_t read_queue;	/* woken by tpmif_interrupt() */
};
32*4882a593Smuzhiyun 
/*
 * Virtual status bits synthesized by vtpm_status() from the shared-page
 * state; modeled after TIS-style status flags expected by the TPM core.
 */
enum status_bits {
	VTPM_STATUS_RUNNING  = 0x1,
	VTPM_STATUS_IDLE     = 0x2,
	VTPM_STATUS_RESULT   = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};
39*4882a593Smuzhiyun 
/*
 * Wake condition for wait_for_tpm_stat(): true when all bits in @mask are
 * set in the chip status, or (when @check_cancel) when the pending request
 * has been canceled.  *canceled reports which of the two ended the wait.
 */
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
					bool check_cancel, bool *canceled)
{
	u8 stat = chip->ops->status(chip);
	bool done = (stat & mask) == mask;
	bool aborted = !done && check_cancel &&
		       chip->ops->req_canceled(chip, stat);

	*canceled = aborted;
	return done || aborted;
}
54*4882a593Smuzhiyun 
/*
 * Wait up to @timeout jiffies for all status bits in @mask to be set.
 *
 * With TPM_CHIP_FLAG_IRQ the caller sleeps on @queue and is woken by the
 * event-channel interrupt handler; otherwise the status is polled every
 * TPM_TIMEOUT ms.  When @check_cancel is set, a canceled request also ends
 * the wait.
 *
 * Returns 0 on success, -ECANCELED if the request was canceled, or -ETIME
 * on timeout (also returned if the wait was interrupted by a real signal).
 */
static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
		unsigned long timeout, wait_queue_head_t *queue,
		bool check_cancel)
{
	unsigned long stop;
	long rc;
	u8 status;
	bool canceled = false;

	/* check current status */
	status = chip->ops->status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
		/* Recompute the remaining budget; we may loop after a freeze. */
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
			wait_for_tpm_stat_cond(chip, mask, check_cancel,
					       &canceled),
			timeout);
		if (rc > 0) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		if (rc == -ERESTARTSYS && freezing(current)) {
			/*
			 * Interrupted by the freezer, not by a real signal:
			 * clear the bogus pending-signal state and resume
			 * waiting for the remaining time.
			 */
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* Polling fallback when no interrupt is available. */
		do {
			tpm_msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
99*4882a593Smuzhiyun 
vtpm_status(struct tpm_chip * chip)100*4882a593Smuzhiyun static u8 vtpm_status(struct tpm_chip *chip)
101*4882a593Smuzhiyun {
102*4882a593Smuzhiyun 	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
103*4882a593Smuzhiyun 	switch (priv->shr->state) {
104*4882a593Smuzhiyun 	case VTPM_STATE_IDLE:
105*4882a593Smuzhiyun 		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
106*4882a593Smuzhiyun 	case VTPM_STATE_FINISH:
107*4882a593Smuzhiyun 		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
108*4882a593Smuzhiyun 	case VTPM_STATE_SUBMIT:
109*4882a593Smuzhiyun 	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
110*4882a593Smuzhiyun 		return VTPM_STATUS_RUNNING;
111*4882a593Smuzhiyun 	default:
112*4882a593Smuzhiyun 		return 0;
113*4882a593Smuzhiyun 	}
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
vtpm_req_canceled(struct tpm_chip * chip,u8 status)116*4882a593Smuzhiyun static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun 	return status & VTPM_STATUS_CANCELED;
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun 
/*
 * Ask the backend to cancel the in-flight command.  The wmb() makes the
 * state write visible before the backend is kicked via the event channel.
 */
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}
128*4882a593Smuzhiyun 
shr_data_offset(struct vtpm_shared_page * shr)129*4882a593Smuzhiyun static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun 
/*
 * Copy a TPM command into the shared page, hand it to the backend, and
 * wait for the backend to finish processing it.
 *
 * Returns 0 on success, -EINVAL when the command does not fit in the
 * shared page, or -ETIME on timeout (after attempting a cancel).
 */
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	/* Both the data area and the command itself must lie within the page. */
	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	/* Publish payload and length before flipping the state to SUBMIT ... */
	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	/* ... and make everything visible to the backend before the kick. */
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	/* The allowed duration depends on which TPM ordinal was submitted. */
	ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&priv->read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return 0;
}
175*4882a593Smuzhiyun 
/*
 * Copy the backend's response out of the shared page into @buf.
 *
 * Returns the number of bytes copied, -ECANCELED if the command was
 * canceled, -ETIME on timeout, or -EIO on a corrupt shared page.
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int data_off = shr_data_offset(shr);
	size_t avail = shr->length;

	/* An idle state at this point means the command was canceled. */
	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	/* Never read outside the shared page, nor past the caller's buffer. */
	if (data_off > PAGE_SIZE)
		return -EIO;

	if (avail > PAGE_SIZE - data_off)
		avail = PAGE_SIZE - data_off;

	if (avail > count)
		avail = count;

	memcpy(buf, (u8 *)shr + data_off, avail);

	return avail;
}
206*4882a593Smuzhiyun 
/* tpm_class_ops implemented on top of the shared-page vTPM protocol. */
static const struct tpm_class_ops tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	/* A command is complete once the backend is idle with a result. */
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val  = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled      = vtpm_req_canceled,
};
216*4882a593Smuzhiyun 
tpmif_interrupt(int dummy,void * dev_id)217*4882a593Smuzhiyun static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun 	struct tpm_private *priv = dev_id;
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	switch (priv->shr->state) {
222*4882a593Smuzhiyun 	case VTPM_STATE_IDLE:
223*4882a593Smuzhiyun 	case VTPM_STATE_FINISH:
224*4882a593Smuzhiyun 		wake_up_interruptible(&priv->read_queue);
225*4882a593Smuzhiyun 		break;
226*4882a593Smuzhiyun 	case VTPM_STATE_SUBMIT:
227*4882a593Smuzhiyun 	case VTPM_STATE_CANCEL:
228*4882a593Smuzhiyun 	default:
229*4882a593Smuzhiyun 		break;
230*4882a593Smuzhiyun 	}
231*4882a593Smuzhiyun 	return IRQ_HANDLED;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun 
/*
 * Allocate the (device-managed) TPM chip and link it with the frontend's
 * private state.  Returns 0 or a negative errno.
 */
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&priv->read_queue);

	priv->chip = chip;
	dev_set_drvdata(&chip->dev, priv);

	return 0;
}
249*4882a593Smuzhiyun 
/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	/* A single zeroed page is shared with the backend as the ring. */
	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	/* Grant the backend domain access to the shared page. */
	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;

	priv->ring_ref = gref;

	/* Event channel for notifications in both directions. */
	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	/* Publish ring-ref and event-channel so the backend can connect. */
	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	/* This frontend only speaks the v2 vTPM protocol. */
	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;	/* transaction raced with another writer; retry */
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}
328*4882a593Smuzhiyun 
/*
 * Tear down the shared ring: revoke the grant (or free the page), release
 * the interrupt, and free the private structure.  Safe on NULL and on
 * partially-initialized state, so it serves as the error-path cleanup
 * for setup_ring() as well.
 */
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	/*
	 * If the page was granted, ending foreign access also frees the
	 * page once the backend's mapping is gone; otherwise free it here.
	 */
	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->irq)
		unbind_from_irqhandler(priv->irq, priv);

	kfree(priv);
}
345*4882a593Smuzhiyun 
tpmfront_probe(struct xenbus_device * dev,const struct xenbus_device_id * id)346*4882a593Smuzhiyun static int tpmfront_probe(struct xenbus_device *dev,
347*4882a593Smuzhiyun 		const struct xenbus_device_id *id)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun 	struct tpm_private *priv;
350*4882a593Smuzhiyun 	int rv;
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
353*4882a593Smuzhiyun 	if (!priv) {
354*4882a593Smuzhiyun 		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
355*4882a593Smuzhiyun 		return -ENOMEM;
356*4882a593Smuzhiyun 	}
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	rv = setup_chip(&dev->dev, priv);
359*4882a593Smuzhiyun 	if (rv) {
360*4882a593Smuzhiyun 		kfree(priv);
361*4882a593Smuzhiyun 		return rv;
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	rv = setup_ring(dev, priv);
365*4882a593Smuzhiyun 	if (rv) {
366*4882a593Smuzhiyun 		ring_free(priv);
367*4882a593Smuzhiyun 		return rv;
368*4882a593Smuzhiyun 	}
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	tpm_get_timeouts(priv->chip);
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	return tpm_chip_register(priv->chip);
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun 
/* Device teardown: mirror of tpmfront_probe(). */
static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	/* Unregister first so no new commands can reach the ring. */
	tpm_chip_unregister(chip);
	ring_free(priv);
	dev_set_drvdata(&chip->dev, NULL);
	return 0;
}
384*4882a593Smuzhiyun 
/*
 * After suspend/resume or migration the backend connection is gone, so
 * simply rebuild the frontend from scratch.
 */
static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}
391*4882a593Smuzhiyun 
/* React to xenbus state changes announced by the backend. */
static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		/* Refuse to connect to a backend without protocol v2. */
		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
					  0)) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		/* Backend is going away: drop the frontend device. */
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}
419*4882a593Smuzhiyun 
/* xenbus device class this frontend binds to. */
static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }	/* terminating entry */
};
MODULE_ALIAS("xen:vtpm");
425*4882a593Smuzhiyun 
/* xenbus frontend driver glue. */
static struct xenbus_driver tpmfront_driver = {
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
};
433*4882a593Smuzhiyun 
xen_tpmfront_init(void)434*4882a593Smuzhiyun static int __init xen_tpmfront_init(void)
435*4882a593Smuzhiyun {
436*4882a593Smuzhiyun 	if (!xen_domain())
437*4882a593Smuzhiyun 		return -ENODEV;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	if (!xen_has_pv_devices())
440*4882a593Smuzhiyun 		return -ENODEV;
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	return xenbus_register_frontend(&tpmfront_driver);
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun module_init(xen_tpmfront_init);
445*4882a593Smuzhiyun 
/* Module unload: unregister the xenbus frontend driver. */
static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
453*4882a593Smuzhiyun MODULE_DESCRIPTION("Xen vTPM Driver");
454*4882a593Smuzhiyun MODULE_LICENSE("GPL");
455