xref: /OK3568_Linux_fs/kernel/drivers/virt/nitro_enclaves/ne_pci_dev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun /**
7*4882a593Smuzhiyun  * DOC: Nitro Enclaves (NE) PCI device driver.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/device.h>
12*4882a593Smuzhiyun #include <linux/list.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/mutex.h>
15*4882a593Smuzhiyun #include <linux/nitro_enclaves.h>
16*4882a593Smuzhiyun #include <linux/pci.h>
17*4882a593Smuzhiyun #include <linux/types.h>
18*4882a593Smuzhiyun #include <linux/wait.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include "ne_misc_dev.h"
21*4882a593Smuzhiyun #include "ne_pci_dev.h"
22*4882a593Smuzhiyun 
/**
 * NE_DEFAULT_TIMEOUT_MSECS - Default timeout to wait for a reply from
 *			      the NE PCI device. Also used as the upper bound
 *			      when polling for the device disabled state.
 */
#define NE_DEFAULT_TIMEOUT_MSECS	(120000) /* 120 sec */
28*4882a593Smuzhiyun 
/* PCI device IDs matched by this driver: the Amazon NE device, zero-terminated. */
static const struct pci_device_id ne_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_NE) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ne_pci_ids);
35*4882a593Smuzhiyun 
/**
 * ne_submit_request() - Submit command request to the PCI device based on the
 *			 command type.
 * @pdev:		PCI device to send the command to.
 * @cmd_type:		Command type of the request sent to the PCI device.
 * @cmd_request:	Command request payload.
 * @cmd_request_size:	Size of the command request payload.
 *
 * Context: Process context. This function is called with the ne_pci_dev mutex held.
 */
static void ne_submit_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
			      void *cmd_request, size_t cmd_request_size)
{
	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);

	/* Copy the request payload into the send area of the MMIO space. */
	memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request, cmd_request_size);

	/*
	 * Writing the command register kicks off processing on the device
	 * side, so it is done only after the payload is fully in place.
	 */
	iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND);
}
55*4882a593Smuzhiyun 
/**
 * ne_retrieve_reply() - Retrieve reply from the PCI device.
 * @pdev:		PCI device to receive the reply from.
 * @cmd_reply:		Command reply payload.
 * @cmd_reply_size:	Size of the command reply payload.
 *
 * Context: Process context. This function is called with the ne_pci_dev mutex
 *	    held, after the reply interrupt signaled that the reply is
 *	    available in the receive area of the MMIO space.
 */
static void ne_retrieve_reply(struct pci_dev *pdev, struct ne_pci_dev_cmd_reply *cmd_reply,
			      size_t cmd_reply_size)
{
	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);

	memcpy_fromio(cmd_reply, ne_pci_dev->iomem_base + NE_RECV_DATA, cmd_reply_size);
}
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun /**
73*4882a593Smuzhiyun  * ne_wait_for_reply() - Wait for a reply of a PCI device command.
74*4882a593Smuzhiyun  * @pdev:	PCI device for which a reply is waited.
75*4882a593Smuzhiyun  *
76*4882a593Smuzhiyun  * Context: Process context. This function is called with the ne_pci_dev mutex held.
77*4882a593Smuzhiyun  * Return:
78*4882a593Smuzhiyun  * * 0 on success.
79*4882a593Smuzhiyun  * * Negative return value on failure.
80*4882a593Smuzhiyun  */
ne_wait_for_reply(struct pci_dev * pdev)81*4882a593Smuzhiyun static int ne_wait_for_reply(struct pci_dev *pdev)
82*4882a593Smuzhiyun {
83*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
84*4882a593Smuzhiyun 	int rc = -EINVAL;
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	/*
87*4882a593Smuzhiyun 	 * TODO: Update to _interruptible and handle interrupted wait event
88*4882a593Smuzhiyun 	 * e.g. -ERESTARTSYS, incoming signals + update timeout, if needed.
89*4882a593Smuzhiyun 	 */
90*4882a593Smuzhiyun 	rc = wait_event_timeout(ne_pci_dev->cmd_reply_wait_q,
91*4882a593Smuzhiyun 				atomic_read(&ne_pci_dev->cmd_reply_avail) != 0,
92*4882a593Smuzhiyun 				msecs_to_jiffies(NE_DEFAULT_TIMEOUT_MSECS));
93*4882a593Smuzhiyun 	if (!rc)
94*4882a593Smuzhiyun 		return -ETIMEDOUT;
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	return 0;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun 
/**
 * ne_do_request() - Submit a command request to the NE PCI device and wait for
 *		     its reply, one request at a time.
 * @pdev:		PCI device to send the command to.
 * @cmd_type:		Command type of the request sent to the PCI device.
 * @cmd_request:	Command request payload.
 * @cmd_request_size:	Size of the command request payload.
 * @cmd_reply:		Command reply payload.
 * @cmd_reply_size:	Size of the command reply payload.
 *
 * Context: Process context. This function takes the ne_pci_dev mutex.
 * Return:
 * * 0 on success.
 * * Negative return value on failure (invalid arguments, timeout waiting for
 *   the reply, or the negative rc reported by the device in the reply).
 */
int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
		  void *cmd_request, size_t cmd_request_size,
		  struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size)
{
	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
	int rc = -EINVAL;

	/* Validate all arguments before touching the device. */
	if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) {
		dev_err_ratelimited(&pdev->dev, "Invalid cmd type=%u\n", cmd_type);

		return -EINVAL;
	}

	if (!cmd_request) {
		dev_err_ratelimited(&pdev->dev, "Null cmd request for cmd type=%u\n",
				    cmd_type);

		return -EINVAL;
	}

	if (cmd_request_size > NE_SEND_DATA_SIZE) {
		dev_err_ratelimited(&pdev->dev, "Invalid req size=%zu for cmd type=%u\n",
				    cmd_request_size, cmd_type);

		return -EINVAL;
	}

	if (!cmd_reply) {
		dev_err_ratelimited(&pdev->dev, "Null cmd reply for cmd type=%u\n",
				    cmd_type);

		return -EINVAL;
	}

	if (cmd_reply_size > NE_RECV_DATA_SIZE) {
		dev_err_ratelimited(&pdev->dev, "Invalid reply size=%zu for cmd type=%u\n",
				    cmd_reply_size, cmd_type);

		return -EINVAL;
	}

	/*
	 * Use this mutex so that the PCI device handles one command request at
	 * a time.
	 */
	mutex_lock(&ne_pci_dev->pci_dev_mutex);

	/* Clear any stale reply-available flag before submitting. */
	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);

	ne_submit_request(pdev, cmd_type, cmd_request, cmd_request_size);

	rc = ne_wait_for_reply(pdev);
	if (rc < 0) {
		dev_err_ratelimited(&pdev->dev, "Error in wait for reply for cmd type=%u [rc=%d]\n",
				    cmd_type, rc);

		goto unlock_mutex;
	}

	ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size);

	/* Reply consumed; reset the flag for the next request. */
	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);

	/* The device reports its own status in the reply payload. */
	if (cmd_reply->rc < 0) {
		rc = cmd_reply->rc;

		dev_err_ratelimited(&pdev->dev, "Error in cmd process logic, cmd type=%u [rc=%d]\n",
				    cmd_type, rc);

		goto unlock_mutex;
	}

	rc = 0;

unlock_mutex:
	mutex_unlock(&ne_pci_dev->pci_dev_mutex);

	return rc;
}
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun /**
180*4882a593Smuzhiyun  * ne_reply_handler() - Interrupt handler for retrieving a reply matching a
181*4882a593Smuzhiyun  *			request sent to the PCI device for enclave lifetime
182*4882a593Smuzhiyun  *			management.
183*4882a593Smuzhiyun  * @irq:	Received interrupt for a reply sent by the PCI device.
184*4882a593Smuzhiyun  * @args:	PCI device private data structure.
185*4882a593Smuzhiyun  *
186*4882a593Smuzhiyun  * Context: Interrupt context.
187*4882a593Smuzhiyun  * Return:
188*4882a593Smuzhiyun  * * IRQ_HANDLED on handled interrupt.
189*4882a593Smuzhiyun  */
ne_reply_handler(int irq,void * args)190*4882a593Smuzhiyun static irqreturn_t ne_reply_handler(int irq, void *args)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	atomic_set(&ne_pci_dev->cmd_reply_avail, 1);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	/* TODO: Update to _interruptible. */
197*4882a593Smuzhiyun 	wake_up(&ne_pci_dev->cmd_reply_wait_q);
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	return IRQ_HANDLED;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun 
/**
 * ne_event_work_handler() - Work queue handler for notifying enclaves on a
 *			     state change received by the event interrupt
 *			     handler.
 * @work:	Item containing the NE PCI device for which an out-of-band event
 *		was issued.
 *
 * An out-of-band event is being issued by the Nitro Hypervisor when at least
 * one enclave is changing state without client interaction.
 *
 * Context: Work queue context.
 */
static void ne_event_work_handler(struct work_struct *work)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	struct ne_enclave *ne_enclave = NULL;
	struct ne_pci_dev *ne_pci_dev =
		container_of(work, struct ne_pci_dev, notify_work);
	struct pci_dev *pdev = ne_pci_dev->pdev;
	int rc = -EINVAL;
	struct slot_info_req slot_info_req = {};

	mutex_lock(&ne_pci_dev->enclaves_list_mutex);

	/*
	 * Iterate over all enclaves registered for the Nitro Enclaves
	 * PCI device and determine for which enclave(s) the out-of-band event
	 * is corresponding to.
	 */
	list_for_each_entry(ne_enclave, &ne_pci_dev->enclaves_list, enclave_list_entry) {
		/* Per-enclave lock nests inside the enclaves list lock. */
		mutex_lock(&ne_enclave->enclave_info_mutex);

		/*
		 * Enclaves that were never started cannot receive out-of-band
		 * events.
		 */
		if (ne_enclave->state != NE_STATE_RUNNING)
			goto unlock;

		slot_info_req.slot_uid = ne_enclave->slot_uid;

		/* Query the device for this enclave's current state. */
		rc = ne_do_request(pdev, SLOT_INFO,
				   &slot_info_req, sizeof(slot_info_req),
				   &cmd_reply, sizeof(cmd_reply));
		if (rc < 0)
			dev_err(&pdev->dev, "Error in slot info [rc=%d]\n", rc);

		/* Notify enclave process that the enclave state changed. */
		if (ne_enclave->state != cmd_reply.state) {
			ne_enclave->state = cmd_reply.state;

			ne_enclave->has_event = true;

			wake_up_interruptible(&ne_enclave->eventq);
		}

unlock:
		 mutex_unlock(&ne_enclave->enclave_info_mutex);
	}

	mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
}
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun /**
266*4882a593Smuzhiyun  * ne_event_handler() - Interrupt handler for PCI device out-of-band events.
267*4882a593Smuzhiyun  *			This interrupt does not supply any data in the MMIO
268*4882a593Smuzhiyun  *			region. It notifies a change in the state of any of
269*4882a593Smuzhiyun  *			the launched enclaves.
270*4882a593Smuzhiyun  * @irq:	Received interrupt for an out-of-band event.
271*4882a593Smuzhiyun  * @args:	PCI device private data structure.
272*4882a593Smuzhiyun  *
273*4882a593Smuzhiyun  * Context: Interrupt context.
274*4882a593Smuzhiyun  * Return:
275*4882a593Smuzhiyun  * * IRQ_HANDLED on handled interrupt.
276*4882a593Smuzhiyun  */
ne_event_handler(int irq,void * args)277*4882a593Smuzhiyun static irqreturn_t ne_event_handler(int irq, void *args)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	queue_work(ne_pci_dev->event_wq, &ne_pci_dev->notify_work);
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	return IRQ_HANDLED;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun 
/**
 * ne_setup_msix() - Setup MSI-X vectors for the PCI device.
 * @pdev:	PCI device to setup the MSI-X for.
 *
 * Allocates all MSI-X vectors the device advertises, then installs the reply
 * interrupt handler, the out-of-band event work queue and the event interrupt
 * handler. On failure, everything set up so far is torn down in reverse order.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_setup_msix(struct pci_dev *pdev)
{
	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
	int nr_vecs = 0;
	int rc = -EINVAL;

	nr_vecs = pci_msix_vec_count(pdev);
	if (nr_vecs < 0) {
		rc = nr_vecs;

		dev_err(&pdev->dev, "Error in getting vec count [rc=%d]\n", rc);

		return rc;
	}

	/* Require exactly the advertised number of vectors (min == max). */
	rc = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in alloc MSI-X vecs [rc=%d]\n", rc);

		return rc;
	}

	/*
	 * This IRQ gets triggered every time the PCI device responds to a
	 * command request. The reply is then retrieved, reading from the MMIO
	 * space of the PCI device.
	 */
	rc = request_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_reply_handler,
			 0, "enclave_cmd", ne_pci_dev);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in request irq reply [rc=%d]\n", rc);

		goto free_irq_vectors;
	}

	/* Single-threaded wq: event work items are serialized. */
	ne_pci_dev->event_wq = create_singlethread_workqueue("ne_pci_dev_wq");
	if (!ne_pci_dev->event_wq) {
		rc = -ENOMEM;

		dev_err(&pdev->dev, "Cannot get wq for dev events [rc=%d]\n", rc);

		goto free_reply_irq_vec;
	}

	INIT_WORK(&ne_pci_dev->notify_work, ne_event_work_handler);

	/*
	 * This IRQ gets triggered every time any enclave's state changes. Its
	 * handler then scans for the changes and propagates them to the user
	 * space.
	 */
	rc = request_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_event_handler,
			 0, "enclave_evt", ne_pci_dev);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in request irq event [rc=%d]\n", rc);

		goto destroy_wq;
	}

	return 0;

destroy_wq:
	destroy_workqueue(ne_pci_dev->event_wq);
free_reply_irq_vec:
	free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
free_irq_vectors:
	pci_free_irq_vectors(pdev);

	return rc;
}
365*4882a593Smuzhiyun 
/**
 * ne_teardown_msix() - Teardown MSI-X vectors for the PCI device.
 * @pdev:	PCI device to teardown the MSI-X for.
 *
 * Mirrors ne_setup_msix() in reverse order.
 *
 * Context: Process context.
 */
static void ne_teardown_msix(struct pci_dev *pdev)
{
	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);

	/* Free the event IRQ first so no new notify work gets queued. */
	free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev);

	/* Drain already queued event work before destroying the work queue. */
	flush_work(&ne_pci_dev->notify_work);
	flush_workqueue(ne_pci_dev->event_wq);
	destroy_workqueue(ne_pci_dev->event_wq);

	free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);

	pci_free_irq_vectors(pdev);
}
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun /**
388*4882a593Smuzhiyun  * ne_pci_dev_enable() - Select the PCI device version and enable it.
389*4882a593Smuzhiyun  * @pdev:	PCI device to select version for and then enable.
390*4882a593Smuzhiyun  *
391*4882a593Smuzhiyun  * Context: Process context.
392*4882a593Smuzhiyun  * Return:
393*4882a593Smuzhiyun  * * 0 on success.
394*4882a593Smuzhiyun  * * Negative return value on failure.
395*4882a593Smuzhiyun  */
ne_pci_dev_enable(struct pci_dev * pdev)396*4882a593Smuzhiyun static int ne_pci_dev_enable(struct pci_dev *pdev)
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun 	u8 dev_enable_reply = 0;
399*4882a593Smuzhiyun 	u16 dev_version_reply = 0;
400*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	iowrite16(NE_VERSION_MAX, ne_pci_dev->iomem_base + NE_VERSION);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	dev_version_reply = ioread16(ne_pci_dev->iomem_base + NE_VERSION);
405*4882a593Smuzhiyun 	if (dev_version_reply != NE_VERSION_MAX) {
406*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Error in pci dev version cmd\n");
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 		return -EIO;
409*4882a593Smuzhiyun 	}
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	iowrite8(NE_ENABLE_ON, ne_pci_dev->iomem_base + NE_ENABLE);
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	dev_enable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
414*4882a593Smuzhiyun 	if (dev_enable_reply != NE_ENABLE_ON) {
415*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Error in pci dev enable cmd\n");
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 		return -EIO;
418*4882a593Smuzhiyun 	}
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 	return 0;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun /**
424*4882a593Smuzhiyun  * ne_pci_dev_disable() - Disable the PCI device.
425*4882a593Smuzhiyun  * @pdev:	PCI device to disable.
426*4882a593Smuzhiyun  *
427*4882a593Smuzhiyun  * Context: Process context.
428*4882a593Smuzhiyun  */
ne_pci_dev_disable(struct pci_dev * pdev)429*4882a593Smuzhiyun static void ne_pci_dev_disable(struct pci_dev *pdev)
430*4882a593Smuzhiyun {
431*4882a593Smuzhiyun 	u8 dev_disable_reply = 0;
432*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
433*4882a593Smuzhiyun 	const unsigned int sleep_time = 10; /* 10 ms */
434*4882a593Smuzhiyun 	unsigned int sleep_time_count = 0;
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	iowrite8(NE_ENABLE_OFF, ne_pci_dev->iomem_base + NE_ENABLE);
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	/*
439*4882a593Smuzhiyun 	 * Check for NE_ENABLE_OFF in a loop, to handle cases when the device
440*4882a593Smuzhiyun 	 * state is not immediately set to disabled and going through a
441*4882a593Smuzhiyun 	 * transitory state of disabling.
442*4882a593Smuzhiyun 	 */
443*4882a593Smuzhiyun 	while (sleep_time_count < NE_DEFAULT_TIMEOUT_MSECS) {
444*4882a593Smuzhiyun 		dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
445*4882a593Smuzhiyun 		if (dev_disable_reply == NE_ENABLE_OFF)
446*4882a593Smuzhiyun 			return;
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun 		msleep_interruptible(sleep_time);
449*4882a593Smuzhiyun 		sleep_time_count += sleep_time;
450*4882a593Smuzhiyun 	}
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
453*4882a593Smuzhiyun 	if (dev_disable_reply != NE_ENABLE_OFF)
454*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Error in pci dev disable cmd\n");
455*4882a593Smuzhiyun }
456*4882a593Smuzhiyun 
/**
 * ne_pci_probe() - Probe function for the NE PCI device.
 * @pdev:	PCI device to match with the NE PCI driver.
 * @id :	PCI device id table associated with the NE PCI driver.
 *
 * Enables the PCI device, maps its MMIO BAR, sets up MSI-X, enables the NE
 * device protocol and registers the misc device. On any failure the steps
 * completed so far are undone in reverse order via the goto chain.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ne_pci_dev *ne_pci_dev = NULL;
	int rc = -EINVAL;

	ne_pci_dev = kzalloc(sizeof(*ne_pci_dev), GFP_KERNEL);
	if (!ne_pci_dev)
		return -ENOMEM;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in pci dev enable [rc=%d]\n", rc);

		goto free_ne_pci_dev;
	}

	rc = pci_request_regions_exclusive(pdev, "nitro_enclaves");
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in pci request regions [rc=%d]\n", rc);

		goto disable_pci_dev;
	}

	ne_pci_dev->iomem_base = pci_iomap(pdev, PCI_BAR_NE, 0);
	if (!ne_pci_dev->iomem_base) {
		rc = -ENOMEM;

		dev_err(&pdev->dev, "Error in pci iomap [rc=%d]\n", rc);

		goto release_pci_regions;
	}

	/* Helpers below retrieve ne_pci_dev via pci_get_drvdata(). */
	pci_set_drvdata(pdev, ne_pci_dev);

	rc = ne_setup_msix(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in pci dev msix setup [rc=%d]\n", rc);

		goto iounmap_pci_bar;
	}

	/*
	 * NOTE(review): disable before enable — presumably to bring the device
	 * to a known disabled state first; confirm against the NE device spec.
	 */
	ne_pci_dev_disable(pdev);

	rc = ne_pci_dev_enable(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in ne_pci_dev enable [rc=%d]\n", rc);

		goto teardown_msix;
	}

	atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
	init_waitqueue_head(&ne_pci_dev->cmd_reply_wait_q);
	INIT_LIST_HEAD(&ne_pci_dev->enclaves_list);
	mutex_init(&ne_pci_dev->enclaves_list_mutex);
	mutex_init(&ne_pci_dev->pci_dev_mutex);
	ne_pci_dev->pdev = pdev;

	/* Publish the PCI dev data for the misc dev (ioctl) side. */
	ne_devs.ne_pci_dev = ne_pci_dev;

	rc = misc_register(ne_devs.ne_misc_dev);
	if (rc < 0) {
		dev_err(&pdev->dev, "Error in misc dev register [rc=%d]\n", rc);

		goto disable_ne_pci_dev;
	}

	return 0;

disable_ne_pci_dev:
	ne_devs.ne_pci_dev = NULL;
	ne_pci_dev_disable(pdev);
teardown_msix:
	ne_teardown_msix(pdev);
iounmap_pci_bar:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ne_pci_dev->iomem_base);
release_pci_regions:
	pci_release_regions(pdev);
disable_pci_dev:
	pci_disable_device(pdev);
free_ne_pci_dev:
	kfree(ne_pci_dev);

	return rc;
}
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun /**
554*4882a593Smuzhiyun  * ne_pci_remove() - Remove function for the NE PCI device.
555*4882a593Smuzhiyun  * @pdev:	PCI device associated with the NE PCI driver.
556*4882a593Smuzhiyun  *
557*4882a593Smuzhiyun  * Context: Process context.
558*4882a593Smuzhiyun  */
ne_pci_remove(struct pci_dev * pdev)559*4882a593Smuzhiyun static void ne_pci_remove(struct pci_dev *pdev)
560*4882a593Smuzhiyun {
561*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
562*4882a593Smuzhiyun 
563*4882a593Smuzhiyun 	misc_deregister(ne_devs.ne_misc_dev);
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun 	ne_devs.ne_pci_dev = NULL;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	ne_pci_dev_disable(pdev);
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	ne_teardown_msix(pdev);
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	pci_set_drvdata(pdev, NULL);
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	pci_iounmap(pdev, ne_pci_dev->iomem_base);
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	pci_release_regions(pdev);
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	pci_disable_device(pdev);
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	kfree(ne_pci_dev);
580*4882a593Smuzhiyun }
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun /**
583*4882a593Smuzhiyun  * ne_pci_shutdown() - Shutdown function for the NE PCI device.
584*4882a593Smuzhiyun  * @pdev:	PCI device associated with the NE PCI driver.
585*4882a593Smuzhiyun  *
586*4882a593Smuzhiyun  * Context: Process context.
587*4882a593Smuzhiyun  */
ne_pci_shutdown(struct pci_dev * pdev)588*4882a593Smuzhiyun static void ne_pci_shutdown(struct pci_dev *pdev)
589*4882a593Smuzhiyun {
590*4882a593Smuzhiyun 	struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 	if (!ne_pci_dev)
593*4882a593Smuzhiyun 		return;
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	misc_deregister(ne_devs.ne_misc_dev);
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 	ne_devs.ne_pci_dev = NULL;
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	ne_pci_dev_disable(pdev);
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	ne_teardown_msix(pdev);
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	pci_set_drvdata(pdev, NULL);
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 	pci_iounmap(pdev, ne_pci_dev->iomem_base);
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	pci_release_regions(pdev);
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun 	pci_disable_device(pdev);
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	kfree(ne_pci_dev);
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun 
/*
 * TODO: Add suspend / resume functions for power management w/ CONFIG_PM, if
 * needed.
 */

/* NE PCI device driver: binds ne_pci_ids to the probe/remove/shutdown hooks. */
struct pci_driver ne_pci_driver = {
	.name		= "nitro_enclaves",
	.id_table	= ne_pci_ids,
	.probe		= ne_pci_probe,
	.remove		= ne_pci_remove,
	.shutdown	= ne_pci_shutdown,
};
626