xref: /OK3568_Linux_fs/kernel/drivers/xen/xen-front-pgdir-shbuf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR MIT
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun /*
4*4882a593Smuzhiyun  * Xen frontend/backend page directory based shared buffer
5*4882a593Smuzhiyun  * helper module.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (C) 2018 EPAM Systems Inc.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/errno.h>
14*4882a593Smuzhiyun #include <linux/mm.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <asm/xen/hypervisor.h>
17*4882a593Smuzhiyun #include <xen/balloon.h>
18*4882a593Smuzhiyun #include <xen/xen.h>
19*4882a593Smuzhiyun #include <xen/xenbus.h>
20*4882a593Smuzhiyun #include <xen/interface/io/ring.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include <xen/xen-front-pgdir-shbuf.h>
23*4882a593Smuzhiyun 
#ifndef GRANT_INVALID_REF
/*
 * FIXME: usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because of the fact it is already in use/reserved by the PV console.
 *
 * Also doubles as the list terminator for gref_dir_next_page chains
 * built in the fill_page_dir implementations below.
 */
#define GRANT_INVALID_REF	0
#endif
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun /**
34*4882a593Smuzhiyun  * This structure represents the structure of a shared page
35*4882a593Smuzhiyun  * that contains grant references to the pages of the shared
36*4882a593Smuzhiyun  * buffer. This structure is common to many Xen para-virtualized
37*4882a593Smuzhiyun  * protocols at include/xen/interface/io/
38*4882a593Smuzhiyun  */
39*4882a593Smuzhiyun struct xen_page_directory {
40*4882a593Smuzhiyun 	grant_ref_t gref_dir_next_page;
41*4882a593Smuzhiyun 	grant_ref_t gref[1]; /* Variable length */
42*4882a593Smuzhiyun };
43*4882a593Smuzhiyun 
/**
 * Shared buffer ops which are differently implemented
 * depending on the allocation mode, e.g. if the buffer
 * is allocated by the corresponding backend or frontend.
 * Some of the operations.
 *
 * Concrete instances: backend_ops (backend-allocated buffers) and
 * local_ops (frontend-allocated buffers), defined at the bottom of
 * this file. Any op pointer may be NULL, meaning "nothing to do"
 * for that allocation mode.
 */
struct xen_front_pgdir_shbuf_ops {
	/*
	 * Calculate number of grefs required to handle this buffer,
	 * e.g. if grefs are required for page directory only or the buffer
	 * pages as well.
	 */
	void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);

	/* Fill page directory according to para-virtual display protocol. */
	void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);

	/*
	 * Claim grant references for the pages of the buffer.
	 * Claimed refs are stored into buf->grefs starting at gref_idx;
	 * returns 0 or a negative errno.
	 */
	int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
				     grant_ref_t *priv_gref_head, int gref_idx);

	/* Map grant references of the buffer. */
	int (*map)(struct xen_front_pgdir_shbuf *buf);

	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun /**
73*4882a593Smuzhiyun  * Get granted reference to the very first page of the
74*4882a593Smuzhiyun  * page directory. Usually this is passed to the backend,
75*4882a593Smuzhiyun  * so it can find/fill the grant references to the buffer's
76*4882a593Smuzhiyun  * pages.
77*4882a593Smuzhiyun  *
78*4882a593Smuzhiyun  * \param buf shared buffer which page directory is of interest.
79*4882a593Smuzhiyun  * \return granted reference to the very first page of the
80*4882a593Smuzhiyun  * page directory.
81*4882a593Smuzhiyun  */
82*4882a593Smuzhiyun grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf * buf)83*4882a593Smuzhiyun xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	if (!buf->grefs)
86*4882a593Smuzhiyun 		return GRANT_INVALID_REF;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	return buf->grefs[0];
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun /**
93*4882a593Smuzhiyun  * Map granted references of the shared buffer.
94*4882a593Smuzhiyun  *
95*4882a593Smuzhiyun  * Depending on the shared buffer mode of allocation
96*4882a593Smuzhiyun  * (be_alloc flag) this can either do nothing (for buffers
97*4882a593Smuzhiyun  * shared by the frontend itself) or map the provided granted
98*4882a593Smuzhiyun  * references onto the backing storage (buf->pages).
99*4882a593Smuzhiyun  *
100*4882a593Smuzhiyun  * \param buf shared buffer which grants to be maped.
101*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
102*4882a593Smuzhiyun  */
xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf * buf)103*4882a593Smuzhiyun int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun 	if (buf->ops && buf->ops->map)
106*4882a593Smuzhiyun 		return buf->ops->map(buf);
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	/* No need to map own grant references. */
109*4882a593Smuzhiyun 	return 0;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun /**
114*4882a593Smuzhiyun  * Unmap granted references of the shared buffer.
115*4882a593Smuzhiyun  *
116*4882a593Smuzhiyun  * Depending on the shared buffer mode of allocation
117*4882a593Smuzhiyun  * (be_alloc flag) this can either do nothing (for buffers
118*4882a593Smuzhiyun  * shared by the frontend itself) or unmap the provided granted
119*4882a593Smuzhiyun  * references.
120*4882a593Smuzhiyun  *
121*4882a593Smuzhiyun  * \param buf shared buffer which grants to be unmaped.
122*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
123*4882a593Smuzhiyun  */
xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf * buf)124*4882a593Smuzhiyun int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	if (buf->ops && buf->ops->unmap)
127*4882a593Smuzhiyun 		return buf->ops->unmap(buf);
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	/* No need to unmap own grant references. */
130*4882a593Smuzhiyun 	return 0;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /**
135*4882a593Smuzhiyun  * Free all the resources of the shared buffer.
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * \param buf shared buffer which resources to be freed.
138*4882a593Smuzhiyun  */
xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf * buf)139*4882a593Smuzhiyun void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	if (buf->grefs) {
142*4882a593Smuzhiyun 		int i;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 		for (i = 0; i < buf->num_grefs; i++)
145*4882a593Smuzhiyun 			if (buf->grefs[i] != GRANT_INVALID_REF)
146*4882a593Smuzhiyun 				gnttab_end_foreign_access(buf->grefs[i],
147*4882a593Smuzhiyun 							  0, 0UL);
148*4882a593Smuzhiyun 	}
149*4882a593Smuzhiyun 	kfree(buf->grefs);
150*4882a593Smuzhiyun 	kfree(buf->directory);
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
153*4882a593Smuzhiyun 
/*
 * Number of grefs a page can hold with respect to the
 * struct xen_page_directory header.
 *
 * Computed from offsetof() so it stays correct no matter what
 * precedes the gref[] array in struct xen_page_directory.
 */
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
				 offsetof(struct xen_page_directory, \
					  gref)) / sizeof(grant_ref_t))
161*4882a593Smuzhiyun 
/**
 * Get the number of pages the page directory consumes itself.
 *
 * One directory page describes up to XEN_NUM_GREFS_PER_PAGE buffer
 * pages, so this rounds buf->num_pages up to whole directory pages.
 *
 * \param buf shared buffer.
 */
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
	return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun /**
173*4882a593Smuzhiyun  * Calculate the number of grant references needed to share the buffer
174*4882a593Smuzhiyun  * and its pages when backend allocates the buffer.
175*4882a593Smuzhiyun  *
176*4882a593Smuzhiyun  * \param buf shared buffer.
177*4882a593Smuzhiyun  */
backend_calc_num_grefs(struct xen_front_pgdir_shbuf * buf)178*4882a593Smuzhiyun static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
179*4882a593Smuzhiyun {
180*4882a593Smuzhiyun 	/* Only for pages the page directory consumes itself. */
181*4882a593Smuzhiyun 	buf->num_grefs = get_num_pages_dir(buf);
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /**
185*4882a593Smuzhiyun  * Calculate the number of grant references needed to share the buffer
186*4882a593Smuzhiyun  * and its pages when frontend allocates the buffer.
187*4882a593Smuzhiyun  *
188*4882a593Smuzhiyun  * \param buf shared buffer.
189*4882a593Smuzhiyun  */
guest_calc_num_grefs(struct xen_front_pgdir_shbuf * buf)190*4882a593Smuzhiyun static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun 	/*
193*4882a593Smuzhiyun 	 * Number of pages the page directory consumes itself
194*4882a593Smuzhiyun 	 * plus grefs for the buffer pages.
195*4882a593Smuzhiyun 	 */
196*4882a593Smuzhiyun 	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun 
/* Kernel virtual address of a page, as an integer, via its Xen PFN. */
#define xen_page_to_vaddr(page) \
	((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun /**
203*4882a593Smuzhiyun  * Unmap the buffer previously mapped with grant references
204*4882a593Smuzhiyun  * provided by the backend.
205*4882a593Smuzhiyun  *
206*4882a593Smuzhiyun  * \param buf shared buffer.
207*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
208*4882a593Smuzhiyun  */
backend_unmap(struct xen_front_pgdir_shbuf * buf)209*4882a593Smuzhiyun static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun 	struct gnttab_unmap_grant_ref *unmap_ops;
212*4882a593Smuzhiyun 	int i, ret;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
215*4882a593Smuzhiyun 		return 0;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
218*4882a593Smuzhiyun 			    GFP_KERNEL);
219*4882a593Smuzhiyun 	if (!unmap_ops)
220*4882a593Smuzhiyun 		return -ENOMEM;
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	for (i = 0; i < buf->num_pages; i++) {
223*4882a593Smuzhiyun 		phys_addr_t addr;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 		addr = xen_page_to_vaddr(buf->pages[i]);
226*4882a593Smuzhiyun 		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
227*4882a593Smuzhiyun 				    buf->backend_map_handles[i]);
228*4882a593Smuzhiyun 	}
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
231*4882a593Smuzhiyun 				buf->num_pages);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	for (i = 0; i < buf->num_pages; i++) {
234*4882a593Smuzhiyun 		if (unlikely(unmap_ops[i].status != GNTST_okay))
235*4882a593Smuzhiyun 			dev_err(&buf->xb_dev->dev,
236*4882a593Smuzhiyun 				"Failed to unmap page %d: %d\n",
237*4882a593Smuzhiyun 				i, unmap_ops[i].status);
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	if (ret)
241*4882a593Smuzhiyun 		dev_err(&buf->xb_dev->dev,
242*4882a593Smuzhiyun 			"Failed to unmap grant references, ret %d", ret);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	kfree(unmap_ops);
245*4882a593Smuzhiyun 	kfree(buf->backend_map_handles);
246*4882a593Smuzhiyun 	buf->backend_map_handles = NULL;
247*4882a593Smuzhiyun 	return ret;
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun /**
251*4882a593Smuzhiyun  * Map the buffer with grant references provided by the backend.
252*4882a593Smuzhiyun  *
253*4882a593Smuzhiyun  * \param buf shared buffer.
254*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
255*4882a593Smuzhiyun  */
backend_map(struct xen_front_pgdir_shbuf * buf)256*4882a593Smuzhiyun static int backend_map(struct xen_front_pgdir_shbuf *buf)
257*4882a593Smuzhiyun {
258*4882a593Smuzhiyun 	struct gnttab_map_grant_ref *map_ops = NULL;
259*4882a593Smuzhiyun 	unsigned char *ptr;
260*4882a593Smuzhiyun 	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
263*4882a593Smuzhiyun 	if (!map_ops)
264*4882a593Smuzhiyun 		return -ENOMEM;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	buf->backend_map_handles = kcalloc(buf->num_pages,
267*4882a593Smuzhiyun 					   sizeof(*buf->backend_map_handles),
268*4882a593Smuzhiyun 					   GFP_KERNEL);
269*4882a593Smuzhiyun 	if (!buf->backend_map_handles) {
270*4882a593Smuzhiyun 		kfree(map_ops);
271*4882a593Smuzhiyun 		return -ENOMEM;
272*4882a593Smuzhiyun 	}
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	/*
275*4882a593Smuzhiyun 	 * Read page directory to get grefs from the backend: for external
276*4882a593Smuzhiyun 	 * buffer we only allocate buf->grefs for the page directory,
277*4882a593Smuzhiyun 	 * so buf->num_grefs has number of pages in the page directory itself.
278*4882a593Smuzhiyun 	 */
279*4882a593Smuzhiyun 	ptr = buf->directory;
280*4882a593Smuzhiyun 	grefs_left = buf->num_pages;
281*4882a593Smuzhiyun 	cur_page = 0;
282*4882a593Smuzhiyun 	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
283*4882a593Smuzhiyun 		struct xen_page_directory *page_dir =
284*4882a593Smuzhiyun 			(struct xen_page_directory *)ptr;
285*4882a593Smuzhiyun 		int to_copy = XEN_NUM_GREFS_PER_PAGE;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 		if (to_copy > grefs_left)
288*4882a593Smuzhiyun 			to_copy = grefs_left;
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun 		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
291*4882a593Smuzhiyun 			phys_addr_t addr;
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 			addr = xen_page_to_vaddr(buf->pages[cur_page]);
294*4882a593Smuzhiyun 			gnttab_set_map_op(&map_ops[cur_page], addr,
295*4882a593Smuzhiyun 					  GNTMAP_host_map,
296*4882a593Smuzhiyun 					  page_dir->gref[cur_gref],
297*4882a593Smuzhiyun 					  buf->xb_dev->otherend_id);
298*4882a593Smuzhiyun 			cur_page++;
299*4882a593Smuzhiyun 		}
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 		grefs_left -= to_copy;
302*4882a593Smuzhiyun 		ptr += PAGE_SIZE;
303*4882a593Smuzhiyun 	}
304*4882a593Smuzhiyun 	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	/* Save handles even if error, so we can unmap. */
307*4882a593Smuzhiyun 	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
308*4882a593Smuzhiyun 		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
309*4882a593Smuzhiyun 		if (unlikely(map_ops[cur_page].status != GNTST_okay))
310*4882a593Smuzhiyun 			dev_err(&buf->xb_dev->dev,
311*4882a593Smuzhiyun 				"Failed to map page %d: %d\n",
312*4882a593Smuzhiyun 				cur_page, map_ops[cur_page].status);
313*4882a593Smuzhiyun 	}
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	if (ret) {
316*4882a593Smuzhiyun 		dev_err(&buf->xb_dev->dev,
317*4882a593Smuzhiyun 			"Failed to map grant references, ret %d", ret);
318*4882a593Smuzhiyun 		backend_unmap(buf);
319*4882a593Smuzhiyun 	}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	kfree(map_ops);
322*4882a593Smuzhiyun 	return ret;
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun /**
326*4882a593Smuzhiyun  * Fill page directory with grant references to the pages of the
327*4882a593Smuzhiyun  * page directory itself.
328*4882a593Smuzhiyun  *
329*4882a593Smuzhiyun  * The grant references to the buffer pages are provided by the
330*4882a593Smuzhiyun  * backend in this case.
331*4882a593Smuzhiyun  *
332*4882a593Smuzhiyun  * \param buf shared buffer.
333*4882a593Smuzhiyun  */
backend_fill_page_dir(struct xen_front_pgdir_shbuf * buf)334*4882a593Smuzhiyun static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
335*4882a593Smuzhiyun {
336*4882a593Smuzhiyun 	struct xen_page_directory *page_dir;
337*4882a593Smuzhiyun 	unsigned char *ptr;
338*4882a593Smuzhiyun 	int i, num_pages_dir;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	ptr = buf->directory;
341*4882a593Smuzhiyun 	num_pages_dir = get_num_pages_dir(buf);
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	/* Fill only grefs for the page directory itself. */
344*4882a593Smuzhiyun 	for (i = 0; i < num_pages_dir - 1; i++) {
345*4882a593Smuzhiyun 		page_dir = (struct xen_page_directory *)ptr;
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 		page_dir->gref_dir_next_page = buf->grefs[i + 1];
348*4882a593Smuzhiyun 		ptr += PAGE_SIZE;
349*4882a593Smuzhiyun 	}
350*4882a593Smuzhiyun 	/* Last page must say there is no more pages. */
351*4882a593Smuzhiyun 	page_dir = (struct xen_page_directory *)ptr;
352*4882a593Smuzhiyun 	page_dir->gref_dir_next_page = GRANT_INVALID_REF;
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun /**
356*4882a593Smuzhiyun  * Fill page directory with grant references to the pages of the
357*4882a593Smuzhiyun  * page directory and the buffer we share with the backend.
358*4882a593Smuzhiyun  *
359*4882a593Smuzhiyun  * \param buf shared buffer.
360*4882a593Smuzhiyun  */
guest_fill_page_dir(struct xen_front_pgdir_shbuf * buf)361*4882a593Smuzhiyun static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
362*4882a593Smuzhiyun {
363*4882a593Smuzhiyun 	unsigned char *ptr;
364*4882a593Smuzhiyun 	int cur_gref, grefs_left, to_copy, i, num_pages_dir;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	ptr = buf->directory;
367*4882a593Smuzhiyun 	num_pages_dir = get_num_pages_dir(buf);
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	/*
370*4882a593Smuzhiyun 	 * While copying, skip grefs at start, they are for pages
371*4882a593Smuzhiyun 	 * granted for the page directory itself.
372*4882a593Smuzhiyun 	 */
373*4882a593Smuzhiyun 	cur_gref = num_pages_dir;
374*4882a593Smuzhiyun 	grefs_left = buf->num_pages;
375*4882a593Smuzhiyun 	for (i = 0; i < num_pages_dir; i++) {
376*4882a593Smuzhiyun 		struct xen_page_directory *page_dir =
377*4882a593Smuzhiyun 			(struct xen_page_directory *)ptr;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 		if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
380*4882a593Smuzhiyun 			to_copy = grefs_left;
381*4882a593Smuzhiyun 			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
382*4882a593Smuzhiyun 		} else {
383*4882a593Smuzhiyun 			to_copy = XEN_NUM_GREFS_PER_PAGE;
384*4882a593Smuzhiyun 			page_dir->gref_dir_next_page = buf->grefs[i + 1];
385*4882a593Smuzhiyun 		}
386*4882a593Smuzhiyun 		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
387*4882a593Smuzhiyun 		       to_copy * sizeof(grant_ref_t));
388*4882a593Smuzhiyun 		ptr += PAGE_SIZE;
389*4882a593Smuzhiyun 		grefs_left -= to_copy;
390*4882a593Smuzhiyun 		cur_gref += to_copy;
391*4882a593Smuzhiyun 	}
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun /**
395*4882a593Smuzhiyun  * Grant references to the frontend's buffer pages.
396*4882a593Smuzhiyun  *
397*4882a593Smuzhiyun  * These will be shared with the backend, so it can
398*4882a593Smuzhiyun  * access the buffer's data.
399*4882a593Smuzhiyun  *
400*4882a593Smuzhiyun  * \param buf shared buffer.
401*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
402*4882a593Smuzhiyun  */
guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf * buf,grant_ref_t * priv_gref_head,int gref_idx)403*4882a593Smuzhiyun static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
404*4882a593Smuzhiyun 				       grant_ref_t *priv_gref_head,
405*4882a593Smuzhiyun 				       int gref_idx)
406*4882a593Smuzhiyun {
407*4882a593Smuzhiyun 	int i, cur_ref, otherend_id;
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	otherend_id = buf->xb_dev->otherend_id;
410*4882a593Smuzhiyun 	for (i = 0; i < buf->num_pages; i++) {
411*4882a593Smuzhiyun 		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
412*4882a593Smuzhiyun 		if (cur_ref < 0)
413*4882a593Smuzhiyun 			return cur_ref;
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
416*4882a593Smuzhiyun 						xen_page_to_gfn(buf->pages[i]),
417*4882a593Smuzhiyun 						0);
418*4882a593Smuzhiyun 		buf->grefs[gref_idx++] = cur_ref;
419*4882a593Smuzhiyun 	}
420*4882a593Smuzhiyun 	return 0;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun /**
424*4882a593Smuzhiyun  * Grant all the references needed to share the buffer.
425*4882a593Smuzhiyun  *
426*4882a593Smuzhiyun  * Grant references to the page directory pages and, if
427*4882a593Smuzhiyun  * needed, also to the pages of the shared buffer data.
428*4882a593Smuzhiyun  *
429*4882a593Smuzhiyun  * \param buf shared buffer.
430*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
431*4882a593Smuzhiyun  */
grant_references(struct xen_front_pgdir_shbuf * buf)432*4882a593Smuzhiyun static int grant_references(struct xen_front_pgdir_shbuf *buf)
433*4882a593Smuzhiyun {
434*4882a593Smuzhiyun 	grant_ref_t priv_gref_head;
435*4882a593Smuzhiyun 	int ret, i, j, cur_ref;
436*4882a593Smuzhiyun 	int otherend_id, num_pages_dir;
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
439*4882a593Smuzhiyun 	if (ret < 0) {
440*4882a593Smuzhiyun 		dev_err(&buf->xb_dev->dev,
441*4882a593Smuzhiyun 			"Cannot allocate grant references\n");
442*4882a593Smuzhiyun 		return ret;
443*4882a593Smuzhiyun 	}
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	otherend_id = buf->xb_dev->otherend_id;
446*4882a593Smuzhiyun 	j = 0;
447*4882a593Smuzhiyun 	num_pages_dir = get_num_pages_dir(buf);
448*4882a593Smuzhiyun 	for (i = 0; i < num_pages_dir; i++) {
449*4882a593Smuzhiyun 		unsigned long frame;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
452*4882a593Smuzhiyun 		if (cur_ref < 0)
453*4882a593Smuzhiyun 			return cur_ref;
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 		frame = xen_page_to_gfn(virt_to_page(buf->directory +
456*4882a593Smuzhiyun 						     PAGE_SIZE * i));
457*4882a593Smuzhiyun 		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
458*4882a593Smuzhiyun 		buf->grefs[j++] = cur_ref;
459*4882a593Smuzhiyun 	}
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun 	if (buf->ops->grant_refs_for_buffer) {
462*4882a593Smuzhiyun 		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
463*4882a593Smuzhiyun 		if (ret)
464*4882a593Smuzhiyun 			return ret;
465*4882a593Smuzhiyun 	}
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	gnttab_free_grant_references(priv_gref_head);
468*4882a593Smuzhiyun 	return 0;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun 
/**
 * Allocate all required structures to mange shared buffer.
 *
 * Allocates the grefs array (buf->num_grefs entries, must be set by
 * calc_num_grefs first) and the zeroed page directory storage. On
 * failure the partially allocated members are left for
 * xen_front_pgdir_shbuf_free() to release.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}
489*4882a593Smuzhiyun 
/*
 * For backend allocated buffers we don't need grant_refs_for_buffer
 * as those grant references are allocated at backend side.
 */
static const struct xen_front_pgdir_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap
};
500*4882a593Smuzhiyun 
/*
 * For locally granted references we do not need to map/unmap
 * the references: .map/.unmap stay NULL and the public map/unmap
 * helpers treat that as a no-op.
 */
static const struct xen_front_pgdir_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun /**
512*4882a593Smuzhiyun  * Allocate a new instance of a shared buffer.
513*4882a593Smuzhiyun  *
514*4882a593Smuzhiyun  * \param cfg configuration to be used while allocating a new shared buffer.
515*4882a593Smuzhiyun  * \return zero on success or a negative number on failure.
516*4882a593Smuzhiyun  */
xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg * cfg)517*4882a593Smuzhiyun int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
518*4882a593Smuzhiyun {
519*4882a593Smuzhiyun 	struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
520*4882a593Smuzhiyun 	int ret;
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	if (cfg->be_alloc)
523*4882a593Smuzhiyun 		buf->ops = &backend_ops;
524*4882a593Smuzhiyun 	else
525*4882a593Smuzhiyun 		buf->ops = &local_ops;
526*4882a593Smuzhiyun 	buf->xb_dev = cfg->xb_dev;
527*4882a593Smuzhiyun 	buf->num_pages = cfg->num_pages;
528*4882a593Smuzhiyun 	buf->pages = cfg->pages;
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 	buf->ops->calc_num_grefs(buf);
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	ret = alloc_storage(buf);
533*4882a593Smuzhiyun 	if (ret)
534*4882a593Smuzhiyun 		goto fail;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	ret = grant_references(buf);
537*4882a593Smuzhiyun 	if (ret)
538*4882a593Smuzhiyun 		goto fail;
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	buf->ops->fill_page_dir(buf);
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	return 0;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun fail:
545*4882a593Smuzhiyun 	xen_front_pgdir_shbuf_free(buf);
546*4882a593Smuzhiyun 	return ret;
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
549*4882a593Smuzhiyun 
/* Module metadata; "GPL" here means GPL v2 or later per module.h. */
MODULE_DESCRIPTION("Xen frontend/backend page directory based "
		   "shared buffer handling");
MODULE_AUTHOR("Oleksandr Andrushchenko");
MODULE_LICENSE("GPL");
554