xref: /OK3568_Linux_fs/kernel/include/xen/grant_table.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /******************************************************************************
2*4882a593Smuzhiyun  * grant_table.h
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Two sets of functionality:
5*4882a593Smuzhiyun  * 1. Granting foreign access to our memory reservation.
6*4882a593Smuzhiyun  * 2. Accessing others' memory reservations via grant references.
7*4882a593Smuzhiyun  * (i.e., mechanisms for both sender and recipient of grant references)
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Copyright (c) 2004-2005, K A Fraser
10*4882a593Smuzhiyun  * Copyright (c) 2005, Christopher Clark
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or
13*4882a593Smuzhiyun  * modify it under the terms of the GNU General Public License version 2
14*4882a593Smuzhiyun  * as published by the Free Software Foundation; or, when distributed
15*4882a593Smuzhiyun  * separately from the Linux kernel or incorporated into other
16*4882a593Smuzhiyun  * software packages, subject to the following license:
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a copy
19*4882a593Smuzhiyun  * of this source file (the "Software"), to deal in the Software without
20*4882a593Smuzhiyun  * restriction, including without limitation the rights to use, copy, modify,
21*4882a593Smuzhiyun  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22*4882a593Smuzhiyun  * and to permit persons to whom the Software is furnished to do so, subject to
23*4882a593Smuzhiyun  * the following conditions:
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  * The above copyright notice and this permission notice shall be included in
26*4882a593Smuzhiyun  * all copies or substantial portions of the Software.
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31*4882a593Smuzhiyun  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32*4882a593Smuzhiyun  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33*4882a593Smuzhiyun  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34*4882a593Smuzhiyun  * IN THE SOFTWARE.
35*4882a593Smuzhiyun  */
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #ifndef __ASM_GNTTAB_H__
38*4882a593Smuzhiyun #define __ASM_GNTTAB_H__
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #include <asm/page.h>
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #include <xen/interface/xen.h>
43*4882a593Smuzhiyun #include <xen/interface/grant_table.h>
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #include <asm/xen/hypervisor.h>
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #include <xen/features.h>
48*4882a593Smuzhiyun #include <xen/page.h>
49*4882a593Smuzhiyun #include <linux/mm_types.h>
50*4882a593Smuzhiyun #include <linux/page-flags.h>
51*4882a593Smuzhiyun #include <linux/kernel.h>
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun #define GNTTAB_RESERVED_XENSTORE 1
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
56*4882a593Smuzhiyun #define NR_GRANT_FRAMES 4
57*4882a593Smuzhiyun 
/*
 * Callback record queued by gnttab_request_free_callback(); fn(arg) is
 * invoked once at least @count grant references are available again.
 */
struct gnttab_free_callback {
	struct gnttab_free_callback *next;	/* next entry in the pending-callback list */
	void (*fn)(void *);			/* function to invoke */
	void *arg;				/* opaque argument handed to fn */
	u16 count;				/* number of free grant refs being waited for */
};
64*4882a593Smuzhiyun 
struct gntab_unmap_queue_data;

/* Completion callback for gnttab_unmap_refs_async(); @result is the unmap outcome. */
typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

/*
 * State for an asynchronous batched unmap (gnttab_unmap_refs_async() /
 * gnttab_unmap_refs_sync()).  The unmap is retried via delayed work until
 * all entries can be released.
 */
struct gntab_unmap_queue_data
{
	struct delayed_work	gnttab_work;	/* work item used to retry the unmap */
	void *data;				/* caller-private context */
	gnttab_unmap_refs_done	done;		/* invoked when the batch completes */
	struct gnttab_unmap_grant_ref *unmap_ops;	/* user-mapping unmap ops */
	struct gnttab_unmap_grant_ref *kunmap_ops;	/* kernel-mapping unmap ops; presumably may be NULL -- verify against callers */
	struct page **pages;			/* pages backing the mappings */
	unsigned int count;			/* entries in the ops/pages arrays */
	unsigned int age;			/* retry bookkeeping for the async path */
};
80*4882a593Smuzhiyun 
/* Grant-table subsystem lifecycle (boot / suspend-resume). */
int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

/*
 * Grant domain @domid access to frame @frame, read-only iff @readonly.
 * Presumably returns the new grant reference on success and a negative
 * value on failure -- confirm against drivers/xen/grant-table.c.
 */
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
 * use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
94*4882a593Smuzhiyun 
/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too.  Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later.  page may be 0, in which case no freeing will occur.
 * Note that the granted page might still be accessed (read or write) by the
 * other side after gnttab_end_foreign_access() returns, so even if page was
 * specified as 0 it is not allowed to just reuse the page for other
 * purposes immediately. gnttab_end_foreign_access() will take an additional
 * reference to the granted page in this case, which is dropped only after
 * the grant is no longer in use.
 * This requires that multi page allocations for areas subject to
 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
 * via free_pages_exact()) in order to avoid high order pages.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page);

/*
 * End access through the given grant reference, iff the grant entry is
 * no longer in use.  In case of success ending foreign access, the
 * grant reference is deallocated.
 * Return 1 if the grant entry was freed, 0 if it is still in use.
 */
int gnttab_try_end_foreign_access(grant_ref_t ref);

/*
 * Grant @domid permission to transfer a page into the slot identified by
 * @pfn.  NOTE(review): presumably returns a grant reference or a negative
 * value on failure -- confirm against the implementation.
 */
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

/* Complete a page transfer; the returned value is the transferred frame. */
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
/* As above, but also deallocates the grant reference. */
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
125*4882a593Smuzhiyun 
/*
 * operations on reserved batches of grant references
 */

/* Reserve @count grant references; the batch head is stored in *pprivate_head. */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

/* Return a single grant reference to the free pool. */
void gnttab_free_grant_reference(grant_ref_t ref);

/* Return an entire reserved batch (identified by its head) to the free pool. */
void gnttab_free_grant_references(grant_ref_t head);

/* Nonzero iff the batch at *pprivate_head has no references left to claim. */
int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

/* Take one reference out of the batch; updates *pprivate_head. */
int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

/* Put @release back into the batch rooted at *private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release);

/* Arrange for fn(arg) to run once @count grant references become free. */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

/* Fill in the pre-allocated grant reference @ref to grant @domid access to @frame. */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly);
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun /* Give access to the first 4K of the page */
gnttab_page_grant_foreign_access_ref_one(grant_ref_t ref,domid_t domid,struct page * page,int readonly)150*4882a593Smuzhiyun static inline void gnttab_page_grant_foreign_access_ref_one(
151*4882a593Smuzhiyun 	grant_ref_t ref, domid_t domid,
152*4882a593Smuzhiyun 	struct page *page, int readonly)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun 	gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
155*4882a593Smuzhiyun 					readonly);
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
/* Fill in the pre-allocated grant reference for a page transfer from @domid into @pfn. */
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
				       unsigned long pfn);
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref * map,phys_addr_t addr,uint32_t flags,grant_ref_t ref,domid_t domid)162*4882a593Smuzhiyun gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
163*4882a593Smuzhiyun 		  uint32_t flags, grant_ref_t ref, domid_t domid)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun 	if (flags & GNTMAP_contains_pte)
166*4882a593Smuzhiyun 		map->host_addr = addr;
167*4882a593Smuzhiyun 	else if (xen_feature(XENFEAT_auto_translated_physmap))
168*4882a593Smuzhiyun 		map->host_addr = __pa(addr);
169*4882a593Smuzhiyun 	else
170*4882a593Smuzhiyun 		map->host_addr = addr;
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	map->flags = flags;
173*4882a593Smuzhiyun 	map->ref = ref;
174*4882a593Smuzhiyun 	map->dom = domid;
175*4882a593Smuzhiyun 	map->status = 1; /* arbitrary positive value */
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref * unmap,phys_addr_t addr,uint32_t flags,grant_handle_t handle)179*4882a593Smuzhiyun gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
180*4882a593Smuzhiyun 		    uint32_t flags, grant_handle_t handle)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	if (flags & GNTMAP_contains_pte)
183*4882a593Smuzhiyun 		unmap->host_addr = addr;
184*4882a593Smuzhiyun 	else if (xen_feature(XENFEAT_auto_translated_physmap))
185*4882a593Smuzhiyun 		unmap->host_addr = __pa(addr);
186*4882a593Smuzhiyun 	else
187*4882a593Smuzhiyun 		unmap->host_addr = addr;
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	unmap->handle = handle;
190*4882a593Smuzhiyun 	unmap->dev_bus_addr = 0;
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun 
/* Architecture hooks for mapping the shared grant-table / status frames. */
int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

/* Pre-reserved frames used when running auto-translated (PVH/HVM). */
struct grant_frames {
	xen_pfn_t *pfn;		/* array of @count frame numbers */
	unsigned int count;	/* number of entries in @pfn */
	void *vaddr;		/* kernel mapping of the frames */
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

/* Kernel virtual address a completed map op was mapped at. */
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

/* Allocate/free pages suitable for granting to / mapping from other domains. */
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
216*4882a593Smuzhiyun 
/*
 * Simple cache of grantable pages, so backends can recycle pages instead
 * of hitting the allocator on every request.  Protected by @lock.
 */
struct gnttab_page_cache {
	spinlock_t		lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
	struct page		*pages;		/* singly linked via page internals */
#else
	struct list_head	pages;		/* list of cached pages */
#endif
	unsigned int		num_pages;	/* current number of cached pages */
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
/* Take one page out of the cache; see implementation for behavior when empty. */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
/* Return @num pages to the cache. */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num);
/* Free cached pages down to at most @num remaining. */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
			      unsigned int num);
233*4882a593Smuzhiyun 
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* In/out arguments for gnttab_dma_alloc_pages()/gnttab_dma_free_pages(). */
struct gnttab_dma_alloc_args {
	/* Device for which DMA memory will be/was allocated. */
	struct device *dev;
	/* If set then DMA buffer is coherent and write-combine otherwise. */
	bool coherent;

	int nr_pages;			/* number of pages to allocate/free */
	struct page **pages;		/* caller-supplied array, filled on alloc */
	xen_pfn_t *frames;		/* caller-supplied array of frame numbers */
	void *vaddr;			/* kernel virtual address of the buffer */
	dma_addr_t dev_bus_addr;	/* bus address for the device */
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif
251*4882a593Smuzhiyun 
/* Mark/unmark pages as privately owned by the grant-table code. */
int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

/*
 * Map/unmap batches of foreign grants onto local @pages.  @kmap_ops /
 * @kunmap_ops cover the kernel mappings and, judging by the async queue
 * struct above, presumably may be NULL -- verify against the implementation.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
/* Asynchronous unmap with retry; item->done is called on completion. */
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
/* Synchronous wrapper around the async unmap path. */
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 
/* Perform a batch of grant map/copy operations. Retry every batch slot
 * for which the hypervisor returns GNTST_eagain. This is typically due
 * to paged out target frames.
 *
 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
 *
 * The return value in each and every status field of the batch is
 * guaranteed to not be GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 
/* Per-page record identifying which foreign domain granted the page. */
struct xen_page_foreign {
	domid_t domid;		/* owning (granting) domain */
	grant_ref_t gref;	/* grant reference the page was mapped with */
};
282*4882a593Smuzhiyun 
xen_page_foreign(struct page * page)283*4882a593Smuzhiyun static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun 	if (!PageForeign(page))
286*4882a593Smuzhiyun 		return NULL;
287*4882a593Smuzhiyun #if BITS_PER_LONG < 64
288*4882a593Smuzhiyun 	return (struct xen_page_foreign *)page->private;
289*4882a593Smuzhiyun #else
290*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
291*4882a593Smuzhiyun 	return (struct xen_page_foreign *)&page->private;
292*4882a593Smuzhiyun #endif
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun 
/* Split Linux page in chunk of the size of the grant and call fn
 *
 * Parameters of fn:
 *	gfn: guest frame number
 *	offset: offset in the grant
 *	len: length of the data in the grant.
 *	data: internal information
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
			       unsigned int len, void *data);

/* Invoke @fn for every grant-sized chunk of [offset, offset + len) in @page. */
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun /* Helper to get to call fn only on the first "grant chunk" */
gnttab_for_one_grant(struct page * page,unsigned int offset,unsigned len,xen_grant_fn_t fn,void * data)313*4882a593Smuzhiyun static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
314*4882a593Smuzhiyun 					unsigned len, xen_grant_fn_t fn,
315*4882a593Smuzhiyun 					void *data)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun 	/* The first request is limited to the size of one grant */
318*4882a593Smuzhiyun 	len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
319*4882a593Smuzhiyun 		    len);
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	gnttab_foreach_grant_in_range(page, offset, len, fn, data);
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun 
/* Get @nr_grefs grants from an array of page and call fn for each grant */
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data);
329*4882a593Smuzhiyun 
/*
 * Number of grants covering a region.
 *
 * start: Offset from the beginning of the first page
 * len: total length of data (can cross multiple page)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	/* Shift by the offset inside the first grant, then round up. */
	unsigned int span = xen_offset_in_page(start) + len;

	return XEN_PFN_UP(span);
}
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun #endif /* __ASM_GNTTAB_H__ */
342