xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/i915_mm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"

struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;

	struct sgt_iter sgt;
	resource_size_t iobase;
};

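/*
 * apply_to_page_range() callback: write one special (no struct page) PTE
 * per invocation, walking a linear run of pfns starting at r->pfn.
 */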
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}

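/*
 * An iobase of -1 means "no DMA remapping": the sg iterator then yields
 * struct-page pfns directly; otherwise the stored dma addresses are offset
 * by iobase to reach the backing I/O region.
 */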
#define use_dma(io) ((io) != -1)

static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

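/*
 * apply_to_page_range() callback: install one special PTE per page, taking
 * pfns from the sg iterator and advancing to the next sg entry once the
 * current one is exhausted.
 */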
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.pfn))
		return -EINVAL;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}

/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of the first page of physical memory to map
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
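
/*
 * Illustrative sketch only, not part of the driver: a minimal example of how
 * a GTT fault handler might call remap_io_mapping() to back a whole vma with
 * aperture pages.  The `aperture` io_mapping and `phys_offset` into it are
 * assumed parameters; the real i915 fault path computes them from the vma's
 * GGTT binding and holds the mm semaphore as required above.
 */
static vm_fault_t __maybe_unused
example_remap_io_mapping(struct vm_area_struct *vma,
			 struct io_mapping *aperture,
			 resource_size_t phys_offset)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	/* pfn of the first aperture page that should back this vma */
	unsigned long pfn = (aperture->base + phys_offset) >> PAGE_SHIFT;
	int err;

	err = remap_io_mapping(vma, vma->vm_start, pfn, size, aperture);
	return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}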

/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: offset added to the stored dma addresses, or -1 to take pfns
 *          from the sg entries instead
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
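
/*
 * Illustrative sketch only, not part of the driver: maps an object's backing
 * pages into a vma via its scatterlist.  Passing -1 for iobase selects the
 * struct-page pfn path; a caller whose pages live in device memory would pass
 * the region's I/O base instead.  `pages` is an assumed, already populated
 * sg_table, and the mm semaphore is assumed held as required above.
 */
static vm_fault_t __maybe_unused
example_remap_io_sg(struct vm_area_struct *vma, struct sg_table *pages)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	int err;

	err = remap_io_sg(vma, vma->vm_start, size, pages->sgl, -1);
	return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}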