// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */

#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

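/*
 * A GART page table entry is a single 32-bit word: bit 31 marks the entry
 * valid and bits 30:12 hold the physical page frame, as encoded by the
 * macros below.
 */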
#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	unsigned long		iovmm_base;	/* offset to vmm_area start */
	unsigned long		iovmm_end;	/* offset to vmm_area end */
	spinlock_t		pte_lock;	/* for pagetable */
	spinlock_t		dom_lock;	/* for active domain */
	unsigned int		active_devices;	/* number of active devices */
	struct iommu_domain	*active_domain;	/* current active domain */
	struct iommu_device	iommu;		/* IOMMU Core handle */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)

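/*
 * PTEs are programmed through an indirect register pair: the target IOVA
 * is written to GART_ENTRY_ADDR, then the PTE value is written to (or read
 * from) GART_ENTRY_DATA.
 */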
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

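/*
 * do_gart_setup() programs every PTE in the aperture, either from a saved
 * snapshot or with zeroes, and then enables address translation by writing
 * 1 to GART_CONFIG.
 */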
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}

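/*
 * The GART has a single page table shared by all of its clients, so only
 * one domain can be active at a time: attaching a device while a different
 * domain is active fails with -EBUSY.
 */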
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev_iommu_priv_get(dev) != domain) {
		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev_iommu_priv_get(dev) == domain) {
		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}

static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}
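
/*
 * Sketch of a hypothetical caller driving the GART through the generic
 * IOMMU API (dev, iova and phys are assumed to come from the caller;
 * error handling omitted):
 *
 *	struct iommu_domain *domain;
 *	int err;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	err = iommu_attach_device(domain, dev);
 *	err = iommu_map(domain, iova, phys, GART_PAGE_SIZE,
 *			IOMMU_READ | IOMMU_WRITE);
 */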

static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}

static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
		return ERR_PTR(-ENODEV);

	return &gart_handle->iommu;
}

static void gart_iommu_release_device(struct device *dev)
{
}

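/*
 * There is no per-device configuration to parse from the firmware node, so
 * of_xlate only has to accept the device.
 */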
static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

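/*
 * Both flush callbacks complete any outstanding PTE writes with a read-back
 * of GART_CONFIG (see FLUSH_GART_REGS above).
 */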
static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
				size_t size)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	size_t length = gather->end - gather->start;

	gart_iommu_sync_map(domain, gather->start, length);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.probe_device	= gart_iommu_probe_device,
	.release_device	= gart_iommu_release_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync_map,
	.iotlb_sync	= gart_iommu_sync,
};

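/*
 * Across suspend the whole page table is snapshotted into gart->savedata
 * (one u32 per GART page) so that tegra_gart_resume() can rebuild it via
 * do_gart_setup().
 */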
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}

struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

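	/* the driver assumes the CPU page size matches the 4KiB GART page */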
	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	err = iommu_device_register(&gart->iommu);
	if (err)
		goto remove_sysfs;

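	/* one u32 of suspend/resume save area per GART page in the aperture */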
	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");