// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

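/*
 * Run the standard atomic-helper checks first, then validate global state
 * that spans multiple display controllers on SoCs that have a display hub.
 */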
static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->contexts, 1);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}

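/*
 * Copy a single relocation descriptor from userspace and resolve the
 * command buffer and target GEM handles into host1x buffer objects. Each
 * successful lookup takes a reference that the caller is responsible for
 * dropping once the submission completes.
 */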
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

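/*
 * Main job submission path: validate the submit arguments, allocate a
 * host1x job, copy command buffers and relocations from userspace (taking
 * a reference on every GEM object involved), then pin and submit the job.
 * On success, args->fence holds the syncpoint threshold that userspace can
 * wait on for job completion.
 */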
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We currently only support exactly one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and causes command
		 * stream corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset causes an unaligned write
		 * during relocation patching, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

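/*
 * Staging IOCTL implementations. A typical userspace sequence is roughly
 * (illustrative only, not mandated by this file):
 *
 *   DRM_IOCTL_TEGRA_OPEN_CHANNEL  - open a channel to an engine class
 *   DRM_IOCTL_TEGRA_GET_SYNCPT    - query a syncpoint for the channel
 *   DRM_IOCTL_TEGRA_SUBMIT        - submit work, returns a fence value
 *   DRM_IOCTL_TEGRA_SYNCPT_WAIT   - wait for the fence to be reached
 *   DRM_IOCTL_TEGRA_CLOSE_CHANNEL - release the channel
 */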
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

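/*
 * idr_for_each() callback: free one open channel context. Used by
 * tegra_drm_postclose() below to release any contexts that userspace left
 * open when the file descriptor was closed.
 */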
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

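/*
 * Attach a host1x client to the shared IOMMU domain, if one exists and the
 * client isn't already translated by a (DMA API managed) domain of its own.
 * A successful explicit attach switches the driver into explicit-IOMMU
 * mode, where GEM and carveout allocations are mapped manually.
 */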
int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group)
			return -ENODEV;

		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}

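/*
 * Allocate kernel-internal memory (e.g. for engine firmware) from the
 * carveout region. With an explicit IOMMU domain the pages are mapped into
 * the carveout IOVA range; without one, the physical address is used
 * directly, restricted to the lower 32 bits via GFP_DMA where needed.
 */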
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
	struct iommu_domain *domain;

	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
	 * likely to be allocated beyond the 32-bit boundary if sufficient
	 * system memory is available. This is problematic on earlier Tegra
	 * generations where host1x supports a maximum of 32 address bits in
	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
	 * as well it won't be able to process buffers allocated beyond the
	 * 32-bit boundary.
	 *
	 * The DMA API will use bounce buffers in this case, so that could
	 * perhaps still be made to work, even if less efficient, but there
	 * is another catch: in order to perform cache maintenance on pages
	 * allocated for discontiguous buffers we need to map and unmap the
	 * SG table representing these buffers. This is fine for something
	 * small like a push buffer, but it exhausts the bounce buffer pool
	 * (typically on the order of a few MiB) for framebuffers (many MiB
	 * for any modern resolution).
	 *
	 * Work around this by making sure that Tegra DRM clients only use
	 * an IOMMU if the parent host1x also uses an IOMMU.
	 *
	 * Note that there's still a small gap here that we don't cover: if
	 * the DMA API is backed by an IOMMU there's no way to control which
	 * device is attached to an IOMMU and which isn't, except via wiring
	 * up the device tree appropriately. This is considered a problem
	 * of integration, so care must be taken for the DT to be consistent.
	 */
	domain = iommu_get_domain_for_dev(dev->dev.parent);

	/*
	 * Tegra20 and Tegra30 don't support addressing memory beyond the
	 * 32-bit boundary, so the regular GATHER opcodes will always be
	 * sufficient and whether or not the host1x is attached to an IOMMU
	 * doesn't matter.
	 */
	if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
		return true;

	return domain != NULL;
}

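/*
 * Bus probe: allocate the DRM device and driver state, optionally set up
 * the shared IOMMU domain (split into a GEM aperture and a CARVEOUT_SZ
 * carveout for kernel-internal allocations), initialize KMS, bring up all
 * host1x sub-devices and finally register the DRM device.
 */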
host1x_drm_probe(struct host1x_device * dev)1090*4882a593Smuzhiyun static int host1x_drm_probe(struct host1x_device *dev)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun struct drm_driver *driver = &tegra_drm_driver;
1093*4882a593Smuzhiyun struct tegra_drm *tegra;
1094*4882a593Smuzhiyun struct drm_device *drm;
1095*4882a593Smuzhiyun int err;
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun drm = drm_dev_alloc(driver, &dev->dev);
1098*4882a593Smuzhiyun if (IS_ERR(drm))
1099*4882a593Smuzhiyun return PTR_ERR(drm);
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
1102*4882a593Smuzhiyun if (!tegra) {
1103*4882a593Smuzhiyun err = -ENOMEM;
1104*4882a593Smuzhiyun goto put;
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
1108*4882a593Smuzhiyun tegra->domain = iommu_domain_alloc(&platform_bus_type);
1109*4882a593Smuzhiyun if (!tegra->domain) {
1110*4882a593Smuzhiyun err = -ENOMEM;
1111*4882a593Smuzhiyun goto free;
1112*4882a593Smuzhiyun }
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun err = iova_cache_get();
1115*4882a593Smuzhiyun if (err < 0)
1116*4882a593Smuzhiyun goto domain;
1117*4882a593Smuzhiyun }
1118*4882a593Smuzhiyun
1119*4882a593Smuzhiyun mutex_init(&tegra->clients_lock);
1120*4882a593Smuzhiyun INIT_LIST_HEAD(&tegra->clients);
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun dev_set_drvdata(&dev->dev, drm);
1123*4882a593Smuzhiyun drm->dev_private = tegra;
1124*4882a593Smuzhiyun tegra->drm = drm;
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun drm_mode_config_init(drm);
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun drm->mode_config.min_width = 0;
1129*4882a593Smuzhiyun drm->mode_config.min_height = 0;
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun drm->mode_config.max_width = 4096;
1132*4882a593Smuzhiyun drm->mode_config.max_height = 4096;
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun drm->mode_config.normalize_zpos = true;
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
1137*4882a593Smuzhiyun drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun err = tegra_drm_fb_prepare(drm);
1140*4882a593Smuzhiyun if (err < 0)
1141*4882a593Smuzhiyun goto config;
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyun drm_kms_helper_poll_init(drm);
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun err = host1x_device_init(dev);
1146*4882a593Smuzhiyun if (err < 0)
1147*4882a593Smuzhiyun goto fbdev;
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun if (tegra->use_explicit_iommu) {
1150*4882a593Smuzhiyun u64 carveout_start, carveout_end, gem_start, gem_end;
1151*4882a593Smuzhiyun u64 dma_mask = dma_get_mask(&dev->dev);
1152*4882a593Smuzhiyun dma_addr_t start, end;
1153*4882a593Smuzhiyun unsigned long order;
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun start = tegra->domain->geometry.aperture_start & dma_mask;
1156*4882a593Smuzhiyun end = tegra->domain->geometry.aperture_end & dma_mask;
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun gem_start = start;
1159*4882a593Smuzhiyun gem_end = end - CARVEOUT_SZ;
1160*4882a593Smuzhiyun carveout_start = gem_end + 1;
1161*4882a593Smuzhiyun carveout_end = end;
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun order = __ffs(tegra->domain->pgsize_bitmap);
1164*4882a593Smuzhiyun init_iova_domain(&tegra->carveout.domain, 1UL << order,
1165*4882a593Smuzhiyun carveout_start >> order);
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
1168*4882a593Smuzhiyun tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
1171*4882a593Smuzhiyun mutex_init(&tegra->mm_lock);
1172*4882a593Smuzhiyun
1173*4882a593Smuzhiyun DRM_DEBUG_DRIVER("IOMMU apertures:\n");
1174*4882a593Smuzhiyun DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
1175*4882a593Smuzhiyun DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
1176*4882a593Smuzhiyun carveout_end);
1177*4882a593Smuzhiyun } else if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		tegra->domain = NULL;
		iova_cache_put();
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
							    false);
	if (err < 0)
		goto hub;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto fb;

	return 0;

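	/* Error unwind: undo the steps above in reverse order. */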
fb:
	tegra_drm_fb_exit(drm);
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}

	host1x_device_exit(dev);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

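	/*
	 * Unregister the device first so that userspace can no longer
	 * reach it, then tear down KMS and host1x state in roughly the
	 * reverse order of initialization.
	 */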
	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);

	err = host1x_device_exit(dev);
	if (err < 0)
		dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
	drm_dev_put(drm);

	return 0;
}

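/*
 * System sleep is handled entirely through the atomic mode-config helpers,
 * which save the current display state on suspend and restore it on resume.
 */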
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

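/*
 * All host1x clients (display controllers, outputs and engines) that can be
 * aggregated into the single, virtual DRM device exposed by this driver.
 */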
static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ /* sentinel */ }
};

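/*
 * The logical host1x "super-device" driver: host1x_drm_probe() runs once
 * all of the subdevices listed above that are present in the device tree
 * have been registered with the host1x bus.
 */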
static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

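/* Platform drivers for the individual IP blocks, registered in bulk below. */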
static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

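/*
 * Register the logical DRM driver with the host1x bus first, then the
 * platform drivers for the individual IP blocks; unwind the host1x
 * registration if the latter fails.
 */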
static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");