1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2*4882a593Smuzhiyun /**************************************************************************
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
5*4882a593Smuzhiyun * All Rights Reserved.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
8*4882a593Smuzhiyun * copy of this software and associated documentation files (the
9*4882a593Smuzhiyun * "Software"), to deal in the Software without restriction, including
10*4882a593Smuzhiyun * without limitation the rights to use, copy, modify, merge, publish,
11*4882a593Smuzhiyun * distribute, sub license, and/or sell copies of the Software, and to
12*4882a593Smuzhiyun * permit persons to whom the Software is furnished to do so, subject to
13*4882a593Smuzhiyun * the following conditions:
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the
16*4882a593Smuzhiyun * next paragraph) shall be included in all copies or substantial portions
17*4882a593Smuzhiyun * of the Software.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22*4882a593Smuzhiyun * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23*4882a593Smuzhiyun * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24*4882a593Smuzhiyun * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25*4882a593Smuzhiyun * USE OR OTHER DEALINGS IN THE SOFTWARE.
26*4882a593Smuzhiyun *
27*4882a593Smuzhiyun **************************************************************************/
28*4882a593Smuzhiyun /*
29*4882a593Smuzhiyun * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun * While no substantial code is shared, the prime code is inspired by
32*4882a593Smuzhiyun * drm_prime.c, with
33*4882a593Smuzhiyun * Authors:
34*4882a593Smuzhiyun * Dave Airlie <airlied@redhat.com>
35*4882a593Smuzhiyun * Rob Clark <rob.clark@linaro.org>
36*4882a593Smuzhiyun */
37*4882a593Smuzhiyun /** @file ttm_ref_object.c
38*4882a593Smuzhiyun *
39*4882a593Smuzhiyun * Base- and reference object implementation for the various
40*4882a593Smuzhiyun * ttm objects. Implements reference counting, minimal security checks
41*4882a593Smuzhiyun * and release on file close.
42*4882a593Smuzhiyun */
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /**
46*4882a593Smuzhiyun * struct ttm_object_file
47*4882a593Smuzhiyun *
48*4882a593Smuzhiyun * @tdev: Pointer to the ttm_object_device.
49*4882a593Smuzhiyun *
50*4882a593Smuzhiyun * @lock: Lock that protects the ref_list list and the
51*4882a593Smuzhiyun * ref_hash hash tables.
52*4882a593Smuzhiyun *
53*4882a593Smuzhiyun * @ref_list: List of ttm_ref_objects to be destroyed at
54*4882a593Smuzhiyun * file release.
55*4882a593Smuzhiyun *
56*4882a593Smuzhiyun * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
57*4882a593Smuzhiyun * for fast lookup of ref objects given a base object.
58*4882a593Smuzhiyun */
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun #define pr_fmt(fmt) "[TTM] " fmt
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun #include <drm/ttm/ttm_module.h>
63*4882a593Smuzhiyun #include <linux/list.h>
64*4882a593Smuzhiyun #include <linux/spinlock.h>
65*4882a593Smuzhiyun #include <linux/slab.h>
66*4882a593Smuzhiyun #include <linux/atomic.h>
67*4882a593Smuzhiyun #include "ttm_object.h"
68*4882a593Smuzhiyun
struct ttm_object_file {
	struct ttm_object_device *tdev;	/* Owning device. */
	spinlock_t lock;		/* Protects ref_list and the ref_hash tables. */
	struct list_head ref_list;	/* Ref objects destroyed at file release. */
	struct drm_open_hash ref_hash[TTM_REF_NUM]; /* Per-ref-type lookup by handle. */
	struct kref refcount;		/* Lifetime of this structure itself. */
};
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun /**
78*4882a593Smuzhiyun * struct ttm_object_device
79*4882a593Smuzhiyun *
80*4882a593Smuzhiyun * @object_lock: lock that protects the object_hash hash table.
81*4882a593Smuzhiyun *
82*4882a593Smuzhiyun * @object_hash: hash table for fast lookup of object global names.
83*4882a593Smuzhiyun *
84*4882a593Smuzhiyun * @object_count: Per device object count.
85*4882a593Smuzhiyun *
86*4882a593Smuzhiyun * This is the per-device data structure needed for ttm object management.
87*4882a593Smuzhiyun */
88*4882a593Smuzhiyun
struct ttm_object_device {
	spinlock_t object_lock;		/* Protects object_hash and the idr. */
	struct drm_open_hash object_hash; /* Global-name -> object lookup. */
	atomic_t object_count;		/* Per-device object count. */
	struct ttm_mem_global *mem_glob; /* Memory accounting for ref allocations. */
	struct dma_buf_ops ops;		/* Driver ops copy with release() hooked. */
	void (*dmabuf_release)(struct dma_buf *dma_buf); /* Driver's original release(). */
	size_t dma_buf_size;		/* Accounted size of an exported dma_buf. */
	struct idr idr;			/* Handle allocator for base objects. */
};
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun /**
101*4882a593Smuzhiyun * struct ttm_ref_object
102*4882a593Smuzhiyun *
103*4882a593Smuzhiyun * @hash: Hash entry for the per-file object reference hash.
104*4882a593Smuzhiyun *
105*4882a593Smuzhiyun * @head: List entry for the per-file list of ref-objects.
106*4882a593Smuzhiyun *
107*4882a593Smuzhiyun * @kref: Ref count.
108*4882a593Smuzhiyun *
109*4882a593Smuzhiyun * @obj: Base object this ref object is referencing.
110*4882a593Smuzhiyun *
111*4882a593Smuzhiyun * @ref_type: Type of ref object.
112*4882a593Smuzhiyun *
113*4882a593Smuzhiyun * This is similar to an idr object, but it also has a hash table entry
114*4882a593Smuzhiyun * that allows lookup with a pointer to the referenced object as a key. In
115*4882a593Smuzhiyun * that way, one can easily detect whether a base object is referenced by
116*4882a593Smuzhiyun * a particular ttm_object_file. It also carries a ref count to avoid creating
117*4882a593Smuzhiyun * multiple ref objects if a ttm_object_file references the same base
118*4882a593Smuzhiyun * object more than once.
119*4882a593Smuzhiyun */
120*4882a593Smuzhiyun
struct ttm_ref_object {
	struct rcu_head rcu_head;	/* For kfree_rcu() on release. */
	struct drm_hash_item hash;	/* Entry in tfile->ref_hash, keyed by handle. */
	struct list_head head;		/* Entry in tfile->ref_list. */
	struct kref kref;		/* Times this file referenced the object. */
	enum ttm_ref_type ref_type;	/* Which ref_hash table this lives in. */
	struct ttm_base_object *obj;	/* Referenced base object (holds a ref). */
	struct ttm_object_file *tfile;	/* Owning file. */
};
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
132*4882a593Smuzhiyun
/*
 * ttm_object_file_ref - Take a reference on a ttm_object_file.
 *
 * Returns @tfile so the call can be used inline in assignments.
 */
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}
139*4882a593Smuzhiyun
ttm_object_file_destroy(struct kref * kref)140*4882a593Smuzhiyun static void ttm_object_file_destroy(struct kref *kref)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun struct ttm_object_file *tfile =
143*4882a593Smuzhiyun container_of(kref, struct ttm_object_file, refcount);
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun kfree(tfile);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun
ttm_object_file_unref(struct ttm_object_file ** p_tfile)149*4882a593Smuzhiyun static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun struct ttm_object_file *tfile = *p_tfile;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun *p_tfile = NULL;
154*4882a593Smuzhiyun kref_put(&tfile->refcount, ttm_object_file_destroy);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun
ttm_base_object_init(struct ttm_object_file * tfile,struct ttm_base_object * base,bool shareable,enum ttm_object_type object_type,void (* refcount_release)(struct ttm_base_object **),void (* ref_obj_release)(struct ttm_base_object *,enum ttm_ref_type ref_type))158*4882a593Smuzhiyun int ttm_base_object_init(struct ttm_object_file *tfile,
159*4882a593Smuzhiyun struct ttm_base_object *base,
160*4882a593Smuzhiyun bool shareable,
161*4882a593Smuzhiyun enum ttm_object_type object_type,
162*4882a593Smuzhiyun void (*refcount_release) (struct ttm_base_object **),
163*4882a593Smuzhiyun void (*ref_obj_release) (struct ttm_base_object *,
164*4882a593Smuzhiyun enum ttm_ref_type ref_type))
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun struct ttm_object_device *tdev = tfile->tdev;
167*4882a593Smuzhiyun int ret;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun base->shareable = shareable;
170*4882a593Smuzhiyun base->tfile = ttm_object_file_ref(tfile);
171*4882a593Smuzhiyun base->refcount_release = refcount_release;
172*4882a593Smuzhiyun base->ref_obj_release = ref_obj_release;
173*4882a593Smuzhiyun base->object_type = object_type;
174*4882a593Smuzhiyun kref_init(&base->refcount);
175*4882a593Smuzhiyun idr_preload(GFP_KERNEL);
176*4882a593Smuzhiyun spin_lock(&tdev->object_lock);
177*4882a593Smuzhiyun ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
178*4882a593Smuzhiyun spin_unlock(&tdev->object_lock);
179*4882a593Smuzhiyun idr_preload_end();
180*4882a593Smuzhiyun if (ret < 0)
181*4882a593Smuzhiyun return ret;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun base->handle = ret;
184*4882a593Smuzhiyun ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
185*4882a593Smuzhiyun if (unlikely(ret != 0))
186*4882a593Smuzhiyun goto out_err1;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun ttm_base_object_unref(&base);
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun return 0;
191*4882a593Smuzhiyun out_err1:
192*4882a593Smuzhiyun spin_lock(&tdev->object_lock);
193*4882a593Smuzhiyun idr_remove(&tdev->idr, base->handle);
194*4882a593Smuzhiyun spin_unlock(&tdev->object_lock);
195*4882a593Smuzhiyun return ret;
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
ttm_release_base(struct kref * kref)198*4882a593Smuzhiyun static void ttm_release_base(struct kref *kref)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun struct ttm_base_object *base =
201*4882a593Smuzhiyun container_of(kref, struct ttm_base_object, refcount);
202*4882a593Smuzhiyun struct ttm_object_device *tdev = base->tfile->tdev;
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun spin_lock(&tdev->object_lock);
205*4882a593Smuzhiyun idr_remove(&tdev->idr, base->handle);
206*4882a593Smuzhiyun spin_unlock(&tdev->object_lock);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun /*
209*4882a593Smuzhiyun * Note: We don't use synchronize_rcu() here because it's far
210*4882a593Smuzhiyun * too slow. It's up to the user to free the object using
211*4882a593Smuzhiyun * call_rcu() or ttm_base_object_kfree().
212*4882a593Smuzhiyun */
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun ttm_object_file_unref(&base->tfile);
215*4882a593Smuzhiyun if (base->refcount_release)
216*4882a593Smuzhiyun base->refcount_release(&base);
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
ttm_base_object_unref(struct ttm_base_object ** p_base)219*4882a593Smuzhiyun void ttm_base_object_unref(struct ttm_base_object **p_base)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun struct ttm_base_object *base = *p_base;
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun *p_base = NULL;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun kref_put(&base->refcount, ttm_release_base);
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun /**
229*4882a593Smuzhiyun * ttm_base_object_noref_lookup - look up a base object without reference
230*4882a593Smuzhiyun * @tfile: The struct ttm_object_file the object is registered with.
231*4882a593Smuzhiyun * @key: The object handle.
232*4882a593Smuzhiyun *
233*4882a593Smuzhiyun * This function looks up a ttm base object and returns a pointer to it
234*4882a593Smuzhiyun * without refcounting the pointer. The returned pointer is only valid
235*4882a593Smuzhiyun * until ttm_base_object_noref_release() is called, and the object
236*4882a593Smuzhiyun * pointed to by the returned pointer may be doomed. Any persistent usage
237*4882a593Smuzhiyun * of the object requires a refcount to be taken using kref_get_unless_zero().
238*4882a593Smuzhiyun * Iff this function returns successfully it needs to be paired with
239*4882a593Smuzhiyun * ttm_base_object_noref_release() and no sleeping- or scheduling functions
 * may be called in between these function calls.
241*4882a593Smuzhiyun *
242*4882a593Smuzhiyun * Return: A pointer to the object if successful or NULL otherwise.
243*4882a593Smuzhiyun */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);
	if (ret) {
		rcu_read_unlock();
		return NULL;
	}

	/*
	 * Deliberately return with the RCU read lock held; the caller
	 * must pair a successful lookup with
	 * ttm_base_object_noref_release(). __release() only tells
	 * sparse that the imbalance is intentional.
	 */
	__release(RCU);
	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
261*4882a593Smuzhiyun EXPORT_SYMBOL(ttm_base_object_noref_lookup);
262*4882a593Smuzhiyun
ttm_base_object_lookup(struct ttm_object_file * tfile,uint32_t key)263*4882a593Smuzhiyun struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
264*4882a593Smuzhiyun uint32_t key)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun struct ttm_base_object *base = NULL;
267*4882a593Smuzhiyun struct drm_hash_item *hash;
268*4882a593Smuzhiyun struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
269*4882a593Smuzhiyun int ret;
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun rcu_read_lock();
272*4882a593Smuzhiyun ret = drm_ht_find_item_rcu(ht, key, &hash);
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun if (likely(ret == 0)) {
275*4882a593Smuzhiyun base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
276*4882a593Smuzhiyun if (!kref_get_unless_zero(&base->refcount))
277*4882a593Smuzhiyun base = NULL;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun rcu_read_unlock();
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun return base;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
/*
 * ttm_base_object_lookup_for_ref - Device-wide handle lookup.
 * @tdev: The object device holding the idr.
 * @key: The object handle.
 *
 * Return: A referenced base object, or NULL if the handle is unknown
 * or the object is already being destroyed.
 */
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	/* Only return the object if we can still take a reference. */
	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun /**
300*4882a593Smuzhiyun * ttm_ref_object_exists - Check whether a caller has a valid ref object
301*4882a593Smuzhiyun * (has opened) a base object.
302*4882a593Smuzhiyun *
303*4882a593Smuzhiyun * @tfile: Pointer to a struct ttm_object_file identifying the caller.
304*4882a593Smuzhiyun * @base: Pointer to a struct base object.
305*4882a593Smuzhiyun *
 * Checks whether the caller identified by @tfile has put a valid USAGE
307*4882a593Smuzhiyun * reference object on the base object identified by @base.
308*4882a593Smuzhiyun */
ttm_ref_object_exists(struct ttm_object_file * tfile,struct ttm_base_object * base)309*4882a593Smuzhiyun bool ttm_ref_object_exists(struct ttm_object_file *tfile,
310*4882a593Smuzhiyun struct ttm_base_object *base)
311*4882a593Smuzhiyun {
312*4882a593Smuzhiyun struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
313*4882a593Smuzhiyun struct drm_hash_item *hash;
314*4882a593Smuzhiyun struct ttm_ref_object *ref;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun rcu_read_lock();
317*4882a593Smuzhiyun if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
318*4882a593Smuzhiyun goto out_false;
319*4882a593Smuzhiyun
320*4882a593Smuzhiyun /*
321*4882a593Smuzhiyun * Verify that the ref object is really pointing to our base object.
322*4882a593Smuzhiyun * Our base object could actually be dead, and the ref object pointing
323*4882a593Smuzhiyun * to another base object with the same handle.
324*4882a593Smuzhiyun */
325*4882a593Smuzhiyun ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
326*4882a593Smuzhiyun if (unlikely(base != ref->obj))
327*4882a593Smuzhiyun goto out_false;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun /*
330*4882a593Smuzhiyun * Verify that the ref->obj pointer was actually valid!
331*4882a593Smuzhiyun */
332*4882a593Smuzhiyun rmb();
333*4882a593Smuzhiyun if (unlikely(kref_read(&ref->kref) == 0))
334*4882a593Smuzhiyun goto out_false;
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun rcu_read_unlock();
337*4882a593Smuzhiyun return true;
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun out_false:
340*4882a593Smuzhiyun rcu_read_unlock();
341*4882a593Smuzhiyun return false;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
ttm_ref_object_add(struct ttm_object_file * tfile,struct ttm_base_object * base,enum ttm_ref_type ref_type,bool * existed,bool require_existed)344*4882a593Smuzhiyun int ttm_ref_object_add(struct ttm_object_file *tfile,
345*4882a593Smuzhiyun struct ttm_base_object *base,
346*4882a593Smuzhiyun enum ttm_ref_type ref_type, bool *existed,
347*4882a593Smuzhiyun bool require_existed)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
350*4882a593Smuzhiyun struct ttm_ref_object *ref;
351*4882a593Smuzhiyun struct drm_hash_item *hash;
352*4882a593Smuzhiyun struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
353*4882a593Smuzhiyun struct ttm_operation_ctx ctx = {
354*4882a593Smuzhiyun .interruptible = false,
355*4882a593Smuzhiyun .no_wait_gpu = false
356*4882a593Smuzhiyun };
357*4882a593Smuzhiyun int ret = -EINVAL;
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun if (base->tfile != tfile && !base->shareable)
360*4882a593Smuzhiyun return -EPERM;
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun if (existed != NULL)
363*4882a593Smuzhiyun *existed = true;
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun while (ret == -EINVAL) {
366*4882a593Smuzhiyun rcu_read_lock();
367*4882a593Smuzhiyun ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun if (ret == 0) {
370*4882a593Smuzhiyun ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
371*4882a593Smuzhiyun if (kref_get_unless_zero(&ref->kref)) {
372*4882a593Smuzhiyun rcu_read_unlock();
373*4882a593Smuzhiyun break;
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun rcu_read_unlock();
378*4882a593Smuzhiyun if (require_existed)
379*4882a593Smuzhiyun return -EPERM;
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
382*4882a593Smuzhiyun &ctx);
383*4882a593Smuzhiyun if (unlikely(ret != 0))
384*4882a593Smuzhiyun return ret;
385*4882a593Smuzhiyun ref = kmalloc(sizeof(*ref), GFP_KERNEL);
386*4882a593Smuzhiyun if (unlikely(ref == NULL)) {
387*4882a593Smuzhiyun ttm_mem_global_free(mem_glob, sizeof(*ref));
388*4882a593Smuzhiyun return -ENOMEM;
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun ref->hash.key = base->handle;
392*4882a593Smuzhiyun ref->obj = base;
393*4882a593Smuzhiyun ref->tfile = tfile;
394*4882a593Smuzhiyun ref->ref_type = ref_type;
395*4882a593Smuzhiyun kref_init(&ref->kref);
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun spin_lock(&tfile->lock);
398*4882a593Smuzhiyun ret = drm_ht_insert_item_rcu(ht, &ref->hash);
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun if (likely(ret == 0)) {
401*4882a593Smuzhiyun list_add_tail(&ref->head, &tfile->ref_list);
402*4882a593Smuzhiyun kref_get(&base->refcount);
403*4882a593Smuzhiyun spin_unlock(&tfile->lock);
404*4882a593Smuzhiyun if (existed != NULL)
405*4882a593Smuzhiyun *existed = false;
406*4882a593Smuzhiyun break;
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun spin_unlock(&tfile->lock);
410*4882a593Smuzhiyun BUG_ON(ret != -EINVAL);
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun ttm_mem_global_free(mem_glob, sizeof(*ref));
413*4882a593Smuzhiyun kfree(ref);
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun return ret;
417*4882a593Smuzhiyun }
418*4882a593Smuzhiyun
/*
 * Release function for a ref object; called by kref_put() with
 * tfile->lock held. The lock is dropped and re-acquired (see the
 * __releases/__acquires annotations) around callbacks and frees that
 * must not run under it.
 */
static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	/* Drop the base-object reference this ref object held. */
	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	/* RCU-delayed free: lockless lookups may still see this ref. */
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}
442*4882a593Smuzhiyun
ttm_ref_object_base_unref(struct ttm_object_file * tfile,unsigned long key,enum ttm_ref_type ref_type)443*4882a593Smuzhiyun int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
444*4882a593Smuzhiyun unsigned long key, enum ttm_ref_type ref_type)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
447*4882a593Smuzhiyun struct ttm_ref_object *ref;
448*4882a593Smuzhiyun struct drm_hash_item *hash;
449*4882a593Smuzhiyun int ret;
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun spin_lock(&tfile->lock);
452*4882a593Smuzhiyun ret = drm_ht_find_item(ht, key, &hash);
453*4882a593Smuzhiyun if (unlikely(ret != 0)) {
454*4882a593Smuzhiyun spin_unlock(&tfile->lock);
455*4882a593Smuzhiyun return -EINVAL;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
458*4882a593Smuzhiyun kref_put(&ref->kref, ttm_ref_object_release);
459*4882a593Smuzhiyun spin_unlock(&tfile->lock);
460*4882a593Smuzhiyun return 0;
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun
ttm_object_file_release(struct ttm_object_file ** p_tfile)463*4882a593Smuzhiyun void ttm_object_file_release(struct ttm_object_file **p_tfile)
464*4882a593Smuzhiyun {
465*4882a593Smuzhiyun struct ttm_ref_object *ref;
466*4882a593Smuzhiyun struct list_head *list;
467*4882a593Smuzhiyun unsigned int i;
468*4882a593Smuzhiyun struct ttm_object_file *tfile = *p_tfile;
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun *p_tfile = NULL;
471*4882a593Smuzhiyun spin_lock(&tfile->lock);
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun /*
474*4882a593Smuzhiyun * Since we release the lock within the loop, we have to
475*4882a593Smuzhiyun * restart it from the beginning each time.
476*4882a593Smuzhiyun */
477*4882a593Smuzhiyun
478*4882a593Smuzhiyun while (!list_empty(&tfile->ref_list)) {
479*4882a593Smuzhiyun list = tfile->ref_list.next;
480*4882a593Smuzhiyun ref = list_entry(list, struct ttm_ref_object, head);
481*4882a593Smuzhiyun ttm_ref_object_release(&ref->kref);
482*4882a593Smuzhiyun }
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun spin_unlock(&tfile->lock);
485*4882a593Smuzhiyun for (i = 0; i < TTM_REF_NUM; ++i)
486*4882a593Smuzhiyun drm_ht_remove(&tfile->ref_hash[i]);
487*4882a593Smuzhiyun
488*4882a593Smuzhiyun ttm_object_file_unref(&tfile);
489*4882a593Smuzhiyun }
490*4882a593Smuzhiyun
ttm_object_file_init(struct ttm_object_device * tdev,unsigned int hash_order)491*4882a593Smuzhiyun struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
492*4882a593Smuzhiyun unsigned int hash_order)
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
495*4882a593Smuzhiyun unsigned int i;
496*4882a593Smuzhiyun unsigned int j = 0;
497*4882a593Smuzhiyun int ret;
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun if (unlikely(tfile == NULL))
500*4882a593Smuzhiyun return NULL;
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun spin_lock_init(&tfile->lock);
503*4882a593Smuzhiyun tfile->tdev = tdev;
504*4882a593Smuzhiyun kref_init(&tfile->refcount);
505*4882a593Smuzhiyun INIT_LIST_HEAD(&tfile->ref_list);
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun for (i = 0; i < TTM_REF_NUM; ++i) {
508*4882a593Smuzhiyun ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
509*4882a593Smuzhiyun if (ret) {
510*4882a593Smuzhiyun j = i;
511*4882a593Smuzhiyun goto out_err;
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun return tfile;
516*4882a593Smuzhiyun out_err:
517*4882a593Smuzhiyun for (i = 0; i < j; ++i)
518*4882a593Smuzhiyun drm_ht_remove(&tfile->ref_hash[i]);
519*4882a593Smuzhiyun
520*4882a593Smuzhiyun kfree(tfile);
521*4882a593Smuzhiyun
522*4882a593Smuzhiyun return NULL;
523*4882a593Smuzhiyun }
524*4882a593Smuzhiyun
/*
 * ttm_object_device_init - Allocate and initialize a ttm_object_device.
 * @mem_glob: Global memory accounting object.
 * @hash_order: Order of the global object-name hash table.
 * @ops: dma_buf ops for exported prime objects. A copy is stored, with
 * the release hook replaced by ttm_prime_dmabuf_release() so that the
 * device can clean up its lookup state before the driver's release runs.
 *
 * Return: The new device, or NULL on failure.
 */
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	idr_init(&tdev->idr);
	/* Interpose our dma_buf release; keep the driver's original. */
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	/* Accounted size per exported dma_buf (buf struct + backing file). */
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}
555*4882a593Smuzhiyun
ttm_object_device_release(struct ttm_object_device ** p_tdev)556*4882a593Smuzhiyun void ttm_object_device_release(struct ttm_object_device **p_tdev)
557*4882a593Smuzhiyun {
558*4882a593Smuzhiyun struct ttm_object_device *tdev = *p_tdev;
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun *p_tdev = NULL;
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
563*4882a593Smuzhiyun idr_destroy(&tdev->idr);
564*4882a593Smuzhiyun drm_ht_remove(&tdev->object_hash);
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun kfree(tdev);
567*4882a593Smuzhiyun }
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun /**
570*4882a593Smuzhiyun * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
571*4882a593Smuzhiyun *
572*4882a593Smuzhiyun * @dma_buf: Non-refcounted pointer to a struct dma-buf.
573*4882a593Smuzhiyun *
574*4882a593Smuzhiyun * Obtain a file reference from a lookup structure that doesn't refcount
575*4882a593Smuzhiyun * the file, but synchronizes with its release method to make sure it has
576*4882a593Smuzhiyun * not been freed yet. See for example kref_get_unless_zero documentation.
577*4882a593Smuzhiyun * Returns true if refcounting succeeds, false otherwise.
578*4882a593Smuzhiyun *
579*4882a593Smuzhiyun * Nobody really wants this as a public API yet, so let it mature here
580*4882a593Smuzhiyun * for some time...
581*4882a593Smuzhiyun */
get_dma_buf_unless_doomed(struct dma_buf * dmabuf)582*4882a593Smuzhiyun static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
583*4882a593Smuzhiyun {
584*4882a593Smuzhiyun return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
585*4882a593Smuzhiyun }
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun /**
588*4882a593Smuzhiyun * ttm_prime_refcount_release - refcount release method for a prime object.
589*4882a593Smuzhiyun *
590*4882a593Smuzhiyun * @p_base: Pointer to ttm_base_object pointer.
591*4882a593Smuzhiyun *
 * This is a wrapper that calls the refcount_release function of the
593*4882a593Smuzhiyun * underlying object. At the same time it cleans up the prime object.
594*4882a593Smuzhiyun * This function is called when all references to the base object we
595*4882a593Smuzhiyun * derive from are gone.
596*4882a593Smuzhiyun */
ttm_prime_refcount_release(struct ttm_base_object ** p_base)597*4882a593Smuzhiyun static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
598*4882a593Smuzhiyun {
599*4882a593Smuzhiyun struct ttm_base_object *base = *p_base;
600*4882a593Smuzhiyun struct ttm_prime_object *prime;
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun *p_base = NULL;
603*4882a593Smuzhiyun prime = container_of(base, struct ttm_prime_object, base);
604*4882a593Smuzhiyun BUG_ON(prime->dma_buf != NULL);
605*4882a593Smuzhiyun mutex_destroy(&prime->mutex);
606*4882a593Smuzhiyun if (prime->refcount_release)
607*4882a593Smuzhiyun prime->refcount_release(&base);
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun /**
611*4882a593Smuzhiyun * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
612*4882a593Smuzhiyun *
613*4882a593Smuzhiyun * @dma_buf:
614*4882a593Smuzhiyun *
615*4882a593Smuzhiyun * This function first calls the dma_buf release method the driver
616*4882a593Smuzhiyun * provides. Then it cleans up our dma_buf pointer used for lookup,
617*4882a593Smuzhiyun * and finally releases the reference the dma_buf has on our base
618*4882a593Smuzhiyun * object.
619*4882a593Smuzhiyun */
ttm_prime_dmabuf_release(struct dma_buf * dma_buf)620*4882a593Smuzhiyun static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
621*4882a593Smuzhiyun {
622*4882a593Smuzhiyun struct ttm_prime_object *prime =
623*4882a593Smuzhiyun (struct ttm_prime_object *) dma_buf->priv;
624*4882a593Smuzhiyun struct ttm_base_object *base = &prime->base;
625*4882a593Smuzhiyun struct ttm_object_device *tdev = base->tfile->tdev;
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun if (tdev->dmabuf_release)
628*4882a593Smuzhiyun tdev->dmabuf_release(dma_buf);
629*4882a593Smuzhiyun mutex_lock(&prime->mutex);
630*4882a593Smuzhiyun if (prime->dma_buf == dma_buf)
631*4882a593Smuzhiyun prime->dma_buf = NULL;
632*4882a593Smuzhiyun mutex_unlock(&prime->mutex);
633*4882a593Smuzhiyun ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
634*4882a593Smuzhiyun ttm_base_object_unref(&base);
635*4882a593Smuzhiyun }
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun /**
638*4882a593Smuzhiyun * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
639*4882a593Smuzhiyun *
640*4882a593Smuzhiyun * @tfile: A struct ttm_object_file identifying the caller.
641*4882a593Smuzhiyun * @fd: The prime / dmabuf fd.
642*4882a593Smuzhiyun * @handle: The returned handle.
643*4882a593Smuzhiyun *
644*4882a593Smuzhiyun * This function returns a handle to an object that previously exported
645*4882a593Smuzhiyun * a dma-buf. Note that we don't handle imports yet, because we simply
646*4882a593Smuzhiyun * have no consumers of that implementation.
647*4882a593Smuzhiyun */
ttm_prime_fd_to_handle(struct ttm_object_file * tfile,int fd,u32 * handle)648*4882a593Smuzhiyun int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
649*4882a593Smuzhiyun int fd, u32 *handle)
650*4882a593Smuzhiyun {
651*4882a593Smuzhiyun struct ttm_object_device *tdev = tfile->tdev;
652*4882a593Smuzhiyun struct dma_buf *dma_buf;
653*4882a593Smuzhiyun struct ttm_prime_object *prime;
654*4882a593Smuzhiyun struct ttm_base_object *base;
655*4882a593Smuzhiyun int ret;
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun dma_buf = dma_buf_get(fd);
658*4882a593Smuzhiyun if (IS_ERR(dma_buf))
659*4882a593Smuzhiyun return PTR_ERR(dma_buf);
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun if (dma_buf->ops != &tdev->ops)
662*4882a593Smuzhiyun return -ENOSYS;
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun prime = (struct ttm_prime_object *) dma_buf->priv;
665*4882a593Smuzhiyun base = &prime->base;
666*4882a593Smuzhiyun *handle = base->handle;
667*4882a593Smuzhiyun ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun dma_buf_put(dma_buf);
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun return ret;
672*4882a593Smuzhiyun }
673*4882a593Smuzhiyun
/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 * Looks up the base object, reuses an already-exported dma_buf when one
 * is still alive, or else exports a fresh one (with memory accounting),
 * and installs an fd for it.
 *
 * Return: Zero on success, negative error code on failure.
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	/* Only shareable objects may be exported. */
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	/*
	 * Reuse the cached dma_buf unless it is gone or already being
	 * torn down (refcount has hit zero but release hasn't run yet).
	 */
	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false
		};
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   &ctx);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			/* Roll back the accounting done above. */
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	/* Install an fd; on failure drop the dma_buf reference we hold. */
	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	/* base is NULL when its reference was handed to the dma_buf. */
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
763*4882a593Smuzhiyun
764*4882a593Smuzhiyun /**
765*4882a593Smuzhiyun * ttm_prime_object_init - Initialize a ttm_prime_object
766*4882a593Smuzhiyun *
767*4882a593Smuzhiyun * @tfile: struct ttm_object_file identifying the caller
768*4882a593Smuzhiyun * @size: The size of the dma_bufs we export.
769*4882a593Smuzhiyun * @prime: The object to be initialized.
770*4882a593Smuzhiyun * @shareable: See ttm_base_object_init
771*4882a593Smuzhiyun * @type: See ttm_base_object_init
772*4882a593Smuzhiyun * @refcount_release: See ttm_base_object_init
773*4882a593Smuzhiyun * @ref_obj_release: See ttm_base_object_init
774*4882a593Smuzhiyun *
775*4882a593Smuzhiyun * Initializes an object which is compatible with the drm_prime model
776*4882a593Smuzhiyun * for data sharing between processes and devices.
777*4882a593Smuzhiyun */
ttm_prime_object_init(struct ttm_object_file * tfile,size_t size,struct ttm_prime_object * prime,bool shareable,enum ttm_object_type type,void (* refcount_release)(struct ttm_base_object **),void (* ref_obj_release)(struct ttm_base_object *,enum ttm_ref_type ref_type))778*4882a593Smuzhiyun int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
779*4882a593Smuzhiyun struct ttm_prime_object *prime, bool shareable,
780*4882a593Smuzhiyun enum ttm_object_type type,
781*4882a593Smuzhiyun void (*refcount_release) (struct ttm_base_object **),
782*4882a593Smuzhiyun void (*ref_obj_release) (struct ttm_base_object *,
783*4882a593Smuzhiyun enum ttm_ref_type ref_type))
784*4882a593Smuzhiyun {
785*4882a593Smuzhiyun mutex_init(&prime->mutex);
786*4882a593Smuzhiyun prime->size = PAGE_ALIGN(size);
787*4882a593Smuzhiyun prime->real_type = type;
788*4882a593Smuzhiyun prime->dma_buf = NULL;
789*4882a593Smuzhiyun prime->refcount_release = refcount_release;
790*4882a593Smuzhiyun return ttm_base_object_init(tfile, &prime->base, shareable,
791*4882a593Smuzhiyun ttm_prime_type,
792*4882a593Smuzhiyun ttm_prime_refcount_release,
793*4882a593Smuzhiyun ref_obj_release);
794*4882a593Smuzhiyun }
795