// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

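/* Labels at or above VC4_BO_TYPE_COUNT were assigned by userspace
 * through the label ioctl; everything below is a fixed kernel type.
 */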
static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

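/* Dumps the per-label allocation stats, plus the purgeable pool
 * counters, through the given printer.
 */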
static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

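/* Moves the BO's allocation stats from its current label to @label,
 * or just drops them when @label is -1 (BO destruction).
 */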
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

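/* The BO cache buckets entries by size in pages; this maps a BO size
 * to its bucket index in bo_cache.size_list[].
 */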
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

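/* Actually frees the BO's backing memory and shader validation state.
 * Must be called with bo_lock held so the label stats stay coherent.
 */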
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

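/* Returns the cache list head for BOs of the given size, growing the
 * size_list[] bucket array when the size exceeds what we've seen so
 * far.
 */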
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

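/* Frees every BO currently sitting in the kernel BO cache, e.g. when
 * a CMA allocation has just failed.
 */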
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

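/* Called when a DONTNEED BO loses its last user reference: from then
 * on its memory may be reclaimed by vc4_bo_userspace_cache_purge().
 */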
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

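/* Releases the BO's backing memory while keeping the GEM object and
 * its handle alive, and invalidates any userspace mappings so later
 * accesses fault (see vc4_fault()).
 */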
static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

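/* Tries to satisfy an allocation from the kernel BO cache: pops the
 * first entry of the matching size bucket, revives its refcount and
 * relabels it for the new user. Returns NULL on a cache miss.
 */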
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	return &bo->base.base;
}

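/* Allocates a BO, trying the kernel BO cache first and then falling
 * back to CMA, purging the kernel and userspace caches under memory
 * pressure before giving up.
 */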
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got laying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use a BO to
		 * restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->base.dev);
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

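/* Implementation of dumb_create for displayable buffers: rounds the
 * pitch and size up to the minimums for the format, then hands back
 * a GEM handle.
 */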
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

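/* Destroys cached BOs that have sat unused for more than a second,
 * re-arming the timer if newer entries remain.
 */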
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

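/* Takes a "usecnt" reference on the BO, which protects its backing
 * memory from being purged. Fails if the BO is already marked
 * DONTNEED or has been purged.
 */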
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when userspace accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_bo_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
}

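/* Lazily takes a per-file reference on the shared binner BO the first
 * time a client allocates a V3D or shader BO.
 */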
static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	int ret;

	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	ret = vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
	if (ret)
		return ret;

	return 0;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, since the BO may have come from
	 * the BO cache without being zeroed.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races with users doing things like mmapping the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put(gem_obj);

	return 0;
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(gem_obj);

	return ret;
}