/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_TRACE_GPU_MEM_H_
#define _KBASE_TRACE_GPU_MEM_H_

#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
#include <trace/events/gpu_mem.h>
#endif

#define DEVICE_TGID ((u32) 0U)

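/**
 * kbase_trace_gpu_mem_usage - Trace the current GPU memory usage totals.
 *
 * @kbdev: Pointer to the kbase device
 * @kctx:  Pointer to the kbase context, or NULL to report only the device total
 *
 * Emits gpu_mem_total tracepoints for the whole device (attributed to
 * DEVICE_TGID) and, if a context is supplied, for the owning process.
 * The caller must hold kbdev->gpu_mem_usage_lock.
 */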
static inline void kbase_trace_gpu_mem_usage(struct kbase_device *kbdev,
		struct kbase_context *kctx)
{
#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	lockdep_assert_held(&kbdev->gpu_mem_usage_lock);

	trace_gpu_mem_total(kbdev->id, DEVICE_TGID,
			    kbdev->total_gpu_pages << PAGE_SHIFT);

	if (likely(kctx))
		trace_gpu_mem_total(kbdev->id, kctx->kprcs->tgid,
				    kctx->kprcs->total_gpu_pages << PAGE_SHIFT);
#endif
}

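/**
 * kbase_trace_gpu_mem_usage_dec - Subtract pages from the GPU memory usage
 *                                 counters and trace the updated totals.
 *
 * @kbdev: Pointer to the kbase device
 * @kctx:  Pointer to the kbase context, or NULL to update only the device total
 * @pages: Number of GPU pages no longer in use
 *
 * Takes kbdev->gpu_mem_usage_lock around the update and the tracing.
 */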
static inline void kbase_trace_gpu_mem_usage_dec(struct kbase_device *kbdev,
		struct kbase_context *kctx, size_t pages)
{
	spin_lock(&kbdev->gpu_mem_usage_lock);

	if (likely(kctx))
		kctx->kprcs->total_gpu_pages -= pages;

	kbdev->total_gpu_pages -= pages;

	kbase_trace_gpu_mem_usage(kbdev, kctx);

	spin_unlock(&kbdev->gpu_mem_usage_lock);
}

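/**
 * kbase_trace_gpu_mem_usage_inc - Add pages to the GPU memory usage counters
 *                                 and trace the updated totals.
 *
 * @kbdev: Pointer to the kbase device
 * @kctx:  Pointer to the kbase context, or NULL to update only the device total
 * @pages: Number of GPU pages newly in use
 *
 * Takes kbdev->gpu_mem_usage_lock around the update and the tracing.
 */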
static inline void kbase_trace_gpu_mem_usage_inc(struct kbase_device *kbdev,
		struct kbase_context *kctx, size_t pages)
{
	spin_lock(&kbdev->gpu_mem_usage_lock);

	if (likely(kctx))
		kctx->kprcs->total_gpu_pages += pages;

	kbdev->total_gpu_pages += pages;

	kbase_trace_gpu_mem_usage(kbdev, kctx);

	spin_unlock(&kbdev->gpu_mem_usage_lock);
}

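/*
 * Illustrative usage only (not a call site defined here): an allocation path
 * that commits GPU pages to a context would pair the helpers as below, where
 * nr_pages is a hypothetical local holding the page count.
 *
 *	kbase_trace_gpu_mem_usage_inc(kbdev, kctx, nr_pages);
 *	... pages in use ...
 *	kbase_trace_gpu_mem_usage_dec(kbdev, kctx, nr_pages);
 */
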
/**
 * kbase_remove_dma_buf_usage - Remove a captured dma-buf entry.
 *
 * @kctx: Pointer to the kbase context
 * @alloc: Pointer to the alloc to unmap
 *
 * Remove the reference to the dma-buf being unmapped from the kbase_device
 * level rb_tree and the kbase_process level dma-buf rb_tree.
 */
void kbase_remove_dma_buf_usage(struct kbase_context *kctx,
				struct kbase_mem_phy_alloc *alloc);

/**
 * kbase_add_dma_buf_usage - Add a captured dma-buf entry.
 *
 * @kctx: Pointer to the kbase context
 * @alloc: Pointer to the alloc to map in
 *
 * Add a reference to the dma-buf being mapped to the kbase_device level
 * rb_tree and the kbase_process level dma-buf rb_tree.
 */
void kbase_add_dma_buf_usage(struct kbase_context *kctx,
			     struct kbase_mem_phy_alloc *alloc);
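
/*
 * Illustrative usage only: a dma-buf backed allocation is recorded when it is
 * mapped onto the GPU and the record is dropped again on unmap, e.g.
 *
 *	kbase_add_dma_buf_usage(kctx, alloc);
 *	... buffer mapped on the GPU ...
 *	kbase_remove_dma_buf_usage(kctx, alloc);
 */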

#endif /* _KBASE_TRACE_GPU_MEM_H_ */