// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010, 2012-2015, 2017-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Model Linux Framework interfaces.
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>

#include "backend/gpu/mali_kbase_model_linux.h"
#include "device/mali_kbase_device.h"
#include "mali_kbase_irq_internal.h"

#include <linux/kthread.h>

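/**
 * struct model_irq_data - Work item used to service a model-raised IRQ
 *
 * @kbdev: kbase device the IRQ was raised for
 * @work:  work queued on the device's IRQ workqueue to handle the IRQ
 */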
struct model_irq_data {
	struct kbase_device *kbdev;
	struct work_struct work;
};

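/**
 * serve_job_irq - Service a job IRQ raised by the model
 *
 * @work: work item embedded in the &struct model_irq_data for this IRQ
 *
 * Drains JOB_IRQ_STATUS, forwarding each non-zero value to the job/CSF
 * interrupt handler under the hwaccess lock, then frees the work item.
 */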
static void serve_job_irq(struct work_struct *work)
{
	struct model_irq_data *data = container_of(work, struct model_irq_data, work);
	struct kbase_device *kbdev = data->kbdev;

	/* Make sure no worker is already serving this IRQ */
	while (atomic_cmpxchg(&kbdev->serving_job_irq, 1, 0) == 1) {
		u32 val;

		while ((val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS)))) {
			unsigned long flags;

			/* Handle the IRQ */
			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
#if MALI_USE_CSF
			kbase_csf_interrupt(kbdev, val);
#else
			kbase_job_done(kbdev, val);
#endif
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		}
	}

	kmem_cache_free(kbdev->irq_slab, data);
}

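/**
 * serve_gpu_irq - Service a GPU IRQ raised by the model
 *
 * @work: work item embedded in the &struct model_irq_data for this IRQ
 *
 * Drains GPU_IRQ_STATUS, forwarding each non-zero value to
 * kbase_gpu_interrupt(), then frees the work item.
 */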
static void serve_gpu_irq(struct work_struct *work)
{
	struct model_irq_data *data = container_of(work, struct model_irq_data, work);
	struct kbase_device *kbdev = data->kbdev;

	/* Make sure no worker is already serving this IRQ */
	while (atomic_cmpxchg(&kbdev->serving_gpu_irq, 1, 0) == 1) {
		u32 val;

		while ((val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS)))) {
			/* Handle the IRQ */
			kbase_gpu_interrupt(kbdev, val);
		}
	}

	kmem_cache_free(kbdev->irq_slab, data);
}

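/**
 * serve_mmu_irq - Service an MMU IRQ raised by the model
 *
 * @work: work item embedded in the &struct model_irq_data for this IRQ
 *
 * Drains MMU_IRQ_STATUS, forwarding each non-zero value to
 * kbase_mmu_interrupt(), then frees the work item.
 */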
static void serve_mmu_irq(struct work_struct *work)
{
	struct model_irq_data *data = container_of(work, struct model_irq_data, work);
	struct kbase_device *kbdev = data->kbdev;

	/* Make sure no worker is already serving this IRQ */
	if (atomic_cmpxchg(&kbdev->serving_mmu_irq, 1, 0) == 1) {
		u32 val;

		while ((val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS)))) {
			/* Handle the IRQ */
			kbase_mmu_interrupt(kbdev, val);
		}
	}

	kmem_cache_free(kbdev->irq_slab, data);
}

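/**
 * gpu_device_raise_irq - Raise an IRQ on behalf of the model
 *
 * @model: model handle, used to look up the owning kbase device
 * @irq:   which IRQ to raise (job, GPU or MMU)
 *
 * Allocates a work item, marks the IRQ as pending and queues the matching
 * service routine on the device's IRQ workqueue.
 */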
void gpu_device_raise_irq(void *model, u32 irq)
{
	struct model_irq_data *data;
	struct kbase_device *kbdev = gpu_device_get_data(model);

	KBASE_DEBUG_ASSERT(kbdev);

	data = kmem_cache_alloc(kbdev->irq_slab, GFP_ATOMIC);
	if (data == NULL)
		return;

	data->kbdev = kbdev;

	switch (irq) {
	case MODEL_LINUX_JOB_IRQ:
		INIT_WORK(&data->work, serve_job_irq);
		atomic_set(&kbdev->serving_job_irq, 1);
		break;
	case MODEL_LINUX_GPU_IRQ:
		INIT_WORK(&data->work, serve_gpu_irq);
		atomic_set(&kbdev->serving_gpu_irq, 1);
		break;
	case MODEL_LINUX_MMU_IRQ:
		INIT_WORK(&data->work, serve_mmu_irq);
		atomic_set(&kbdev->serving_mmu_irq, 1);
		break;
	default:
		dev_warn(kbdev->dev, "Unknown IRQ");
		kmem_cache_free(kbdev->irq_slab, data);
		data = NULL;
		break;
	}

	if (data != NULL)
		queue_work(kbdev->irq_workq, &data->work);
}

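/**
 * kbase_reg_write - Write a GPU register of the model
 *
 * @kbdev:  kbase device
 * @offset: register offset
 * @value:  value to write
 *
 * The write is forwarded to the model under the register operation lock.
 */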
void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->reg_op_lock, flags);
	midgard_model_write_reg(kbdev->model, offset, value);
	spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
}

KBASE_EXPORT_TEST_API(kbase_reg_write);

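/**
 * kbase_reg_read - Read a GPU register of the model
 *
 * @kbdev:  kbase device
 * @offset: register offset
 *
 * The read is forwarded to the model under the register operation lock.
 *
 * Return: the register value reported by the model.
 */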
u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&kbdev->reg_op_lock, flags);
	midgard_model_read_reg(kbdev->model, offset, &val);
	spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);

	return val;
}
KBASE_EXPORT_TEST_API(kbase_reg_read);

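/**
 * kbase_install_interrupts - Set up the model IRQ handling infrastructure
 *
 * @kbdev: kbase device
 *
 * Allocates the ordered workqueue and slab cache used to service IRQs
 * raised by the model, and clears the per-IRQ serving flags.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */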
int kbase_install_interrupts(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	atomic_set(&kbdev->serving_job_irq, 0);
	atomic_set(&kbdev->serving_gpu_irq, 0);
	atomic_set(&kbdev->serving_mmu_irq, 0);

	kbdev->irq_workq = alloc_ordered_workqueue("dummy irq queue", 0);
	if (kbdev->irq_workq == NULL)
		return -ENOMEM;

	kbdev->irq_slab = kmem_cache_create("dummy_irq_slab",
				sizeof(struct model_irq_data), 0, 0, NULL);
	if (kbdev->irq_slab == NULL) {
		destroy_workqueue(kbdev->irq_workq);
		return -ENOMEM;
	}

	return 0;
}

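/**
 * kbase_release_interrupts - Tear down the model IRQ handling infrastructure
 *
 * @kbdev: kbase device
 *
 * Destroys the IRQ workqueue and slab cache created by
 * kbase_install_interrupts().
 */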
void kbase_release_interrupts(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);
	destroy_workqueue(kbdev->irq_workq);
	kmem_cache_destroy(kbdev->irq_slab);
}

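/**
 * kbase_synchronize_irqs - Wait for pending model IRQ work to complete
 *
 * @kbdev: kbase device
 *
 * Flushes the IRQ workqueue so that all queued IRQ service work has run
 * before returning.
 */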
void kbase_synchronize_irqs(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);
	flush_workqueue(kbdev->irq_workq);
}

KBASE_EXPORT_TEST_API(kbase_synchronize_irqs);

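/**
 * kbase_set_custom_irq_handler - Install a custom IRQ handler
 *
 * @kbdev:          kbase device
 * @custom_handler: handler to install
 * @irq_type:       IRQ to re-route
 *
 * There is no real IRQ line in the model backend, so this is a no-op.
 *
 * Return: always 0.
 */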
int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
				 irq_handler_t custom_handler, int irq_type)
{
	return 0;
}

KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);

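/**
 * kbase_gpu_irq_test_handler - Minimal GPU IRQ test handler for the model
 *
 * @irq:  IRQ number
 * @data: handler data (unused)
 * @val:  IRQ status value
 *
 * Return: IRQ_NONE if @val is zero, IRQ_HANDLED otherwise.
 */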
irqreturn_t kbase_gpu_irq_test_handler(int irq, void *data, u32 val)
{
	if (!val)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

KBASE_EXPORT_TEST_API(kbase_gpu_irq_test_handler);

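/**
 * kbase_gpu_device_create - Create the GPU device model
 *
 * @kbdev: kbase device
 *
 * Creates the midgard model instance backing this device and initialises
 * the register operation lock.
 *
 * Return: 0 on success, -ENOMEM if the model could not be created.
 */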
int kbase_gpu_device_create(struct kbase_device *kbdev)
{
	kbdev->model = midgard_model_create(kbdev);
	if (kbdev->model == NULL)
		return -ENOMEM;

	spin_lock_init(&kbdev->reg_op_lock);

	return 0;
}

/**
 * kbase_gpu_device_destroy - Destroy GPU device
 *
 * @kbdev: kbase device
 */
void kbase_gpu_device_destroy(struct kbase_device *kbdev)
{
	midgard_model_destroy(kbdev->model);
}