// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */

#define pr_fmt(fmt) "rve_reg: " fmt

#include <linux/delay.h>	/* mdelay()/udelay() */

#include "rve_reg.h"
#include "rve_job.h"

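/**
 * rve_soft_reset() - request a soft reset of the RVE core and wait for it
 * @scheduler: scheduler that owns the hardware instance
 *
 * Sets the reset request in IDLE_CTRL, then polls IDLE_PRC_STA for the
 * soft-reset-ready flag for up to RVE_RESET_TIMEOUT iterations before
 * clearing the ready, work-status and interrupt registers.
 */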
void rve_soft_reset(struct rve_scheduler_t *scheduler)
{
	u32 i;
	u32 reg;

	rve_write(1, RVE_SWREG5_IVE_IDLE_CTRL, scheduler);

	if (DEBUGGER_EN(REG)) {
		pr_err("dump reg info on soft reset\n");
		rve_dump_read_back_reg(scheduler);
	}

	if (DEBUGGER_EN(MSG)) {
		pr_err("soft reset idle_ctrl = %.8x, idle_prc_sta = %.8x\n",
		       rve_read(RVE_SWREG5_IVE_IDLE_CTRL, scheduler),
		       rve_read(RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler));

		pr_err("work status = %.8x\n", rve_read(RVE_SWREG6_IVE_WORK_STA, scheduler));
	}

	mdelay(20);

	for (i = 0; i < RVE_RESET_TIMEOUT; i++) {
		reg = rve_read(RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler);
		if (reg & 0x2) {
			pr_info("soft reset succeeded\n");

			/* clear the sw_softrst_rdy_sta bits */
			rve_write(0x30000, RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler);

			/* clear RVE_SWREG6_IVE_WORK_STA */
			rve_write(0xff0000, RVE_SWREG6_IVE_WORK_STA, scheduler);

			/* clear pending interrupts */
			rve_write(0x30000, RVE_SWREG1_IVE_IRQ, scheduler);

			break;
		}

		udelay(1);
	}

	if (i == RVE_RESET_TIMEOUT)
		pr_err("soft reset timeout\n");

	if (DEBUGGER_EN(MSG)) {
		pr_err("after soft reset idle_ctrl = %.8x, idle_prc_sta = %.8x\n",
		       rve_read(RVE_SWREG5_IVE_IDLE_CTRL, scheduler),
		       rve_read(RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler));

		pr_err("work status = %.8x\n", rve_read(RVE_SWREG6_IVE_WORK_STA, scheduler));
	}
}

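/**
 * rve_init_reg() - per-job register initialisation
 * @job: job about to be scheduled
 *
 * Currently a stub; only emits a debug message when MSG debugging is on.
 */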
int rve_init_reg(struct rve_job *job)
{
	int ret = 0;

	if (DEBUGGER_EN(MSG))
		pr_err("TODO: debug info");

	return ret;
}

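/**
 * rve_dump_read_back_reg() - dump the current hardware register state
 * @scheduler: scheduler that owns the hardware instance
 *
 * Reads the SYS, LTB, CFG and MMU register blocks back under irq_lock,
 * then prints them four words per line, prefixed with the register
 * offset of the first word.
 */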
void rve_dump_read_back_reg(struct rve_scheduler_t *scheduler)
{
	int i;
	unsigned long flags;
	uint32_t sys_reg[8] = {0};
	uint32_t ltb_reg[12] = {0};
	uint32_t cfg_reg[40] = {0};
	uint32_t mmu_reg[12] = {0};

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	for (i = 0; i < 8; i++)
		sys_reg[i] = rve_read(RVE_SYS_REG + i * 4, scheduler);

	for (i = 0; i < 12; i++)
		ltb_reg[i] = rve_read(RVE_LTB_REG + i * 4, scheduler);

	for (i = 0; i < 40; i++)
		cfg_reg[i] = rve_read(RVE_CFG_REG + i * 4, scheduler);

	for (i = 0; i < 12; i++)
		mmu_reg[i] = rve_read(RVE_MMU_REG + i * 4, scheduler);

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	pr_info("sys_reg:");
	for (i = 0; i < 2; i++)
		pr_info("0x%x : %.8x %.8x %.8x %.8x\n", RVE_SYS_REG + i * 16,
			sys_reg[0 + i * 4], sys_reg[1 + i * 4],
			sys_reg[2 + i * 4], sys_reg[3 + i * 4]);

	pr_info("ltb_reg:");
	for (i = 0; i < 3; i++)
		pr_info("0x%x : %.8x %.8x %.8x %.8x\n", RVE_LTB_REG + i * 16,
			ltb_reg[0 + i * 4], ltb_reg[1 + i * 4],
			ltb_reg[2 + i * 4], ltb_reg[3 + i * 4]);

	pr_info("cfg_reg:");
	for (i = 0; i < 10; i++)
		pr_info("0x%x : %.8x %.8x %.8x %.8x\n", RVE_CFG_REG + i * 16,
			cfg_reg[0 + i * 4], cfg_reg[1 + i * 4],
			cfg_reg[2 + i * 4], cfg_reg[3 + i * 4]);

	pr_info("mmu_reg:");
	for (i = 0; i < 3; i++)
		pr_info("0x%x : %.8x %.8x %.8x %.8x\n", RVE_MMU_REG + i * 16,
			mmu_reg[0 + i * 4], mmu_reg[1 + i * 4],
			mmu_reg[2 + i * 4], mmu_reg[3 + i * 4]);
}

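/**
 * rve_set_reg() - program a job's command buffer and start the hardware
 * @job: job whose regcmd_data holds the prepared register values
 * @scheduler: scheduler that owns the hardware instance
 *
 * Writes the SYS, LTB (skipping the start register) and CFG blocks,
 * applies the DDR/timeout control defaults, clears the work status and
 * then starts the core: via the slave-mode enable, or via the LLP
 * config-done register when link-list mode is selected in cmd_reg[11].
 */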
int rve_set_reg(struct rve_job *job, struct rve_scheduler_t *scheduler)
{
	ktime_t now = ktime_get();
	/* regcmd_data->cmd_reg holds 58 32-bit register values */
	uint32_t *cmd_reg;
	int i;

	cmd_reg = job->regcmd_data->cmd_reg;

	if (DEBUGGER_EN(REG)) {
		pr_info("user readback:");
		for (i = 0; i < 14; i++)
			pr_info("%.8x %.8x %.8x %.8x\n",
				cmd_reg[0 + i * 4], cmd_reg[1 + i * 4],
				cmd_reg[2 + i * 4], cmd_reg[3 + i * 4]);
		pr_info("%.8x %.8x\n", cmd_reg[56], cmd_reg[57]);
	}

	/* clear the work status register */
	rve_write(0x00000, RVE_SWREG6_IVE_WORK_STA, scheduler);

	if (DEBUGGER_EN(MSG)) {
		pr_info("idle_ctrl = %x, idle_prc_sta = %x\n",
			rve_read(RVE_SWREG5_IVE_IDLE_CTRL, scheduler),
			rve_read(RVE_SWREG3_IVE_IDLE_PRC_STA, scheduler));

		pr_info("work status = %x\n", rve_read(RVE_SWREG6_IVE_WORK_STA, scheduler));
	}

	if (DEBUGGER_EN(TIME))
		pr_info("set cmd use time = %lld us\n", ktime_to_us(ktime_sub(now, job->timestamp)));

	job->hw_running_time = now;
	job->hw_recoder_time = now;

	/* program the CMD buffer into the hardware */
	for (i = 0; i < 8; i++)
		rve_write(cmd_reg[i], RVE_SYS_REG + i * 4, scheduler);

	for (i = 0; i < 10; i++) {
		/* skip the start register */
		if (i == 2)
			continue;

		rve_write(cmd_reg[8 + i], RVE_LTB_REG + i * 4, scheduler);
	}

	/*
	 * The start register at 0x200 must be configured after the other
	 * registers are ready, so only 40 - 1 = 39 CFG registers are
	 * written here.
	 */
	for (i = 0; i < 39; i++)
		rve_write(cmd_reg[19 + i], RVE_CFG_REG + (i + 1) * 4, scheduler);

	//TODO: ddr config
	rve_write(0x30000, RVE_SWCFG5_CTRL, scheduler);
	rve_write(0xf4240, RVE_SWCFG6_TIMEOUT_THRESH, scheduler);
	rve_write(0x1f0001, RVE_SWCFG7_DDR_CTRL, scheduler);

	/* clear RVE_SWREG6_IVE_WORK_STA */
	rve_write(RVE_CLEAR_UP_REG6_WROK_STA, RVE_SWREG6_IVE_WORK_STA, scheduler);

	/* enable the bandwidth/cycle monitor */
	if (DEBUGGER_EN(MONITOR))
		rve_write(1, RVE_SWCFG32_MONITOR_CTRL0, scheduler);

	if (DEBUGGER_EN(REG)) {
		pr_err("before config:");
		rve_dump_read_back_reg(scheduler);
	}

	/* if LLP mode is enabled, skip the slave-mode enable */
	if (cmd_reg[11] != 1)
		rve_write(1, RVE_SWCFG0_EN, scheduler);
	else
		/* LLP config done, start the hardware */
		rve_write(cmd_reg[10], RVE_SWLTB2_CFG_DONE, scheduler);

	if (DEBUGGER_EN(REG)) {
		pr_err("after config:");
		rve_dump_read_back_reg(scheduler);
	}

	return 0;
}

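/**
 * rve_get_version() - read and decode the hardware version register
 * @scheduler: scheduler that owns the hardware instance
 *
 * Fills scheduler->version with the product number and the major/minor
 * version decoded from RVE_SWREG0_IVE_VERSION.
 *
 * Returns 0 on success, -EINVAL if @scheduler is NULL.
 */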
int rve_get_version(struct rve_scheduler_t *scheduler)
{
	u32 major_version, minor_version, prod_num;
	u32 reg_version;

	if (!scheduler) {
		pr_err("scheduler is null\n");
		return -EINVAL;
	}

	reg_version = rve_read(RVE_SWREG0_IVE_VERSION, scheduler);

	major_version = (reg_version & RVE_MAJOR_VERSION_MASK) >> 8;
	minor_version = reg_version & RVE_MINOR_VERSION_MASK;
	prod_num = (reg_version & RVE_PROD_NUM_MASK) >> 16;

	snprintf(scheduler->version.str, sizeof(scheduler->version.str), "[%x]%x.%x",
		 prod_num, major_version, minor_version);

	scheduler->version.major = major_version;
	scheduler->version.minor = minor_version;
	scheduler->version.prod_num = prod_num;

	return 0;
}

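/**
 * rve_get_monitor_info() - collect bandwidth/cycle statistics for a job
 * @job: job that just ran on the hardware
 *
 * When MONITOR debugging is enabled, reads the read/write bandwidth and
 * cycle counters, resets the monitor, and accumulates the values into
 * the matching per-PID entry and the session totals under irq_lock.
 */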
void rve_get_monitor_info(struct rve_job *job)
{
	struct rve_sche_pid_info_t *pid_info = NULL;
	struct rve_scheduler_t *scheduler = NULL;
	unsigned long flags;
	uint32_t rd_bandwidth, wr_bandwidth, cycle_cnt;
	int i;

	scheduler = rve_job_get_scheduler(job);
	pid_info = scheduler->session.pid_info;

	/* bandwidth/cycle monitor */
	if (DEBUGGER_EN(MONITOR)) {
		rd_bandwidth = rve_read(RVE_SWCFG37_MONITOR_INFO3, scheduler);
		wr_bandwidth = rve_read(RVE_SWCFG38_MONITOR_INFO4, scheduler);
		cycle_cnt = rve_read(RVE_SWCFG39_MONITOR_INFO5, scheduler);

		/* reset the monitor counters on each hardware-timer occurrence */
		rve_write(2, RVE_SWCFG32_MONITOR_CTRL0, scheduler);

		spin_lock_irqsave(&scheduler->irq_lock, flags);

		for (i = 0; i < RVE_MAX_PID_INFO; i++) {
			if (pid_info[i].pid == job->pid) {
				pid_info[i].last_job_rd_bandwidth = rd_bandwidth;
				pid_info[i].last_job_wr_bandwidth = wr_bandwidth;
				pid_info[i].last_job_cycle_cnt = cycle_cnt;
				break;
			}
		}

		if (DEBUGGER_EN(MSG))
			pr_info("rd_bandwidth = %d, wr_bandwidth = %d, cycle_cnt = %d\n",
				rd_bandwidth, wr_bandwidth, cycle_cnt);

		scheduler->session.rd_bandwidth += rd_bandwidth;
		scheduler->session.wr_bandwidth += wr_bandwidth;
		scheduler->session.cycle_cnt += cycle_cnt;

		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
	}
}