// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>

#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rockchip_iommu.h>

#include "../mpp_debug.h"
#include "../mpp_common.h"
#include "../mpp_iommu.h"
#include "mpp_hack_px30.h"

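/*
 * Local copy of the Rockchip IOMMU register layout; these offsets and
 * bits match the ones used by drivers/iommu/rockchip-iommu.c.
 */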
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING	0	/* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING	1	/* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL		2	/* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL	3	/* Stop stall, re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE		4	/* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE	5	/* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET		6	/* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_MASK			0x03
/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED	BIT(0)
#define RK_MMU_STATUS_STALL_ACTIVE	BIT(2)

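/* True only if every MMU instance reports PAGING_ENABLED in its status. */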
static bool mpp_iommu_is_paged(struct mpp_rk_iommu *iommu)
{
	int i;
	u32 status;
	bool active = true;

	for (i = 0; i < iommu->mmu_num; i++) {
		status = readl(iommu->bases[i] + RK_MMU_STATUS);
		active &= !!(status & RK_MMU_STATUS_PAGING_ENABLED);
	}

	return active;
}

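/*
 * All instances are programmed with the same directory table (see
 * mpp_iommu_enable()), so reading the DTE address from the first
 * instance is sufficient.
 */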
static u32 mpp_iommu_get_dte_addr(struct mpp_rk_iommu *iommu)
{
	return readl(iommu->bases[0] + RK_MMU_DTE_ADDR);
}

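/*
 * Restore paging on all MMU instances: stall, force reset, reload the
 * directory table address, flush the IOTLB and unmask interrupts, then
 * enable paging and release the stall. The udelay(2) after each command
 * gives the hardware time to act on it.
 */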
static int mpp_iommu_enable(struct mpp_rk_iommu *iommu)
{
	int i;

	/* nothing to do if the iommu is already paging-enabled */
	iommu->is_paged = mpp_iommu_is_paged(iommu);
	if (iommu->is_paged)
		return 0;

	/* enable stall */
	for (i = 0; i < iommu->mmu_num; i++)
		writel(RK_MMU_CMD_ENABLE_STALL,
		       iommu->bases[i] + RK_MMU_COMMAND);
	udelay(2);
	/* force reset */
	for (i = 0; i < iommu->mmu_num; i++)
		writel(RK_MMU_CMD_FORCE_RESET,
		       iommu->bases[i] + RK_MMU_COMMAND);
	udelay(2);

	for (i = 0; i < iommu->mmu_num; i++) {
		/* restore the directory table address */
		writel(iommu->dte_addr,
		       iommu->bases[i] + RK_MMU_DTE_ADDR);
		/* zap cache */
		writel(RK_MMU_CMD_ZAP_CACHE,
		       iommu->bases[i] + RK_MMU_COMMAND);
		/* unmask interrupts */
		writel(RK_MMU_IRQ_MASK,
		       iommu->bases[i] + RK_MMU_INT_MASK);
	}
	udelay(2);
	/* enable paging */
	for (i = 0; i < iommu->mmu_num; i++)
		writel(RK_MMU_CMD_ENABLE_PAGING,
		       iommu->bases[i] + RK_MMU_COMMAND);
	udelay(2);
	/* disable stall */
	for (i = 0; i < iommu->mmu_num; i++)
		writel(RK_MMU_CMD_DISABLE_STALL,
		       iommu->bases[i] + RK_MMU_COMMAND);
	udelay(2);

	/* paging should now be enabled on every instance */
	iommu->is_paged = mpp_iommu_is_paged(iommu);
	if (!iommu->is_paged) {
		mpp_err("iommu->base_addr=%08x enable failed\n",
			iommu->base_addr[0]);
		return -EINVAL;
	}

	return 0;
}

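/*
 * Counterpart of mpp_iommu_enable(): stall, disable paging, release
 * the stall. A zero DTE address means the iommu was never configured,
 * which is reported as an error.
 */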
static int mpp_iommu_disable(struct mpp_rk_iommu *iommu)
{
	int i;
	u32 dte;

	if (iommu->is_paged) {
		dte = readl(iommu->bases[0] + RK_MMU_DTE_ADDR);
		if (!dte)
			return -EINVAL;
		udelay(2);
		/* enable stall */
		for (i = 0; i < iommu->mmu_num; i++)
			writel(RK_MMU_CMD_ENABLE_STALL,
			       iommu->bases[i] + RK_MMU_COMMAND);
		udelay(2);
		/* disable paging */
		for (i = 0; i < iommu->mmu_num; i++)
			writel(RK_MMU_CMD_DISABLE_PAGING,
			       iommu->bases[i] + RK_MMU_COMMAND);
		udelay(2);
		/* disable stall */
		for (i = 0; i < iommu->mmu_num; i++)
			writel(RK_MMU_CMD_DISABLE_STALL,
			       iommu->bases[i] + RK_MMU_COMMAND);
		udelay(2);
	}

	return 0;
}

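/*
 * Devices sharing one queue on PX30 are multiplexed by a GRF setting,
 * so each of them keeps a shadow of its iommu state on the queue's
 * mmu_list. Look the iommu up by its MMIO base address; on first use,
 * create it and capture the DTE address while the iommu is known to
 * be enabled.
 */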
int px30_workaround_combo_init(struct mpp_dev *mpp)
{
	struct mpp_rk_iommu *iommu = NULL, *loop = NULL, *n;
	struct platform_device *pdev = mpp->iommu_info->pdev;

	/* check whether this iommu is already on the list */
	list_for_each_entry_safe(loop, n, &mpp->queue->mmu_list, link) {
		if (loop->base_addr[0] == pdev->resource[0].start) {
			iommu = loop;
			break;
		}
	}
	/* if not, create and add it */
	if (!iommu) {
		int i;
		struct resource *res;
		void __iomem *base;

		iommu = devm_kzalloc(mpp->srv->dev, sizeof(*iommu), GFP_KERNEL);
		if (!iommu)
			return -ENOMEM;
		for (i = 0; i < pdev->num_resources; i++) {
			res = platform_get_resource(pdev, IORESOURCE_MEM, i);
			if (!res)
				continue;
			base = devm_ioremap(&pdev->dev,
					    res->start, resource_size(res));
			/* devm_ioremap() returns NULL on failure, not ERR_PTR */
			if (!base)
				continue;
			iommu->base_addr[i] = res->start;
			iommu->bases[i] = base;
			iommu->mmu_num++;
		}
		iommu->grf_val = mpp->grf_info->val & MPP_GRF_VAL_MASK;
		if (mpp->hw_ops->clk_on)
			mpp->hw_ops->clk_on(mpp);
		/* make sure the iommu is enabled so a valid dte value can be read */
		if (rockchip_iommu_is_enabled(mpp->dev)) {
			iommu->dte_addr = mpp_iommu_get_dte_addr(iommu);
		} else {
			rockchip_iommu_enable(mpp->dev);
			iommu->dte_addr = mpp_iommu_get_dte_addr(iommu);
			rockchip_iommu_disable(mpp->dev);
		}
		dev_info(mpp->dev, "%s dte_addr %08x\n", __func__, iommu->dte_addr);
		if (mpp->hw_ops->clk_off)
			mpp->hw_ops->clk_off(mpp);
		INIT_LIST_HEAD(&iommu->link);
		mutex_lock(&mpp->queue->mmu_lock);
		list_add_tail(&iommu->link, &mpp->queue->mmu_list);
		mutex_unlock(&mpp->queue->mmu_lock);
	}
	mpp->iommu_info->iommu = iommu;

	return 0;
}

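/*
 * Point the GRF mux at the current device. The paging state of the
 * outgoing iommu is sampled first so it can be restored on the next
 * switch; all iommus on the list are then disabled, the GRF value is
 * rewritten, and only the current device's iommu is re-enabled. The
 * power domain and clocks are forced on for the duration.
 */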
int px30_workaround_combo_switch_grf(struct mpp_dev *mpp)
{
	int ret = 0;
	u32 curr_val;
	u32 next_val;
	bool pd_is_on;
	struct mpp_rk_iommu *loop = NULL, *n;

	if (!mpp->grf_info->grf || !mpp->grf_info->val)
		return 0;

	curr_val = mpp_get_grf(mpp->grf_info);
	next_val = mpp->grf_info->val & MPP_GRF_VAL_MASK;
	if (curr_val == next_val)
		return 0;

	pd_is_on = rockchip_pmu_pd_is_on(mpp->dev);
	if (!pd_is_on)
		rockchip_pmu_pd_on(mpp->dev);
	mpp->hw_ops->clk_on(mpp);

	list_for_each_entry_safe(loop, n, &mpp->queue->mmu_list, link) {
		/* save the paging state of the outgoing iommu */
		if (loop->grf_val == curr_val)
			loop->is_paged = mpp_iommu_is_paged(loop);
		/* disable all iommus */
		mpp_iommu_disable(loop);
	}
	mpp_set_grf(mpp->grf_info);
	/* enable the current device's iommu */
	ret = mpp_iommu_enable(mpp->iommu_info->iommu);

	mpp->hw_ops->clk_off(mpp);
	if (!pd_is_on)
		rockchip_pmu_pd_off(mpp->dev);

	return ret;
}