xref: /OK3568_Linux_fs/kernel/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/bitfield.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>

#include "arm-smmu.h"

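/*
 * bypass_quirk and bypass_cbndx record the context bank reserved to emulate
 * bypass streams on firmware with broken S2CR handling; see
 * qcom_smmu_cfg_probe() and qcom_smmu_write_s2cr() below.
 */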
struct qcom_smmu {
	struct arm_smmu_device smmu;
	bool bypass_quirk;
	u8 bypass_cbndx;
};

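/*
 * Read back the stream mapping registers (SMRs and S2CRs) left programmed by
 * earlier boot stages, mirror them into the driver's software state, and pin
 * the context banks referenced by valid entries so they are not reassigned,
 * presumably so that streams configured before the kernel took over (e.g. a
 * splash-screen display pipe) keep working.
 */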
static int qcom_sdm845_smmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	u32 s2cr;
	u32 smr;
	int i;

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
		s2cr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_S2CR(i));

		smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
		smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
		if (smmu->features & ARM_SMMU_FEAT_EXIDS)
			smmu->smrs[i].valid = FIELD_GET(
						ARM_SMMU_S2CR_EXIDVALID,
						s2cr);
		else
			smmu->smrs[i].valid = FIELD_GET(
						ARM_SMMU_SMR_VALID,
						smr);

		smmu->s2crs[i].group = NULL;
		smmu->s2crs[i].count = 0;
		smmu->s2crs[i].type = FIELD_GET(ARM_SMMU_S2CR_TYPE, s2cr);
		smmu->s2crs[i].privcfg = FIELD_GET(ARM_SMMU_S2CR_PRIVCFG, s2cr);
		smmu->s2crs[i].cbndx = FIELD_GET(ARM_SMMU_S2CR_CBNDX, s2cr);

		if (!smmu->smrs[i].valid)
			continue;

		smmu->s2crs[i].pinned = true;
		bitmap_set(smmu->context_map, smmu->s2crs[i].cbndx, 1);
	}

	return 0;
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

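/*
 * Hand the domain's current io-pgtable configuration back to the GPU driver
 * (through the adreno_smmu_priv interface) so it can build compatible
 * per-instance pagetables of its own.
 */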
static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}

static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;

	return 0;
}

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

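/*
 * Clients for which qcom_smmu_def_domain_type() requests a default identity
 * (bypass) domain instead of a translating DMA domain, likely because earlier
 * boot stages have already set up live mappings for them.
 */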
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 reg;
	u32 smr;
	int i;

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

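/*
 * On firmware with the quirky S2CR handling detected above, substitute the
 * register settings so the intended behaviour is still achieved: BYPASS is
 * written as a translation through the reserved (SCTLR-disabled) context
 * bank, and FAULT is obtained by writing BYPASS.
 */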
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

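/* Request an identity domain for the direct-mapped clients listed above. */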
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	arm_mmu500_reset(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_sdm845_smmu500_reset(smmu);

	return 0;
}

static const struct arm_smmu_impl qcom_smmu_impl = {
	.def_domain_type = qcom_smmu_def_domain_type,
	.cfg_probe = qcom_sdm845_smmu500_cfg_probe,
	.reset = qcom_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
};

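/*
 * Variant used when the SMMU sits in front of the Adreno GPU: adds the hooks
 * needed for per-instance pagetables (context bank 0 reservation, the TTBR1
 * quirk and the private GPU interface set up in init_context).
 */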
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
};

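/*
 * Re-allocate the generic arm_smmu_device as the first member of a qcom_smmu
 * so implementation-specific state can ride along with it, and hook up the
 * requested impl ops before handing the device back to the core driver.
 */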
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct arm_smmu_impl *impl)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu = *smmu;

	qsmmu->smmu.impl = impl;
	devm_kfree(smmu->dev, smmu);

	return &qsmmu->smmu;
}

struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	return qcom_smmu_create(smmu, &qcom_smmu_impl);
}

struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu)
{
	return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
}