/*
 * Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <arch_features.h>

/* SMMU poll timeout in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)
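/*
 * Poll an SMMU register until the bits selected by @mask read back as
 * @value, or until the timeout expires. Returns 0 on success, -1 on
 * timeout.
 */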
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/* Initialize the SMMU by invalidating all secure caches and TLBs. */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is implementation defined
	 * how the SMMU_S_INIT register is accessed.
	 * As per the Arm SMMUv3 specification, the SMMU_S_INIT register in
	 * an SMMU with an RME implementation has the following properties:
	 * a) all SMMU registers that are specified to be accessible only in
	 *    the Secure physical address space are additionally accessible in
	 *    the Root physical address space.
	 * b) as GPT information is permitted to be cached in a TLB, the
	 *    SMMU_S_INIT.INV_ALL operation also invalidates all GPT information
	 *    cached in TLBs.
	 * Additionally, it is Root firmware's responsibility to write to
	 * INV_ALL before enabling SMMU_ROOT_CR0.{ACCESSEN,GPCEN}.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	if (smmuv3_poll(smmu_base + SMMU_S_INIT,
			SMMU_S_INIT_INV_ALL, 0U) != 0) {
		return -1;
	}

#if ENABLE_RME

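	/*
	 * When FEAT_RME is implemented, mirror the EL3 granule protection
	 * table configuration into the SMMU root register file and enable
	 * granule protection checks before permitting SMMU accesses.
	 */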
	if (is_feat_rme_present()) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
				  SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but SMMU model only accepts 32b access.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] maps to GPTBR_EL3[39:0],
			 * whereas it maps to SMMU_ROOT_GPT_BASE[51:12],
			 * hence it needs a 12-bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 *          except GPT-walks, are subject to GPC.
			 *
			 * It is recommended to set GPCEN and wait for completion
			 * prior to setting ACCESSEN.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN);

			/* Poll for GPCEN ack bit. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN,
					SMMU_ROOT_CR0_GPCEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");
			}

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 *             not terminated by this mechanism.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for ACCESSEN ack bit. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU ACCESS.\n");

				/*
				 * Do not return an error, but fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	return 0;
}
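/*
 * Configure the non-secure SMMU to abort all incoming transactions by
 * default, then disable the SMMU so the GBPA setting takes effect.
 */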
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/*
	 * Set GBPA's ABORT bit. Other GBPA fields are presumably ignored then,
	 * so simply preserve their value.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
		return -1;
	}

	return 0;
}