/*
 * Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <arch_features.h>

/* SMMU poll timeout, in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

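/*
 * Poll the SMMU register at 'smmu_reg' until the bits selected by 'mask'
 * read back as 'value', or until the 1ms timeout expires.
 * Returns 0 on success and -1 on timeout.
 */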
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is implementation defined
	 * how the SMMU_S_INIT register is accessed.
	 * As per the Arm SMMUv3 specification, the SMMU_S_INIT register in
	 * an SMMU with RME implementation has the following properties:
	 * a) all SMMU registers that are specified to be accessible only in
	 *    the Secure physical address space are additionally accessible in
	 *    the Root physical address space.
	 * b) as GPT information is permitted to be cached in a TLB, the
	 *    SMMU_S_INIT.INV_ALL operation also invalidates all GPT information
	 *    cached in TLBs.
	 * Additionally, it is Root firmware's responsibility to write to
	 * INV_ALL before enabling SMMU_ROOT_CR0.{ACCESSEN,GPCEN}.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for the global invalidation operation to finish */
	if (smmuv3_poll(smmu_base + SMMU_S_INIT,
			SMMU_S_INIT_INV_ALL, 0U) != 0) {
		return -1;
	}

#if ENABLE_RME

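	/*
	 * When FEAT_RME is implemented and the SMMU exposes a Root register
	 * page, mirror the PE's GPT configuration (GPCCR_EL3/GPTBR_EL3) into
	 * the SMMU Root registers and enable granule protection checks.
	 */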
	if (get_armv9_2_feat_rme_support() != 0U) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
				  SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but the SMMU model only accepts 32b accesses.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] maps to GPTBR_EL3[39:0],
			 * whereas it maps to SMMU_ROOT_GPT_BASE[51:12],
			 * hence it needs a 12-bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 *             not terminated by this mechanism.
			 * GPCEN=1: All client and SMMU-originated accesses,
			 *          except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for the ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return an error, but fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	return 0;
}
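
/*
 * Usage sketch (hypothetical, not part of this driver): a platform port
 * would typically call smmuv3_init() once during boot, e.g. from its
 * platform setup code, with the base address of its SMMU. PLAT_SMMUV3_BASE
 * is an assumed platform-defined macro, not something this driver provides:
 *
 *	if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *		WARN("SMMUv3 initialization failed\n");
 *	}
 */

/*
 * Set the non-secure SMMU interface to abort all incoming transactions:
 * program SMMU_GBPA.ABORT and disable the SMMU so that the GBPA setting
 * takes effect for all non-secure streams.
 */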
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/*
	 * Set GBPA's ABORT bit. The other GBPA fields are presumably ignored
	 * when ABORT is set, so simply preserve their value.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
		return -1;
	}

	return 0;
}