/*
 * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <lib/mmio.h>

/* SMMU poll number of retries */
#define SMMU_POLL_RETRY		1000000

static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val, retries = SMMU_POLL_RETRY;

	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (--retries != 0U);

	ERROR("Failed to poll SMMUv3 register @%p\n", (void *)smmu_reg);
	/* Reconstruct the value the caller was waiting to see */
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}
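
/*
 * Usage sketch (illustrative; mirrors the calls below): to wait for a
 * register field to reach a given value, pass the field mask and the
 * expected masked value, e.g. to wait for SMMU_GBPA.Update to clear:
 *
 *   if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0)
 *       return -1;
 */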

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}
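
/*
 * Usage sketch (illustrative only): a caller that needs just the
 * deny-by-default reset policy, without the cache/TLB invalidation
 * performed by smmuv3_init() below, could invoke this directly, e.g.
 *
 *   if (smmuv3_security_init(smmu_base) != 0)
 *       ERROR("Failed to set SMMUv3 GBPA abort policy\n");
 */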

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is IMPLEMENTATION DEFINED
	 * how accesses to the SMMU_S_INIT register behave.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for the global invalidation operation to finish */
	return smmuv3_poll(smmu_base + SMMU_S_INIT,
				SMMU_S_INIT_INV_ALL, 0U);
}
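
/*
 * Integration sketch (an assumption, not part of this driver): a
 * platform port would typically call smmuv3_init() once during early
 * platform setup, before enabling non-secure DMA masters.
 * PLAT_SMMUV3_BASE is a hypothetical platform-provided constant used
 * only for illustration:
 *
 *   if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *       ERROR("SMMUv3 driver initialisation failed\n");
 *       panic();
 *   }
 */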