/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <cdefs.h>

#include <arch_features.h>
#include <common/debug.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>

/* SMMU poll timeout, in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
			      uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
	      value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}
/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
	     SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}
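/*
 * Illustrative usage (not part of this driver): a platform that only needs
 * the default-deny policy, without the cache/TLB invalidation performed by
 * smmuv3_init(), might call smmuv3_security_init() from its early security
 * setup. PLAT_SMMUV3_BASE is a hypothetical platform-defined base address.
 *
 *	void plat_security_setup(void)
 *	{
 *		if (smmuv3_security_init(PLAT_SMMUV3_BASE) != 0)
 *			WARN("SMMUv3 default-deny setup failed\n");
 *	}
 */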
/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

#if ENABLE_RME
	if (get_armv9_2_feat_rme_support() != 0U) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
		     SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skipping SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but the SMMU model only accepts 32b accesses.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * Bits [51:12] of the GPT base address are held in
			 * GPTBR_EL3[39:0] but map to
			 * SMMU_ROOT_GPT_BASE[51:12], hence the value needs
			 * a 12-bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses
			 * are not terminated by this mechanism.
			 * GPCEN=1: All client and SMMU-originated accesses,
			 * except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for the ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return an error, but fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}
#endif /* ENABLE_RME */

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is implementation defined
	 * how the SMMU_S_INIT register is accessed.
	 * Arm SMMU Arch RME supplement, section 3.4: all SMMU registers
	 * specified to be accessible only in the secure physical address
	 * space are additionally accessible in the root physical address
	 * space in an SMMU with RME.
	 * Section 3.3: as GPT information is permitted to be cached in a
	 * TLB, the SMMU_S_INIT.INV_ALL mechanism also invalidates GPT
	 * information cached in TLBs.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for the global invalidation operation to finish */
	return smmuv3_poll(smmu_base + SMMU_S_INIT,
			   SMMU_S_INIT_INV_ALL, 0U);
}
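/*
 * Illustrative usage (not part of this driver): when RME is enabled,
 * smmuv3_init() snapshots GPCCR_EL3 and GPTBR_EL3, so it is assumed to run
 * only after the Granule Protection Tables have been set up and those
 * registers programmed. A hypothetical BL31 setup hook might look like:
 *
 *	void plat_smmu_init(void)
 *	{
 *		if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *			ERROR("SMMUv3 initialization failed\n");
 *			panic();
 *		}
 *	}
 *
 * PLAT_SMMUV3_BASE and plat_smmu_init() are hypothetical names; ERROR()
 * and panic() come from common/debug.h.
 */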