/*
 * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <lib/mmio.h>

/* SMMU poll number of retries */
#define SMMU_POLL_RETRY		1000000

static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val, retries = SMMU_POLL_RETRY;

	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (--retries != 0U);

	ERROR("Failed to poll SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/*
	 * Invalidation of secure caches and TLBs is required only if the SMMU
	 * supports secure state. If not, it is implementation defined as to
	 * how the SMMU_S_INIT register is accessed.
	 */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
			SMMU_S_IDR1_SECURE_IMPL) != 0U) {

		/* Initiate invalidation */
		mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

		/* Wait for global invalidation operation to finish */
		return smmuv3_poll(smmu_base + SMMU_S_INIT,
				SMMU_S_INIT_INV_ALL, 0U);
	}
	return 0;
}
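
/*
 * Usage sketch (illustrative only, not part of the upstream driver): a
 * platform port would typically call smmuv3_init() once during early boot,
 * before enabling any device traffic behind the SMMU. PLAT_SMMUV3_BASE is a
 * hypothetical platform-defined register base address, not a name defined by
 * this driver; substitute the platform's actual SMMUv3 base.
 *
 *	void plat_smmu_setup(void)
 *	{
 *		if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *			ERROR("SMMUv3 driver initialisation failed\n");
 *			panic();
 *		}
 *	}
 */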