/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PMU_COM_H__
#define __PMU_COM_H__

/*
 * Use this macro to instantiate the lock before it is used in the
 * rockchip_pd_lock_xxx() macros below.
 */
DEFINE_BAKERY_LOCK(rockchip_pd_lock);

/*
 * These are wrapper macros for the power domain Bakery Lock API.
 */
#define rockchip_pd_lock_init()	bakery_lock_init(&rockchip_pd_lock)
#define rockchip_pd_lock_get()	bakery_lock_get(&rockchip_pd_lock)
#define rockchip_pd_lock_rls()	bakery_lock_release(&rockchip_pd_lock)

/*****************************************************************************
 * power domain on or off
 *****************************************************************************/
enum pmu_pd_state {
	pmu_pd_on = 0,
	pmu_pd_off = 1
};

#pragma weak plat_ic_get_pending_interrupt_id
#pragma weak pmu_power_domain_ctr
#pragma weak check_cpu_wfie

/* Return the current state (pmu_pd_on or pmu_pd_off) of power domain 'pd'. */
static inline uint32_t pmu_power_domain_st(uint32_t pd)
{
	uint32_t pwrdn_st = mmio_read_32(PMU_BASE + PMU_PWRDN_ST) & BIT(pd);

	if (pwrdn_st)
		return pmu_pd_off;
	else
		return pmu_pd_on;
}

/*
 * Switch power domain 'pd' to 'pd_state' and poll PMU_PWRDN_ST until the
 * transition completes, giving up after PD_CTR_LOOP microseconds.
 */
static int pmu_power_domain_ctr(uint32_t pd, uint32_t pd_state)
{
	uint32_t val;
	uint32_t loop = 0;
	int ret = 0;

	rockchip_pd_lock_get();

	val = mmio_read_32(PMU_BASE + PMU_PWRDN_CON);
	if (pd_state == pmu_pd_off)
		val |= BIT(pd);
	else
		val &= ~BIT(pd);

	mmio_write_32(PMU_BASE + PMU_PWRDN_CON, val);
	dsb();

	while ((pmu_power_domain_st(pd) != pd_state) && (loop < PD_CTR_LOOP)) {
		udelay(1);
		loop++;
	}

	if (pmu_power_domain_st(pd) != pd_state) {
		WARN("%s: %d, %d, error!\n", __func__, pd, pd_state);
		ret = -EINVAL;
	}

	rockchip_pd_lock_rls();

	return ret;
}

/*
 * Poll PMU_CORE_PWR_ST until the given core reports the WFI/WFE state
 * selected by 'wfie_msk', giving up after CHK_CPU_LOOP microseconds.
 */
static int check_cpu_wfie(uint32_t cpu_id, uint32_t wfie_msk)
{
	uint32_t cluster_id, loop = 0;

	if (cpu_id >= PLATFORM_CLUSTER0_CORE_COUNT) {
		cluster_id = 1;
		cpu_id -= PLATFORM_CLUSTER0_CORE_COUNT;
	} else {
		cluster_id = 0;
	}

	if (cluster_id)
		wfie_msk <<= (clstb_cpu_wfe + cpu_id);
	else
		wfie_msk <<= (clstl_cpu_wfe + cpu_id);

	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) &&
	       (loop < CHK_CPU_LOOP)) {
		udelay(1);
		loop++;
	}

	if ((mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) == 0) {
		WARN("%s: %d, %d, %d, error!\n", __func__,
		     cluster_id, cpu_id, wfie_msk);
		return -EINVAL;
	}

	return 0;
}

#endif /* __PMU_COM_H__ */
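
/*
 * Illustrative sketch, not part of the original header: a platform CPU-off
 * path would typically wait for the core to reach WFI via check_cpu_wfie()
 * before gating its power domain with pmu_power_domain_ctr(). The names
 * CKECK_WFEI_MSK and PD_CPUL0 below are hypothetical placeholders for
 * platform-specific mask and power domain definitions, and the example
 * assumes the little-cluster core domains are indexed contiguously.
 *
 *	static int cpus_power_domain_off(uint32_t cpu_id)
 *	{
 *		int ret;
 *
 *		ret = check_cpu_wfie(cpu_id, CKECK_WFEI_MSK);
 *		if (ret)
 *			return ret;
 *
 *		return pmu_power_domain_ctr(PD_CPUL0 + cpu_id, pmu_pd_off);
 *	}
 */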