/*
 * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
8
9 #include <assert.h>
10
11 #include <common/debug.h>
12 #include <common/runtime_svc.h>
13 #include <lib/mmio.h>
14 #include <lib/psci/psci.h>
15 #include <plat/arm/common/plat_arm.h>
16 #include <plat/common/platform.h>
17 #include <plat_arm.h>
18
19 #include <plat_private.h>
20 #include <pm_defs.h>
21
/*
 * Secure warm-boot entry point for secondary cores; recorded by
 * plat_setup_psci_ops() and programmed into RVBAR in pwr_domain_on.
 */
static uintptr_t versal_net_sec_entry;
23
/*
 * PSCI cpu_standby hook: park the calling core until an interrupt or
 * event arrives.  cpu_state is accepted but unused - the only standby
 * state implemented here is plain WFI.
 */
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	/* Complete outstanding memory accesses before entering low power. */
	dsb();
	wfi();
}
29
zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)30 static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
31 {
32 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
33 uint32_t cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
34 uint32_t cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
35 uintptr_t apu_cluster_base = 0, apu_pcli_base, apu_pcli_cluster = 0;
36 uintptr_t rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + (cluster * 0x4);
37
38 VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
39 __func__, mpidr, cpu_id, cpu, cluster);
40
41 if (cpu_id == -1) {
42 return PSCI_E_INTERN_FAIL;
43 }
44
45 if (platform_id == VERSAL_NET_SPP && cluster > 1) {
46 panic();
47 }
48
49 if (cluster > 3) {
50 panic();
51 }
52
53 apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET + (cluster * APU_PCLI_CLUSTER_STEP);
54 apu_cluster_base = APU_CLUSTER0 + (cluster * APU_CLUSTER_STEP);
55
56 /* Enable clock */
57 mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + (cluster * 0x4), ACPU_CLK_CTRL_CLKACT);
58
59 /* Enable cluster states */
60 mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
61 mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
62
63 /* assert core reset */
64 mmio_setbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));
65
66 /* program RVBAR */
67 mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
68 (uint32_t)versal_net_sec_entry);
69 mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
70 versal_net_sec_entry >> 32);
71
72 /* de-assert core reset */
73 mmio_clrbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));
74
75 /* clear cluster resets */
76 mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
77 mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);
78
79 apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
80 (APU_PCLI_CLUSTER_CPU_STEP * cluster);
81
82 mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
83 mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
84
85 return PSCI_E_SUCCESS;
86 }
87
/*
 * PSCI pwr_domain_off hook: intentionally empty - with no PM firmware
 * present there is no agent here to act on the power-off request.
 */
static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
}
91
zynqmp_nopmu_system_reset(void)92 static void __dead2 zynqmp_nopmu_system_reset(void)
93 {
94 while (1)
95 wfi();
96 }
97
/*
 * PSCI validate_ns_entrypoint hook: every non-secure entry point is
 * accepted.  NOTE(review): no range check against secure memory is
 * performed here - confirm this is acceptable for this platform.
 */
static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
{
	return PSCI_E_SUCCESS;
}
102
/*
 * PSCI pwr_domain_suspend hook: intentionally empty - no PM firmware
 * is available to program a suspend sequence.
 */
static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
}
106
/*
 * PSCI pwr_domain_on_finish hook: intentionally empty.  NOTE(review):
 * platforms usually re-initialise the per-CPU GIC interface here -
 * confirm that is handled elsewhere in this configuration.
 */
static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
}
110
/*
 * PSCI pwr_domain_suspend_finish hook: intentionally empty - nothing
 * was torn down in zynqmp_pwr_domain_suspend(), so nothing to restore.
 */
static void zynqmp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
}
114
zynqmp_system_off(void)115 static void __dead2 zynqmp_system_off(void)
116 {
117 while (1)
118 wfi();
119 }
120
/*
 * PSCI validate_power_state hook: all requested power states are
 * accepted as-is; req_state is left untouched.
 */
static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	return PSCI_E_SUCCESS;
}
125
/*
 * PSCI get_sys_suspend_power_state hook: request the deepest off state
 * for the CPU level and power level 1 for SYSTEM_SUSPEND.
 * NOTE(review): level 1 is hard-coded - confirm PLAT_MAX_PWR_LVL == 1
 * for this platform, otherwise higher levels are left unset.
 */
static void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
	req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
}
131
/* PSCI handler table used when no PM firmware (PMU/PMC) is present. */
static const struct plat_psci_ops versal_net_nopmc_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.pwr_domain_suspend = zynqmp_pwr_domain_suspend,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = zynqmp_pwr_domain_suspend_finish,
	.system_off = zynqmp_system_off,
	.validate_power_state = zynqmp_validate_power_state,
	.get_sys_suspend_power_state = zynqmp_get_sys_suspend_power_state,
};
145
146 /*******************************************************************************
147 * Export the platform specific power ops.
148 ******************************************************************************/
plat_setup_psci_ops(uintptr_t sec_entrypoint,const struct plat_psci_ops ** psci_ops)149 int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
150 const struct plat_psci_ops **psci_ops)
151 {
152 versal_net_sec_entry = sec_entrypoint;
153
154 VERBOSE("Setting up entry point %lx\n", versal_net_sec_entry);
155
156 *psci_ops = &versal_net_nopmc_psci_ops;
157
158 return 0;
159 }
160
/*
 * SiP service setup hook: nothing to initialise in this no-PM
 * configuration; always reports success.
 */
int sip_svc_setup_init(void)
{
	return 0;
}
165
no_pm_ioctl(uint32_t device_id,uint32_t ioctl_id,uint32_t arg1,uint32_t arg2)166 static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
167 uint32_t arg1, uint32_t arg2)
168 {
169 VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);
170 if (ioctl_id == IOCTL_OSPI_MUX_SELECT) {
171 mmio_write_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL, arg1);
172 return 0;
173 }
174 return PM_RET_ERROR_IOCTL_NOT_SUPPORTED;
175 }
176
/*
 * Minimal EEMI/SMC dispatcher for the no-PM-firmware configuration.
 * Only PM_IOCTL and PM_GET_CHIPID are served; any other API id gets
 * SMC_UNK.  Note the SMC_RET* macros return from this function.
 *
 * The 32-bit EEMI arguments are packed two per 64-bit SMC register:
 * arg[0]/arg[1] from the low/high halves of x1, arg[2]/arg[3] from x2.
 * NOTE(review): presumably this matches the Xilinx SiP calling
 * convention - verify against the full PM handler.
 */
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	/* Unpack four 32-bit EEMI arguments from x1/x2. */
	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	/* The EEMI API id lives in the function-number field of the FID. */
	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		SMC_RET1(handle, (uint64_t)ret);
	}
	case PM_GET_CHIPID:
	{
		/* IDCODE and version read straight from the PMC TAP registers. */
		uint32_t idcode, version;

		idcode = mmio_read_32(PMC_TAP);
		version = mmio_read_32(PMC_TAP_VERSION);
		SMC_RET2(handle, ((uint64_t)idcode << 32), version);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
210
/*
 * Top-level SiP SMC entry point: with no PM firmware present, every
 * call is forwarded unchanged to the local no_pm_handler().
 */
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}
216