// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, EPAM Systems
 */
#include <assert.h>
#include <inttypes.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>

#include "rcar.h"
#include "romapi.h"
static int get_api_table_index(void)
{
	/*
	 * Depending on SoC type and version, there are 4 possible addresses
	 * for each ROMAPI function
	 */
	static int index __nex_data = -1;

	if (index != -1)
		return index;

	switch (rcar_prr_value & PRR_PRODUCT_MASK) {
	case PRR_PRODUCT_H3:
		switch (rcar_prr_value & PRR_CUT_MASK) {
		case PRR_CUT_10: /* H3 ES1.0 */
		case PRR_CUT_11: /* H3 ES1.1 */
			index = 0;
			break;
		case PRR_CUT_20: /* H3 ES2.0 */
			index = 1;
			break;
		default: /* Newer H3 versions use unified table */
			index = 3;
			break;
		}
		break;
	case PRR_PRODUCT_M3W:
		switch (rcar_prr_value & PRR_CUT_MASK) {
		case PRR_CUT_10: /* M3 ES1.0 */
			index = 2;
			break;
		default: /* Newer M3 versions use unified table */
			index = 3;
			break;
		}
		break;
	default: /* All other SoCs use unified table */
		index = 3;
		break;
	}

	return index;
}

/* implemented in romapi_call.S */
extern uint32_t __plat_romapi_wrapper(paddr_t func, uint64_t arg1,
				      uint64_t arg2, uint64_t arg3);

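/*
 * Direct-call path used when the MMU is already disabled: the ROM function
 * can be entered at its physical address without the assembly trampoline.
 * The signature mirrors __plat_romapi_wrapper() so that plat_call_romapi()
 * can dispatch to either one.
 */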
static uint32_t __plat_romapi_direct(paddr_t func, uint64_t arg1,
				     uint64_t arg2, uint64_t arg3)
{
	uint32_t (*fptr)(uint64_t arg1, uint64_t arg2, uint64_t arg3) = NULL;

	assert(!cpu_mmu_enabled());

	fptr = (typeof(fptr))func;

	return fptr(arg1, arg2, arg3);
}

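/*
 * Call the ROM API function at physical address @func with up to three
 * arguments, either directly (MMU off) or via the MMU-disabling
 * trampoline (MMU on).
 */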
static uint32_t plat_call_romapi(paddr_t func, uint64_t arg1,
				 uint64_t arg2, uint64_t arg3)
{
	uint32_t (*fptr)(paddr_t func, uint64_t arg1, uint64_t arg2,
			 uint64_t arg3) = NULL;

	/*
	 * If the MMU is enabled, we need to use a trampoline function that
	 * disables the MMU and switches the stack pointer to its physical
	 * address. On the other hand, if the MMU is disabled, we can call
	 * the ROM function directly.
	 */
	if (cpu_mmu_enabled())
		/*
		 * With ASLR enabled, the __plat_romapi_wrapper() function is
		 * mapped at two addresses: at a random address (with the rest
		 * of OP-TEE) and at its identity address. We need to map it
		 * at the identity address and call it there, because this
		 * function turns off the MMU to perform the ROM API call. But
		 * the __plat_romapi_wrapper *symbol* is relocated by the ASLR
		 * code, so to get the identity address of the function we
		 * need to use virt_to_phys().
		 */
		fptr = (void *)virt_to_phys(__plat_romapi_wrapper);
	else
		/*
		 * With the MMU disabled we can call the ROM code directly.
		 */
		fptr = __plat_romapi_direct;

	return fptr(func, arg1, arg2, arg3);
}

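/* Translate a pointer to a physical address whether the MMU is on or off */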
static paddr_t va2pa(void *ptr)
{
	if (cpu_mmu_enabled())
		return virt_to_phys(ptr);
	else
		return (paddr_t)ptr;
}

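/* Each entry below corresponds to the index returned by get_api_table_index() */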
static const paddr_t romapi_getrndvector[] = {
	0xEB10DFC4,	/* H3 1.0/1.1, needs confirmation */
	0xEB117134,	/* H3 2.0 */
	0xEB11055C,	/* M3 1.0/1.05, needs confirmation */
	0xEB100188,	/* H3 3.0, M3 1.1+, M3N, E3, D3, V3M 2.0 */
};

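/*
 * Fill @rndbuff with PLAT_RND_VECTOR_SZ random bytes using the
 * ROM_GetRndVector() BootROM call. Both @rndbuff and @scratch must be
 * cache-line aligned and @scratch must be at least 4096 bytes long.
 * Returns 0 on success, a non-zero ROM error code otherwise.
 */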
uint32_t plat_rom_getrndvector(uint8_t rndbuff[PLAT_RND_VECTOR_SZ],
			       uint8_t *scratch, uint32_t scratch_sz)
{
	uint32_t ret = -1;
	int try = 0;
	paddr_t func_addr = romapi_getrndvector[get_api_table_index()];
	paddr_t rndbuff_pa = va2pa(rndbuff);
	paddr_t scratch_pa = va2pa(scratch);

	assert(scratch_sz >= 4096);
	assert(rndbuff_pa % RCAR_CACHE_LINE_SZ == 0);
	assert(scratch_pa % RCAR_CACHE_LINE_SZ == 0);

	while (try++ < 3) {
		ret = plat_call_romapi(func_addr, rndbuff_pa, scratch_pa,
				       scratch_sz);
		if (ret == 0)
			break;
		IMSG("ROM_GetRndVector() returned %"PRIx32, ret);
	}

	/*
	 * The ROM code is called with the MMU turned off, so accesses to
	 * rndbuff are not affected by the data cache. This can lead to two
	 * problems:
	 *
	 * 1. Any prior writes can be cached but may not have reached memory,
	 * so stale values can be flushed to memory later and overwrite the
	 * new data written by the ROM code. This includes the stack as well.
	 *
	 * 2. The ROM code writes new data to the buffer, but we may see old,
	 * cached values.
	 *
	 * The ROM code wrapper issues dcache_op_all(DCACHE_OP_CLEAN), which
	 * ensures that all writes have reached memory. After the call we need
	 * to invalidate the cache to see the new data.
	 *
	 * We are not accessing the scratch area, so there is no need to do
	 * cache maintenance for that buffer.
	 */
	cache_op_inner(DCACHE_AREA_INVALIDATE, rndbuff, PLAT_RND_VECTOR_SZ);

	return ret;
}