/*
 * Based on linux/arch/arm/mm/nommu.c
 *
 * ARM PMSAv7 supporting functions.
 */

#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/string.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/sections.h>

#include "mm.h"

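/*
 * A candidate MPU region: a power-of-2 sized, naturally aligned window
 * described by "base" and "size", plus a bitmap of the subregions that
 * must be disabled to trim it back down to the requested range.
 */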
struct region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long subreg;
};

static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);

#ifndef CONFIG_CPU_V7M

#define DRBAR	__ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR	__ACCESS_CP15(c6, 0, c1, 1)
#define DRSR	__ACCESS_CP15(c6, 0, c1, 2)
#define IRSR	__ACCESS_CP15(c6, 0, c1, 3)
#define DRACR	__ACCESS_CP15(c6, 0, c1, 4)
#define IRACR	__ACCESS_CP15(c6, 0, c1, 5)
#define RNGNR	__ACCESS_CP15(c6, 0, c2, 0)

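/*
 * CP15 c6 encodings for the PMSAv7 MPU registers: region base address
 * (DRBAR/IRBAR), region size and enable (DRSR/IRSR) and region access
 * control (DRACR/IRACR) for the data and instruction sides, plus the
 * region number register that selects which region the others access.
 */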
/* Region number */
static inline void rgnr_write(u32 v)
{
	write_sysreg(v, RNGNR);
}

/* Data-side / unified region attributes */

/* Region access control register */
static inline void dracr_write(u32 v)
{
	write_sysreg(v, DRACR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
	write_sysreg(v, DRSR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
	write_sysreg(v, DRBAR);
}

static inline u32 drbar_read(void)
{
	return read_sysreg(DRBAR);
}

/* Optional instruction-side region attributes */

/* I-side Region access control register */
static inline void iracr_write(u32 v)
{
	write_sysreg(v, IRACR);
}

/* I-side Region size register */
static inline void irsr_write(u32 v)
{
	write_sysreg(v, IRSR);
}

/* I-side Region base address register */
static inline void irbar_write(u32 v)
{
	write_sysreg(v, IRBAR);
}

static inline u32 irbar_read(void)
{
	return read_sysreg(IRBAR);
}

#else

static inline void rgnr_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
}

/* Data-side / unified region attributes */

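/*
 * On v7-M a single MPU_RASR register per region holds both the access
 * control attributes (upper half) and the size/enable bits (lower
 * half), hence the read-modify-write in the two helpers below that
 * emulate the separate v7-R DRACR and DRSR registers.
 */
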
/* Region access control register */
static inline void dracr_write(u32 v)
{
	u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);

	writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
	u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);

	writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
}

static inline u32 drbar_read(void)
{
	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
}

/* ARMv7-M only supports a unified MPU, so I-side operations are nops */

static inline void iracr_write(u32 v) {}
static inline void irsr_write(u32 v) {}
static inline void irbar_write(u32 v) {}
static inline unsigned long irbar_read(void) { return 0; }

#endif

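/*
 * PMSAv7 regions must be power-of-2 sized and naturally aligned, but
 * each region is split into PMSAv7_NR_SUBREGS (8) equal subregions
 * that can be disabled individually. A worked example with
 * hypothetical addresses: asking for 96MiB at 0x10000000 rounds up to
 * a 128MiB region at 0x10000000 with 16MiB subregions; disabling the
 * top two subregions leaves exactly the requested 96MiB mapped.
 */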
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
	unsigned long subreg, bslots, sslots;
	phys_addr_t abase = base & ~(size - 1);
	phys_addr_t asize = base + size - abase;
	phys_addr_t p2size = 1 << __fls(asize);
	phys_addr_t bdiff, sdiff;

	if (p2size != asize)
		p2size *= 2;

	bdiff = base - abase;
	sdiff = p2size - asize;
	subreg = p2size / PMSAv7_NR_SUBREGS;

	if ((bdiff % subreg) || (sdiff % subreg))
		return false;

	bslots = bdiff / subreg;
	sslots = sdiff / subreg;

	if (bslots || sslots) {
		int i;

		if (subreg < PMSAv7_MIN_SUBREG_SIZE)
			return false;

		if (bslots + sslots > PMSAv7_NR_SUBREGS)
			return false;

		for (i = 0; i < bslots; i++)
			_set_bit(i, &region->subreg);

		for (i = 1; i <= sslots; i++)
			_set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
	}

	region->base = abase;
	region->size = p2size;

	return true;
}

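/*
 * Greedily cover [base, base + size) with at most "limit" regions:
 * first try the remaining range as a single region (possibly trimmed
 * by disabled subregions); if that fails, retry with the largest
 * power-of-2 chunk that is both naturally aligned at "base" and no
 * larger than the remainder. Returns the number of regions used.
 */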
static int __init allocate_region(phys_addr_t base, phys_addr_t size,
				  unsigned int limit, struct region *regions)
{
	int count = 0;
	phys_addr_t diff = size;
	int attempts = MPU_MAX_REGIONS;

	while (diff) {
		/* Try to cover the region as is (maybe with help of subregions) */
		if (try_split_region(base, size, &regions[count])) {
			count++;
			base += size;
			diff -= size;
			size = diff;
		} else {
			/*
			 * Maximum aligned region might overflow phys_addr_t
			 * if "base" is 0. Hence we keep everything below 4G
			 * until we take the smaller of the aligned region
			 * size ("asize") and rounded region size ("p2size"),
			 * one of which is guaranteed to be smaller than the
			 * maximum physical address.
			 */
			phys_addr_t asize = (base - 1) ^ base;
			phys_addr_t p2size = (1 << __fls(diff)) - 1;

			size = asize < p2size ? asize + 1 : p2size + 1;
		}

		if (count > limit)
			break;

		if (!attempts)
			break;

		attempts--;
	}

	return count;
}

/* MPU initialisation functions */
void __init pmsav7_adjust_lowmem_bounds(void)
{
	phys_addr_t specified_mem_size = 0, total_mem_size = 0;
	phys_addr_t mem_start;
	phys_addr_t mem_end;
	phys_addr_t reg_start, reg_end;
	unsigned int mem_max_regions;
	bool first = true;
	int num;
	u64 i;

	/* Free up PMSAv7_PROBE_REGION */
	mpu_min_region_order = __mpu_min_region_order();

	/* How many regions are supported */
	mpu_max_regions = __mpu_max_regions();

	mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);

	/* We need to keep one slot for the background region */
	mem_max_regions--;

#ifndef CONFIG_CPU_V7M
	/* ... and one for vectors */
	mem_max_regions--;
#endif

#ifdef CONFIG_XIP_KERNEL
	/* plus some regions to cover XIP ROM */
	num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
			      mem_max_regions, xip);

	mem_max_regions -= num;
#endif

	for_each_mem_range(i, &reg_start, &reg_end) {
		if (first) {
			phys_addr_t phys_offset = PHYS_OFFSET;

			/*
			 * Initially only use memory contiguous from
			 * PHYS_OFFSET
			 */
			if (reg_start != phys_offset)
				panic("First memory bank must be contiguous from PHYS_OFFSET");

			mem_start = reg_start;
			mem_end = reg_end;
			specified_mem_size = mem_end - mem_start;
			first = false;
		} else {
			/*
			 * memblock auto merges contiguous blocks, remove
			 * all blocks afterwards in one go (we can't remove
			 * blocks separately while iterating)
			 */
			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
				  &mem_end, &reg_start);
			memblock_remove(reg_start, 0 - reg_start);
			break;
		}
	}

	memset(mem, 0, sizeof(mem));
	num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

	for (i = 0; i < num; i++) {
		unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;

		total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

		pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
			 &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
	}

	if (total_mem_size != specified_mem_size) {
		pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
			&specified_mem_size, &total_mem_size);
		memblock_remove(mem_start + total_mem_size,
				specified_mem_size - total_mem_size);
	}
}

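/*
 * The MPU type register (MPUIR) reports how many data/unified regions
 * the implementation provides and, when its nU bit indicates separate
 * instruction and data memory maps, an independent i-side region count.
 */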
static int __init __mpu_max_regions(void)
{
	/*
	 * We don't support a different number of I/D side regions so if we
	 * have separate instruction and data memory maps then return
	 * whichever side has a smaller number of supported regions.
	 */
	u32 dregions, iregions, mpuir;

	mpuir = read_cpuid_mputype();

	dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

	/* Check for separate d-side and i-side memory maps */
	if (mpuir & MPUIR_nU)
		iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

	/* Use the smaller of the two maxima */
	return min(dregions, iregions);
}

static int __init mpu_iside_independent(void)
{
	/* MPUIR.nU specifies whether there is *not* a unified memory map */
	return read_cpuid_mputype() & MPUIR_nU;
}

static int __init __mpu_min_region_order(void)
{
	u32 drbar_result, irbar_result;

	/* We've kept a region free for this probing */
	rgnr_write(PMSAv7_PROBE_REGION);
	isb();
	/*
	 * As per the ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
	 * region order
	 */
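	/*
	 * Address bits below the minimum supported region alignment read
	 * back as zero, so e.g. a 32-byte minimum region size would read
	 * back as 0xFFFFFFE0, for which __ffs() below yields order 5.
	 */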
	drbar_write(0xFFFFFFFC);
	drbar_result = irbar_result = drbar_read();
	drbar_write(0x0);
	/* If the MPU is non-unified, we use the larger of the two minima */
	if (mpu_iside_independent()) {
		irbar_write(0xFFFFFFFC);
		irbar_result = irbar_read();
		irbar_write(0x0);
	}
	isb(); /* Ensure that MPU region operations have completed */
	/* Return whichever result is larger */

	return __ffs(max(drbar_result, irbar_result));
}

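/*
 * A minimal sketch of the programming sequence below, with hypothetical
 * values: mapping 8KiB of normal RAM at 0x20000000 as region 3 amounts
 * to
 *
 *	rgnr_write(3);
 *	isb();
 *	drbar_write(0x20000000);
 *	dracr_write(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL);
 *	isb();
 *	drsr_write((12 << PMSAv7_RSR_SZ) | (1 << PMSAv7_RSR_EN));
 *
 * where the size field 12 encodes 2^(12+1) = 8KiB; mpu_setup_region()
 * wraps this up together with cache maintenance, barriers and i-side
 * mirroring.
 */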
static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
				   unsigned int size_order, unsigned int properties,
				   unsigned int subregions, bool need_flush)
{
	u32 size_data;

	/* We kept a region free for probing the resolution of MPU regions */
	if (number > mpu_max_regions
	    || number >= MPU_MAX_REGIONS)
		return -ENOENT;

	if (size_order > 32)
		return -ENOMEM;

	if (size_order < mpu_min_region_order)
		return -ENOMEM;

	/* Writing N to bits 5:1 (RSR_SZ) specifies a region size of 2^(N+1) */
	size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
	size_data |= subregions << PMSAv7_RSR_SD;

	if (need_flush)
		flush_cache_all();

	dsb(); /* Ensure all previous data accesses occur with old mappings */
	rgnr_write(number);
	isb();
	drbar_write(start);
	dracr_write(properties);
	isb(); /* Propagate properties before enabling region */
	drsr_write(size_data);

	/* Check for independent I-side registers */
	if (mpu_iside_independent()) {
		irbar_write(start);
		iracr_write(properties);
		isb();
		irsr_write(size_data);
	}
	isb();

	/* Store region info (we treat i/d side the same, so only store d) */
	mpu_rgn_info.rgns[number].dracr = properties;
	mpu_rgn_info.rgns[number].drbar = start;
	mpu_rgn_info.rgns[number].drsr = size_data;

	mpu_rgn_info.used++;

	return 0;
}

/*
 * Set up default MPU regions, doing nothing if there is no MPU
 */
void __init pmsav7_setup(void)
{
	int i, region = 0, err = 0;

	/* Setup MPU (order is important) */

	/* Background */
	err |= mpu_setup_region(region++, 0, 32,
				PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
				0, false);

#ifdef CONFIG_XIP_KERNEL
	/* ROM */
	for (i = 0; i < ARRAY_SIZE(xip); i++) {
		/*
		 * If we overwrite the RAM region we set up earlier in
		 * head-nommu.S (which is cacheable), all subsequent
		 * data accesses until we set up RAM below would go
		 * through the BG region (which is uncacheable), so we
		 * need to clean and invalidate the cache.
		 */
		bool need_flush = region == PMSAv7_RAM_REGION;

		if (!xip[i].size)
			continue;

		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
					PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
					xip[i].subreg, need_flush);
	}
#endif

	/* RAM */
	for (i = 0; i < ARRAY_SIZE(mem); i++) {
		if (!mem[i].size)
			continue;

		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
					PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
					mem[i].subreg, false);
	}

	/* Vectors */
#ifndef CONFIG_CPU_V7M
	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
				PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
				0, false);
#endif
	if (err) {
		panic("MPU region initialization failure! %d", err);
	} else {
		pr_info("Using ARMv7 PMSA Compliant MPU. Region independence: %s, Used %d of %d regions\n",
			mpu_iside_independent() ? "Yes" : "No",
			mpu_rgn_info.used, mpu_max_regions);
	}
}