xref: /rk3399_rockchip-uboot/arch/arm/cpu/armv8/cache.S (revision 208bd51396fb606dbdcf45b064e6b372d7dd3e81)
10ae76531SDavid Feng/*
20ae76531SDavid Feng * (C) Copyright 2013
30ae76531SDavid Feng * David Feng <fenghua@phytium.com.cn>
40ae76531SDavid Feng *
50ae76531SDavid Feng * This file is based on sample code from ARMv8 ARM.
60ae76531SDavid Feng *
70ae76531SDavid Feng * SPDX-License-Identifier:	GPL-2.0+
80ae76531SDavid Feng */
90ae76531SDavid Feng
100ae76531SDavid Feng#include <asm-offsets.h>
110ae76531SDavid Feng#include <config.h>
120ae76531SDavid Feng#include <asm/macro.h>
130ae76531SDavid Feng#include <linux/linkage.h>
140ae76531SDavid Feng
/*
 * void __asm_flush_dcache_level(level, invalidate_only)
 *
 * clean and invalidate one level of data (or unified) cache by set/way.
 *
 * x0: cache level (0-based; written to CSSELR_EL1 bits [3:1])
 * x1: 0 flush & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

	/* walk every set, and within each set every way, from high to low */
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f		/* x1 bit0 clear -> clean+invalidate */
	dc	isw, x9			/* invalidate by set/way only */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)
600ae76531SDavid Feng
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 flush & invalidate, 1 invalidate only
 *
 * clean and invalidate all data cache by SET/WAY, level by level,
 * from level 0 up to (but not including) the Level of Coherence.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0			/* x1 <- op for __asm_flush_dcache_level */
	dsb	sy			/* complete pending stores first */
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (level of coherence) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr			/* save lr: bl below clobbers it */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type at this level */
	cmp	x12, #2			/* type >= 2: data or unified cache */
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_flush_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15			/* restore original return address */

finished:
	ret
ENDPROC(__asm_dcache_all)
1041e6ad55cSYork Sun
/*
 * void __asm_flush_dcache_all(void)
 *
 * clean & invalidate all data cache: __asm_dcache_all with x0 = 0.
 */
ENTRY(__asm_flush_dcache_all)
	mov	x16, lr			/* save lr: bl below clobbers it */
	mov	x0, #0			/* 0 = flush (clean) & invalidate */
	bl	__asm_dcache_all
	mov	lr, x16			/* restore original return address */
	ret
ENDPROC(__asm_flush_dcache_all)
1120ae76531SDavid Feng
/*
 * void __asm_invalidate_dcache_all(void)
 *
 * invalidate (without cleaning) all data cache: __asm_dcache_all with x0 = 1.
 */
ENTRY(__asm_invalidate_dcache_all)
	mov	x16, lr			/* save lr: bl below clobbers it */
	mov	x0, #0x1		/* 1 = invalidate only */
	bl	__asm_dcache_all
	mov	lr, x16			/* restore original return address */
	ret
ENDPROC(__asm_invalidate_dcache_all)
1201e6ad55cSYork Sun
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range, by virtual address.
 *
 * x0: start address (rounded down to a cache-line boundary)
 * x1: end address (exclusive)
 * x2, x3: clobbered
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0		/* cache type register */
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- DminLine: log2(words per line) */
	mov	x2, #4			/* bytes per word */
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1		/* x3 <- line-size alignment mask */
	bic	x0, x0, x3		/* align start down to a line boundary */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2		/* advance one cache line */
	cmp	x0, x1
	b.lo	1b
	dsb	sy			/* complete maintenance before returning */
	ret
ENDPROC(__asm_flush_dcache_range)
1460ae76531SDavid Feng
/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries, inner shareable domain.
 * (Note: this operates on the icache, not the TLB.)
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis			/* invalidate all icache, inner shareable */
	isb	sy			/* ensure fetch sees the invalidation */
	ret
ENDPROC(__asm_invalidate_icache_all)
157dcd468b8SYork Sun
/*
 * int __asm_flush_l3_cache(void)
 *
 * weak default stub: no system-level (L3) cache to flush here.
 * Platforms with an external L3 cache override this symbol.
 *
 * x0: return value, 0 = success
 */
ENTRY(__asm_flush_l3_cache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_cache)
	.weak	__asm_flush_l3_cache
163