xref: /OK3568_Linux_fs/kernel/arch/arm/mach-ep93xx/crunch.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * arch/arm/kernel/crunch.c
4*4882a593Smuzhiyun  * Cirrus MaverickCrunch context switching and handling
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/types.h>
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/signal.h>
13*4882a593Smuzhiyun #include <linux/sched.h>
14*4882a593Smuzhiyun #include <linux/init.h>
15*4882a593Smuzhiyun #include <linux/io.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include <asm/thread_notify.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include "soc.h"
20*4882a593Smuzhiyun 
/*
 * Which task's Crunch state is currently resident in the coprocessor,
 * or NULL if none.  Read and cleared with interrupts disabled (see
 * crunch_task_release()).
 */
struct crunch_state *crunch_owner;
crunch_task_release(struct thread_info * thread)23*4882a593Smuzhiyun void crunch_task_release(struct thread_info *thread)
24*4882a593Smuzhiyun {
25*4882a593Smuzhiyun 	local_irq_disable();
26*4882a593Smuzhiyun 	if (crunch_owner == &thread->crunchstate)
27*4882a593Smuzhiyun 		crunch_owner = NULL;
28*4882a593Smuzhiyun 	local_irq_enable();
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun 
/* Nonzero iff the Crunch coprocessor enable bit is set in @devcfg. */
static int crunch_enabled(u32 devcfg)
{
	return (devcfg & EP93XX_SYSCON_DEVCFG_CPENA) != 0;
}
35*4882a593Smuzhiyun 
crunch_do(struct notifier_block * self,unsigned long cmd,void * t)36*4882a593Smuzhiyun static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun 	struct thread_info *thread = (struct thread_info *)t;
39*4882a593Smuzhiyun 	struct crunch_state *crunch_state;
40*4882a593Smuzhiyun 	u32 devcfg;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	crunch_state = &thread->crunchstate;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	switch (cmd) {
45*4882a593Smuzhiyun 	case THREAD_NOTIFY_FLUSH:
46*4882a593Smuzhiyun 		memset(crunch_state, 0, sizeof(*crunch_state));
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 		/*
49*4882a593Smuzhiyun 		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
50*4882a593Smuzhiyun 		 * initialised state information on the first fault.
51*4882a593Smuzhiyun 		 */
52*4882a593Smuzhiyun 		fallthrough;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	case THREAD_NOTIFY_EXIT:
55*4882a593Smuzhiyun 		crunch_task_release(thread);
56*4882a593Smuzhiyun 		break;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	case THREAD_NOTIFY_SWITCH:
59*4882a593Smuzhiyun 		devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
60*4882a593Smuzhiyun 		if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
61*4882a593Smuzhiyun 			/*
62*4882a593Smuzhiyun 			 * We don't use ep93xx_syscon_swlocked_write() here
63*4882a593Smuzhiyun 			 * because we are on the context switch path and
64*4882a593Smuzhiyun 			 * preemption is already disabled.
65*4882a593Smuzhiyun 			 */
66*4882a593Smuzhiyun 			devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
67*4882a593Smuzhiyun 			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
68*4882a593Smuzhiyun 			__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
69*4882a593Smuzhiyun 		}
70*4882a593Smuzhiyun 		break;
71*4882a593Smuzhiyun 	}
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	return NOTIFY_DONE;
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun 
/* Routes thread flush/exit/switch notifications into crunch_do(). */
static struct notifier_block crunch_notifier_block = {
	.notifier_call	= crunch_do,
};
79*4882a593Smuzhiyun 
/*
 * Register the Crunch thread notifier and advertise coprocessor
 * support to userspace through the ELF hwcap flags.  Always returns 0.
 */
int __init crunch_init(void)
{
	thread_register_notifier(&crunch_notifier_block);
	elf_hwcap |= HWCAP_CRUNCH;

	return 0;
}
87