1 /*
2 *
3 * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
13 *
14 */
15
16
17
18 /*
19 * Base kernel core availability APIs
20 */
21
22 #include <mali_kbase.h>
23 #include <mali_kbase_pm.h>
24 #include <backend/gpu/mali_kbase_pm_internal.h>
25
/*
 * Table of every core availability policy compiled into this build.
 * The first entry is installed as the default policy by
 * kbase_pm_ca_init(); the optional entries are gated on their
 * respective build configuration.
 */
static const struct kbase_pm_ca_policy *const policy_list[] = {
	&kbase_pm_ca_fixed_policy_ops,
#ifdef CONFIG_MALI_DEVFREQ
	&kbase_pm_ca_devfreq_policy_ops,
#endif
#if !MALI_CUSTOMER_RELEASE
	&kbase_pm_ca_random_policy_ops
#endif
};
35
36 /**
37 * POLICY_COUNT - The number of policies available in the system.
38 *
39 * This is derived from the number of functions listed in policy_list.
40 */
41 #define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
42
kbase_pm_ca_init(struct kbase_device * kbdev)43 int kbase_pm_ca_init(struct kbase_device *kbdev)
44 {
45 KBASE_DEBUG_ASSERT(kbdev != NULL);
46
47 kbdev->pm.backend.ca_current_policy = policy_list[0];
48
49 kbdev->pm.backend.ca_current_policy->init(kbdev);
50
51 return 0;
52 }
53
kbase_pm_ca_term(struct kbase_device * kbdev)54 void kbase_pm_ca_term(struct kbase_device *kbdev)
55 {
56 kbdev->pm.backend.ca_current_policy->term(kbdev);
57 }
58
kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const ** list)59 int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
60 {
61 if (!list)
62 return POLICY_COUNT;
63
64 *list = policy_list;
65
66 return POLICY_COUNT;
67 }
68
69 KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies);
70
71 const struct kbase_pm_ca_policy
kbase_pm_ca_get_policy(struct kbase_device * kbdev)72 *kbase_pm_ca_get_policy(struct kbase_device *kbdev)
73 {
74 KBASE_DEBUG_ASSERT(kbdev != NULL);
75
76 return kbdev->pm.backend.ca_current_policy;
77 }
78
79 KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy);
80
/**
 * kbase_pm_ca_set_policy - Change the current core availability policy
 * @kbdev:      The kbase device structure for the device (must not be NULL)
 * @new_policy: The policy to switch to (must not be NULL)
 *
 * Terminates the old policy and installs @new_policy. While the switch is
 * in progress ca_current_policy is set to NULL (under hwaccess_lock) so
 * that concurrent readers such as kbase_pm_ca_get_core_mask() and
 * kbase_pm_ca_update_core_status() fall back to safe defaults instead of
 * touching a half-torn-down or half-initialised policy.
 *
 * Lock ordering here is pm.lock (mutex) taken before hwaccess_lock
 * (spinlock); the term/init hooks run with neither policy installed and
 * without the spinlock held.
 */
void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_ca_policy *new_policy)
{
	const struct kbase_pm_ca_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
								new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	old_policy = kbdev->pm.backend.ca_current_policy;
	kbdev->pm.backend.ca_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* Both hooks are optional; they run while ca_current_policy is NULL,
	 * so readers cannot observe the policy mid-transition */
	if (old_policy->term)
		old_policy->term(kbdev);

	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.ca_current_policy = new_policy;

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here. */
	kbase_pm_update_cores_state_nolock(kbdev);

	/* Seed the new policy with the current shader core bitmaps */
	kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
					kbdev->shader_ready_bitmap,
					kbdev->shader_transitioning_bitmap);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	mutex_unlock(&kbdev->pm.lock);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}
132
133 KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy);
134
kbase_pm_ca_get_core_mask(struct kbase_device * kbdev)135 u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
136 {
137 lockdep_assert_held(&kbdev->hwaccess_lock);
138
139 /* All cores must be enabled when instrumentation is in use */
140 if (kbdev->pm.backend.instr_enabled)
141 return kbdev->gpu_props.props.raw_props.shader_present &
142 kbdev->pm.debug_core_mask_all;
143
144 if (kbdev->pm.backend.ca_current_policy == NULL)
145 return kbdev->gpu_props.props.raw_props.shader_present &
146 kbdev->pm.debug_core_mask_all;
147
148 return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
149 kbdev->pm.debug_core_mask_all;
150 }
151
152 KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
153
/**
 * kbase_pm_ca_update_core_status - Forward shader core state to the policy
 * @kbdev:                The kbase device structure for the device
 * @cores_ready:          Bitmask of cores that are ready
 * @cores_transitioning:  Bitmask of cores that are transitioning
 *
 * Caller must hold kbdev->hwaccess_lock. A NULL current policy (policy
 * change in flight) is silently ignored.
 */
void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
					u64 cores_transitioning)
{
	const struct kbase_pm_ca_policy *policy;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	policy = kbdev->pm.backend.ca_current_policy;
	if (policy == NULL)
		return;

	policy->update_core_status(kbdev, cores_ready, cores_transitioning);
}
164
kbase_pm_ca_instr_enable(struct kbase_device * kbdev)165 void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
166 {
167 unsigned long flags;
168
169 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
170 kbdev->pm.backend.instr_enabled = true;
171
172 kbase_pm_update_cores_state_nolock(kbdev);
173 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
174 }
175
/**
 * kbase_pm_ca_instr_disable - Release the instrumentation core override
 * @kbdev: The kbase device structure for the device
 *
 * Clears the instrumentation override and re-evaluates the core power
 * state. NOTE the asymmetry with kbase_pm_ca_instr_enable(): the caller
 * must already hold kbdev->hwaccess_lock here.
 */
void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	kbdev->pm.backend.instr_enabled = false;

	kbase_pm_update_cores_state_nolock(kbdev);
}
183