// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
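
/*
 * The info_list array behind per_cpu_cacheinfo() is allocated in
 * detect_cache_attributes() and released in free_cache_attributes(),
 * so it is only valid between the CPU's online and pre-down callbacks.
 */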
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}
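
/*
 * On DT systems, fw_token holds the struct device_node pointer of the
 * cache node, so two leaves are "shared" exactly when both CPUs' walks
 * ended on the same DT node. For example (an illustrative fragment, not
 * from any particular .dts): two cpu nodes whose "next-level-cache"
 * phandles both point at one l2-cache node get the same fw_token for
 * their level-2 leaf.
 */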

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
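
/*
 * An example of the properties consumed above, as they might appear in
 * a devicetree (names follow the generic cache binding; the values are
 * made up for illustration):
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-level = <2>;
 *		cache-size = <0x80000>;		// 512 KiB
 *		cache-line-size = <64>;
 *		cache-sets = <1024>;
 *	};
 */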

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}
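
/*
 * The mapping above leans on the enum cache_type values: CACHE_TYPE_INST
 * is BIT(0) == 1 and CACHE_TYPE_DATA is BIT(1) == 2, which line up with
 * rows 1 and 2 of cache_type_info[], while CACHE_TYPE_UNIFIED (BIT(2))
 * is remapped to row 0. Reordering either the enum or the table would
 * silently break this lookup.
 */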

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative (nr_sets == 1), there is no
	 * need to check the other properties; ways_of_associativity is
	 * left at 0, which sysfs treats as full associativity.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
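
/*
 * Worked example: a 32 KiB cache with 64-byte lines and 128 sets gives
 * ways = (32768 / 128) / 64 = 4, i.e. a 4-way set-associative cache.
 */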

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels; so if the
	 * type is still NOCACHE at this stage and the node carries
	 * "cache-unified", treat the leaf as unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}
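
/*
 * Note the ordering above: size, line size and number of sets must all
 * be read from the node before cache_associativity() can derive the
 * number of ways from them.
 */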

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
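
/*
 * The walk above follows the generic DT cache topology: level-1 leaves
 * take their properties from the cpu node itself, while each deeper
 * level is reached by chasing the "next-level-cache" phandle chain via
 * of_find_next_cache_node(). Roughly (an illustrative sketch):
 *
 *	cpu0: cpu@0 { next-level-cache = <&l2>; ... };
 *	l2: l2-cache { next-level-cache = <&l3>; ... };
 *	l3: l3-cache { ... };
 */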
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels. This will be
	 * used only if arch-specific code has not populated
	 * shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}
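
/*
 * The weak stub above is expected to be overridden by the ACPI PPTT
 * parser (drivers/acpi/pptt.c) on kernels built with PPTT support;
 * otherwise ACPI platforms simply fail cache detection with -ENOTSUPP.
 */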

unsigned int coherency_max_size;

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}
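
/*
 * To illustrate the mask updates (a hypothetical two-CPU system whose
 * shared L2 leaf sits at the same index for both CPUs): when CPU1 comes
 * online and its L2 fw_token matches CPU0's, both leaves end up with
 * shared_cpu_map == 0-1, because the masks are always updated in pairs.
 */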

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
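
/*
 * Detection thus runs in three steps: the architecture's
 * init_cache_level() fills in num_leaves, populate_cache_leaves() fills
 * in the per-leaf fields it knows about, and the firmware pass (DT or
 * ACPI) above completes whatever is still missing, including
 * shared_cpu_map.
 */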

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}
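
/*
 * For instance, show_one(level, level) below expands to a level_show()
 * callback that prints this_leaf->level; DEVICE_ATTR_RO(level) further
 * down then wires that callback to a read-only sysfs file named "level".
 */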

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}
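
/*
 * Returning 0 from the is_visible() callback hides a file entirely, so
 * each attribute only appears in sysfs when the architecture or firmware
 * actually populated the corresponding field. ways_of_associativity is
 * keyed off size rather than its own value so that 0 ways (fully
 * associative) is still shown.
 */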

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}
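
/*
 * The cpuhp callbacks above are what ultimately produce the familiar
 * sysfs layout, e.g. (paths shown for illustration):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/level
 *	/sys/devices/system/cpu/cpu0/cache/index0/type
 *	/sys/devices/system/cpu/cpu0/cache/index0/size
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
 */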

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);