// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

/* Set if no cache information is found in DT/ACPI. */
static bool use_arch_info;

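/*
 * Return the cacheinfo descriptor for @cpu. A caller typically walks the
 * attached leaves, e.g. (illustrative sketch only):
 *
 *	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
 *	unsigned int i;
 *
 *	for (i = 0; i < ci->num_leaves; i++)
 *		pr_debug("L%u cache leaf\n", ci->info_list[i].level);
 */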
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
	    use_arch_info)
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

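/*
 * The last-level cache (LLC) is taken to be the last entry in the leaf
 * array; it is considered valid once firmware (DT/ACPI) has attached
 * either a cache ID or an fw_token to it.
 */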
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

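/*
 * Two CPUs share their LLC when both LLC leaves are valid and compare
 * equal under cache_leaves_are_shared().
 */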
bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

static bool of_check_cache_nodes(struct device_node *np);

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

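/*
 * Map a cache type to its slot in cache_type_info[]: unified caches use
 * the plain "cache-*" properties at index 0, while CACHE_TYPE_INST (1)
 * and CACHE_TYPE_DATA (2) index the "i-cache-*" and "d-cache-*" entries
 * directly via their enum values.
 */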
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

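/*
 * Derive the number of ways as (size / nr_sets) / line_size. For
 * example (illustrative numbers only), a 32 KiB cache with 128 sets
 * and 64-byte lines yields (32768 / 128) / 64 = 4 ways.
 */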
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * A fully associative cache is reported with a single set
	 * (nr_sets == 1); leave ways_of_associativity at 0 in that case
	 * and only derive it when all the required properties are known.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NONE at this stage, the cache should be unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

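/*
 * Walk the DT cache hierarchy for @cpu: level 1 leaves are described by
 * properties on the CPU node itself, while higher levels are reached by
 * following the next-level-cache chain via of_find_next_cache_node().
 */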
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np, *prev;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	prev = np;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			np = of_find_next_cache_node(np);
			of_node_put(prev);
			prev = np;
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	of_node_put(np);

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static bool of_check_cache_nodes(struct device_node *np)
{
	struct device_node *next;

	if (of_property_present(np, "cache-size")   ||
	    of_property_present(np, "i-cache-size") ||
	    of_property_present(np, "d-cache-size") ||
	    of_property_present(np, "cache-unified"))
		return true;

	next = of_find_next_cache_node(np);
	if (next) {
		of_node_put(next);
		return true;
	}

	return false;
}

static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/*
		 * The '[i-|d-|]cache-size' properties are expected; if all
		 * are absent, fall back on the 'cache-unified' property:
		 * one unified leaf if it is present, otherwise assume a
		 * split instruction/data pair (two leaves).
		 */
		if (of_property_read_bool(np, "cache-unified"))
			return 1;
		else
			return 2;
	}

	return leaves;
}

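/*
 * Count cache levels and leaves from DT. Each node beyond the CPU node
 * must be compatible with "cache" and carry a strictly increasing
 * "cache-level" property, otherwise the hierarchy is rejected.
 */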
int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	unsigned int levels = 0, leaves, level;

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			goto err_out;
		if (of_property_read_u32(np, "cache-level", &level))
			goto err_out;
		if (level <= levels)
			goto err_out;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	of_node_put(np);
	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;

err_out:
	of_node_put(np);
	return -EINVAL;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	/* Assume no cache information is available in DT/ACPI from now on. */
	if (ret && use_arch_cache_info())
		use_arch_info = true;

	return ret;
}

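/*
 * Populate shared_cpu_map for every leaf of @cpu by comparing each leaf
 * against the same-level, same-type leaves of all other online CPUs.
 * Also tracks coherency_max_size, the largest cache line seen so far.
 */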
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up the cache properties if the LLC is already valid;
	 * we then only need to update the shared cpu_map, which happens when
	 * the cache attributes were populated early, before all the CPUs
	 * were brought online.
	 */
	if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	/* shared_cpu_map is now populated for the cpu */
	this_cpu_ci->cpu_map_populated = true;
	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}

	/* cpu is no longer populated in the shared map */
	this_cpu_ci->cpu_map_populated = false;
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

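/*
 * Weak arch hooks: architectures that can enumerate caches themselves
 * (e.g. from ID registers) override these; the default stubs report
 * that no information is available.
 */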
int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static inline
int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

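/*
 * Early (pre-SMP) fetch of the cache topology for @cpu from DT or ACPI,
 * falling back on the arch's early_cache_level() guess, which is
 * revisited later via init_level_allocate_ci().
 */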
int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come in pairs
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/*
	 * Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

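/*
 * Full cache detection for @cpu: allocate and level-init as needed, let
 * the arch populate the leaves unless firmware already did, then build
 * the shared CPU maps.
 */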
int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * If the LLC is valid, the cache leaves were already populated, so
	 * just go on to update the cpu map.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		/*
		 * populate_cache_leaves() may completely set up the cache
		 * leaves and shared_cpu_map, or it may leave them partially
		 * set up.
		 */
		ret = populate_cache_leaves(cpu);
		if (ret)
			goto free_ci;
	}

	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they are not
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

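/*
 * show_one(file_name, object) expands to a <file_name>_show() sysfs
 * callback that emits the named cacheinfo member. The files appear
 * under /sys/devices/system/cpu/cpuX/cache/indexY/, e.g. (output
 * illustrative only):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/level
 *	1
 */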
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

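/*
 * Return the number of CPUs sharing @cpu's LLC and report the shared
 * map through @map. When @cpu is going offline, its own map is about
 * to be cleared, so a still-valid sibling's map is returned instead.
 */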
static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
					 cpumask_t **map)
{
	struct cacheinfo *llc, *sib_llc;
	unsigned int sibling;

	if (!last_level_cache_is_valid(cpu))
		return 0;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
		return 0;

	if (online) {
		*map = &llc->shared_cpu_map;
		return cpumask_weight(*map);
	}

	/* shared_cpu_map of the offlined CPU will be cleared, so use a sibling's map */
	for_each_cpu(sibling, &llc->shared_cpu_map) {
		if (sibling == cpu || !last_level_cache_is_valid(sibling))
			continue;
		sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
		*map = &sib_llc->shared_cpu_map;
		return cpumask_weight(*map);
	}

	return 0;
}

/*
 * Calculate the size of the per-CPU data cache slice.  This can be
 * used to estimate the size of the data cache slice that can be used
 * by one CPU under ideal circumstances.  UNIFIED caches are counted
 * in addition to DATA caches.  So, please consider code cache usage
 * when using the result.
 *
 * Because the cache inclusive/non-inclusive information isn't
 * available, we just use the size of the per-CPU slice of LLC to make
 * the result more predictable across architectures.
 */
static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
{
	struct cpu_cacheinfo *ci;
	struct cacheinfo *llc;
	unsigned int nr_shared;

	if (!last_level_cache_is_valid(cpu))
		return;

	ci = ci_cacheinfo(cpu);
	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
		return;

	nr_shared = cpumask_weight(&llc->shared_cpu_map);
	if (nr_shared)
		ci->per_cpu_data_slice_size = llc->size / nr_shared;
}

static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
					   cpumask_t *cpu_map)
{
	unsigned int icpu;

	for_each_cpu(icpu, cpu_map) {
		if (!cpu_online && icpu == cpu)
			continue;
		update_per_cpu_data_slice_size_cpu(icpu);
		setup_pcp_cacheinfo(icpu);
	}
}

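/*
 * CPU hotplug callbacks: cacheinfo_cpu_online() builds the attributes
 * and sysfs nodes for an incoming CPU, and cacheinfo_cpu_pre_down()
 * tears them down again; both refresh the per-CPU data slice size of
 * the CPUs sharing the LLC.
 */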
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);
	cpumask_t *cpu_map;

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		goto err;
	if (cpu_map_shared_cache(true, cpu, &cpu_map))
		update_per_cpu_data_slice_size(true, cpu, cpu_map);
	return 0;
err:
	free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	cpumask_t *cpu_map;
	unsigned int nr_shared;

	nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	if (nr_shared > 1)
		update_per_cpu_data_slice_size(false, cpu, cpu_map);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);