/*
 * Extract CPU cache information and expose it via sysfs.
 *
 *    Copyright IBM Corp. 2012
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cacheinfo.h>
#include <asm/facility.h>
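
/* Scope of a cache level as encoded in the cache topology summary. */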
enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};
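
/*
 * Cache type as encoded in the topology summary; CTYPE_SEPARATE means
 * split instruction and data caches at that level.
 */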
enum {
	CTYPE_SEPARATE,
	CTYPE_DATA,
	CTYPE_INSTRUCTION,
	CTYPE_UNIFIED,
};
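
/* Attribute indication passed to ecag(): which cache property to extract. */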
enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};
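
/*
 * Type indication passed to ecag(): unified and data caches share
 * indication 0, instruction caches use 1.
 */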
enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};
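
/*
 * One byte of the topology summary returned by ecag(EXTRACT_TOPOLOGY, 0, 0);
 * it describes scope and type of a single cache level. The full summary
 * holds one such descriptor per possible level (see union cache_topology).
 */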
struct cache_info {
	unsigned char	    : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

/* Human readable cache names, indexed by the generic enum cache_type values. */
static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};

/* Map the topology summary cache type to the generic cacheinfo cache type. */
static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
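
/*
 * Print one line per cache leaf (used by the /proc/cpuinfo code). The
 * description of an arbitrary online CPU is shown; shared caches are
 * reported with scope "Shared", private ones with "Private". Nothing is
 * printed if facility bit 34 is not installed.
 */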
void show_cacheinfo(struct seq_file *m)
{
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *cache;
	int idx;

	if (!test_facility(34))
		return;
	get_online_cpus();
	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		cache = this_cpu_ci->info_list + idx;
		seq_printf(m, "cache%-11d: ", idx);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ",
			   cache->disable_sysfs ? "Shared" : "Private");
		seq_printf(m, "size=%dK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
		seq_puts(m, "\n");
	}
	put_online_cpus();
}
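
/*
 * Return the generic cache type for the given level of the topology
 * summary, or CACHE_TYPE_NOCACHE if the level does not exist or is
 * neither private nor shared.
 */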
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
	if (level >= CACHE_MAX_LEVEL)
		return CACHE_TYPE_NOCACHE;
	ci += level;
	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
		return CACHE_TYPE_NOCACHE;
	return cache_type_map[ci->type];
}
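
/*
 * ecag - query a cache attribute via the "extract CPU attribute" (ECAG)
 * instruction, emitted here as a raw .insn. The command word combines the
 * attribute indication (ai), level indication (li) and type indication (ti).
 */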
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn	rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}
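
/*
 * Fill in one cacheinfo leaf: query line size, associativity and total
 * size via ecag(), derive the number of sets from them, and hide shared
 * caches from sysfs by setting disable_sysfs. The hardware level is
 * zero-based and exported as level + 1.
 */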
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
			 enum cache_type type, unsigned int level, int cpu)
{
	int ti, num_sets;

	if (type == CACHE_TYPE_INST)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	this_leaf->level = level + 1;
	this_leaf->type = type;
	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
	num_sets = this_leaf->size / this_leaf->coherency_line_size;
	num_sets /= this_leaf->ways_of_associativity;
	this_leaf->number_of_sets = num_sets;
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (!private)
		this_leaf->disable_sysfs = true;
}
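
/*
 * Count cache levels and leaves from the topology summary. A level with
 * separate instruction and data caches contributes two leaves. Returns
 * -EOPNOTSUPP if facility bit 34, needed for the ECAG-based queries, is
 * not installed.
 */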
int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int level = 0, leaves = 0;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	if (!this_cpu_ci)
		return -EINVAL;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	do {
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_NOCACHE)
			break;
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	} while (++level < CACHE_MAX_LEVEL);
	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}
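
/*
 * Populate the cacheinfo leaves for one CPU from the topology summary,
 * creating separate data and instruction leaves where the hardware
 * reports split caches at a level.
 */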
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int level, idx, pvt;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		if (!this_leaf)
			return -EINVAL;
		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
		} else {
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	return 0;
}