root/arch/ia64/kernel/topology.c


DEFINITIONS

This source file includes the following definitions.
  1. arch_fix_phys_package_id
  2. arch_register_cpu
  3. arch_unregister_cpu
  4. arch_register_cpu
  5. topology_init
  6. cache_shared_cpu_map_setup
  7. cache_shared_cpu_map_setup
  8. show_coherency_line_size
  9. show_ways_of_associativity
  10. show_attributes
  11. show_size
  12. show_number_of_sets
  13. show_shared_cpu_map
  14. show_type
  15. show_level
  16. ia64_cache_show
  17. cpu_cache_sysfs_exit
  18. cpu_cache_sysfs_init
  19. cache_add_dev
  20. cache_remove_dev
  21. cache_sysfs_init

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *              2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *      Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *      Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

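/*
 * Record the physical package (socket) id for a CPU the first time the
 * platform reports it; once socket_id has been assigned, later calls
 * leave it unchanged.
 */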
void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
        if (cpu_data(num)->socket_id == -1)
                cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);


#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
        /*
         * If CPEI can be re-targeted, or if this CPU is not the
         * CPEI target, then it is hotpluggable.
         */
        if (can_cpei_retarget() || !is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.hotpluggable = 1;
        map_cpu_to_node(num, node_cpuid[num].nid);
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
        unmap_cpu_from_node(num, cpu_to_node(num));
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /*CONFIG_HOTPLUG_CPU*/


static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

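        /*
         * Allocate the per-CPU device structures backing register_cpu()
         * and register every present CPU with the driver core.
         */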
        sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kcalloc in topology_init failed - NR_CPUS too big?");

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);


/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used for pretty printing
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",             /* reserved */
        ""              /* reserved */
};

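/*
 * A cache_info describes one cache leaf: its level and type, the
 * PAL-reported configuration, and the mask of CPUs sharing it.  Each CPU
 * has a cpu_cache_info holding its array of leaves plus the kobject for
 * the per-CPU "cache" sysfs directory.
 */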
struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int     num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info    all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
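/*
 * Build the mask of CPUs that share this cache leaf.  A CPU with a single
 * core and a single thread per socket cannot share its caches, so only the
 * CPU itself is set; otherwise PAL is queried for each sharing processor
 * and every possible CPU with a matching socket/core/thread id is added.
 */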
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpumask_set_cpu(j, &this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

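/* number_of_sets = cache_size / (associativity * line_size_in_bytes) */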
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        cpumask_t shared_cpu_map;

        cpumask_and(&shared_cpu_map,
                                &this_leaf->shared_cpu_map, cpu_online_mask);
        return scnprintf(buf, PAGE_SIZE, "%*pb\n",
                         cpumask_pr_args(&shared_cpu_map));
}

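/*
 * this_leaf->type indexes cache_types[]; for a unified cache PAL sets
 * cci.pcci_unified, which bumps the index from "Data" to "Unified".
 */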
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
        #undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
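/*
 * For example, define_one_ro(level) expands to
 *      static struct cache_attr level = __ATTR(level, 0444, show_level, NULL);
 * i.e. a read-only sysfs attribute named "level" backed by show_level().
 */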

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t ia64_cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static const struct sysfs_ops cache_sysfs_ops = {
        .show   = ia64_cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        unsigned long i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        long status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kcalloc(unique_caches, sizeof(struct cache_info),
                           GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

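        /*
         * Walk every cache level and query both cache types (j == 2 first,
         * then j == 1); PAL reports PAL_STATUS_SUCCESS only for cache leaves
         * that actually exist, so num_cache_leaves counts the real leaves.
         */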
        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
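/*
 * This creates a "cache" kobject below the CPU's sysfs device and one
 * "indexN" child kobject per cache leaf; if adding any leaf fails, the
 * leaves added so far and the "cache" kobject are released again.
 */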
static int cache_add_dev(unsigned int cpu)
{
        struct device *sys_dev = get_cpu_device(cpu);
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        retval = cpu_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
                                      &cache_ktype_percpu_entry, &sys_dev->kobj,
                                      "%s", "cache");
        if (unlikely(retval < 0)) {
                cpu_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu, i);
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &cache_ktype,
                                              &all_cpu_cache_info[cpu].kobj,
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_put(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
        return retval;
}


/* Remove cache interface for CPU device */
static int cache_remove_dev(unsigned int cpu)
{
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_put(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

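/*
 * cache_add_dev()/cache_remove_dev() are run for each CPU as it comes
 * online or goes offline via the dynamic CPU hotplug state registered
 * below, so the cache sysfs entries track CPU hotplug.
 */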
static int __init cache_sysfs_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online",
                                cache_add_dev, cache_remove_dev);
        WARN_ON(ret < 0);
        return 0;
}
device_initcall(cache_sysfs_init);
