root/tools/perf/lib/cpumap.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. perf_cpu_map__dummy_new
  2. cpu_map__delete
  3. perf_cpu_map__get
  4. perf_cpu_map__put
  5. cpu_map__default_new
  6. cpu_map__trim_new
  7. perf_cpu_map__read
  8. cpu_map__read_all_cpu_map
  9. perf_cpu_map__new
  10. perf_cpu_map__cpu
  11. perf_cpu_map__nr
  12. perf_cpu_map__empty
  13. perf_cpu_map__idx
  14. perf_cpu_map__max

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 #include <perf/cpumap.h>
   3 #include <stdlib.h>
   4 #include <linux/refcount.h>
   5 #include <internal/cpumap.h>
   6 #include <asm/bug.h>
   7 #include <stdio.h>
   8 #include <string.h>
   9 #include <unistd.h>
  10 #include <ctype.h>
  11 #include <limits.h>
  12 
  13 struct perf_cpu_map *perf_cpu_map__dummy_new(void)
  14 {
  15         struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
  16 
  17         if (cpus != NULL) {
  18                 cpus->nr = 1;
  19                 cpus->map[0] = -1;
  20                 refcount_set(&cpus->refcnt, 1);
  21         }
  22 
  23         return cpus;
  24 }
  25 
  26 static void cpu_map__delete(struct perf_cpu_map *map)
  27 {
  28         if (map) {
  29                 WARN_ONCE(refcount_read(&map->refcnt) != 0,
  30                           "cpu_map refcnt unbalanced\n");
  31                 free(map);
  32         }
  33 }
  34 
  35 struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
  36 {
  37         if (map)
  38                 refcount_inc(&map->refcnt);
  39         return map;
  40 }
  41 
  42 void perf_cpu_map__put(struct perf_cpu_map *map)
  43 {
  44         if (map && refcount_dec_and_test(&map->refcnt))
  45                 cpu_map__delete(map);
  46 }
  47 
  48 static struct perf_cpu_map *cpu_map__default_new(void)
  49 {
  50         struct perf_cpu_map *cpus;
  51         int nr_cpus;
  52 
  53         nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  54         if (nr_cpus < 0)
  55                 return NULL;
  56 
  57         cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
  58         if (cpus != NULL) {
  59                 int i;
  60 
  61                 for (i = 0; i < nr_cpus; ++i)
  62                         cpus->map[i] = i;
  63 
  64                 cpus->nr = nr_cpus;
  65                 refcount_set(&cpus->refcnt, 1);
  66         }
  67 
  68         return cpus;
  69 }
  70 
  71 static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
  72 {
  73         size_t payload_size = nr_cpus * sizeof(int);
  74         struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
  75 
  76         if (cpus != NULL) {
  77                 cpus->nr = nr_cpus;
  78                 memcpy(cpus->map, tmp_cpus, payload_size);
  79                 refcount_set(&cpus->refcnt, 1);
  80         }
  81 
  82         return cpus;
  83 }
  84 
/*
 * Parse a kernel-style CPU list (e.g. "0-3,7\n") from @file into a new
 * CPU map.  Ranges like "a-b" are expanded into individual entries.
 * If no CPUs are read at all, falls back to the default online-CPU map.
 * Returns NULL on allocation failure; the caller owns the reference.
 */
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;	/* growable scratch array of CPU numbers */
	int max_entries = 0;		/* current capacity of tmp_cpus */
	int n, cpu, prev;
	char sep;

	sep = 0;
	/* prev >= 0 means the previous token ended with '-', i.e. we are
	 * inside a range whose start was prev. */
	prev = -1;
	for (;;) {
		/* NOTE(review): %u is paired with an int* here; matches the
		 * historical perf code but is technically mismatched — confirm. */
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			/* Expanding range prev..cpu: we will add the
			 * cpu - prev - 1 intermediate values below. */
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				/* Grow with headroom so every range does not realloc. */
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			/* Append the interior of the range; the endpoint cpu
			 * itself is appended below like any single value. */
			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		/* A '-' separator marks the start of a range; remember it. */
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		/* Stop at end of input (no separator read) or end of line. */
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
 142 
 143 static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
 144 {
 145         struct perf_cpu_map *cpus = NULL;
 146         FILE *onlnf;
 147 
 148         onlnf = fopen("/sys/devices/system/cpu/online", "r");
 149         if (!onlnf)
 150                 return cpu_map__default_new();
 151 
 152         cpus = perf_cpu_map__read(onlnf);
 153         fclose(onlnf);
 154         return cpus;
 155 }
 156 
/*
 * Create a CPU map from a user-supplied list string such as "0,2-4".
 *
 * NULL @cpu_list means "all online CPUs" (read from sysfs).  An empty
 * string yields the dummy map (single -1 entry).  A malformed list,
 * a duplicate CPU, or an inverted range returns NULL.  The caller owns
 * the returned reference.
 */
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;	/* growable scratch array of CPU numbers */
	int max_entries = 0;		/* current capacity of tmp_cpus */

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * must handle the case of empty cpumap to cover
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * ( e.g., because of CPU hotplug)
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		/* Reject out-of-range values and any separator other than
		 * end-of-string, ',' or '-'. */
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			/* Range "a-b": parse the end bound after the dash. */
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			/* Single value: degenerate range a-a. */
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates (O(n^2), fine for small lists) */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		/* Skip the trailing ',' (if any) and continue with the next item. */
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		/* Non-empty but non-numeric leftover: default online map. */
		cpus = cpu_map__default_new();
	else
		/* Empty list string: dummy "any CPU" map. */
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
 233 
 234 int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
 235 {
 236         if (idx < cpus->nr)
 237                 return cpus->map[idx];
 238 
 239         return -1;
 240 }
 241 
 242 int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 243 {
 244         return cpus ? cpus->nr : 1;
 245 }
 246 
 247 bool perf_cpu_map__empty(const struct perf_cpu_map *map)
 248 {
 249         return map ? map->map[0] == -1 : true;
 250 }
 251 
 252 int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
 253 {
 254         int i;
 255 
 256         for (i = 0; i < cpus->nr; ++i) {
 257                 if (cpus->map[i] == cpu)
 258                         return i;
 259         }
 260 
 261         return -1;
 262 }
 263 
 264 int perf_cpu_map__max(struct perf_cpu_map *map)
 265 {
 266         int i, max = -1;
 267 
 268         for (i = 0; i < map->nr; i++) {
 269                 if (map->map[i] > max)
 270                         max = map->map[i];
 271         }
 272 
 273         return max;
 274 }

/* [<][>][^][v][top][bottom][index][help] */