cpumap.c

// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include "sane_ctype.h"

static int max_cpu_num;
static int max_present_cpu_num;
static int max_node_num;
static int *cpunode_map;

static struct cpu_map *cpu_map__default_new(void)
{
	struct cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

	if (cpus != NULL) {
		cpus->nr = nr_cpus;
		memcpy(cpus->map, tmp_cpus, payload_size);
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}
struct cpu_map *cpu_map__read(FILE *file)
{
	struct cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
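
/*
 * Usage sketch (illustrative): given sysfs-style input such as "0-3,8\n",
 * cpu_map__read() expands the range and returns a five-entry map. The
 * fmemopen() call below is just one assumed way to obtain a readable stream:
 *
 *	FILE *f = fmemopen((void *)"0-3,8\n", 6, "r");
 *	struct cpu_map *cpus = cpu_map__read(f);
 *	-> cpus->nr == 5, cpus->map == { 0, 1, 2, 3, 8 }
 *	cpu_map__put(cpus);
 *	fclose(f);
 */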
static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

struct cpu_map *cpu_map__new(const char *cpu_list)
{
	struct cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * must handle the case of empty cpumap to cover
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * ( e.g., because of CPU hotplug)
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
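
/*
 * Usage sketch (illustrative): cpu_map__new() parses a comma-separated list
 * of CPUs and ranges. A NULL argument falls back to the online-CPU map and
 * an empty string yields the dummy map (a single -1 entry):
 *
 *	struct cpu_map *cpus = cpu_map__new("0-2,5");
 *	-> cpus->nr == 4, cpus->map == { 0, 1, 2, 5 }
 *	cpu_map__put(cpus);
 */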
static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
	struct cpu_map *map;

	map = cpu_map__empty_new(cpus->nr);
	if (map) {
		unsigned i;

		for (i = 0; i < cpus->nr; i++) {
			/*
			 * Special treatment for -1, which is not real cpu number,
			 * and we need to use (int) -1 to initialize map[i],
			 * otherwise it would become 65535.
			 */
			if (cpus->cpu[i] == (u16) -1)
				map->map[i] = -1;
			else
				map->map[i] = (int) cpus->cpu[i];
		}
	}

	return map;
}

static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
	struct cpu_map *map;
	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

	nr = bitmap_weight(mask->mask, nbits);

	map = cpu_map__empty_new(nr);
	if (map) {
		int cpu, i = 0;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[i++] = cpu;
	}
	return map;
}

struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
{
	if (data->type == PERF_CPU_MAP__CPUS)
		return cpu_map__from_entries((struct cpu_map_entries *)data->data);
	else
		return cpu_map__from_mask((struct cpu_map_mask *)data->data);
}

size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

struct cpu_map *cpu_map__dummy_new(void)
{
	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

struct cpu_map *cpu_map__empty_new(int nr)
{
	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static void cpu_map__delete(struct cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

struct cpu_map *cpu_map__get(struct cpu_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void cpu_map__put(struct cpu_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
	return ret ?: value;
}

int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
{
	int cpu;

	if (idx > map->nr)
		return -1;

	cpu = map->map[idx];

	return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
		       int (*f)(struct cpu_map *map, int cpu, void *data),
		       void *data)
{
	struct cpu_map *c;
	int nr = cpus->nr;
	int cpu, s1, s2;

	/* allocate as much as possible */
	c = calloc(1, sizeof(*c) + nr * sizeof(int));
	if (!c)
		return -1;

	for (cpu = 0; cpu < nr; cpu++) {
		s1 = f(cpus, cpu, data);
		for (s2 = 0; s2 < c->nr; s2++) {
			if (s1 == c->map[s2])
				break;
		}
		if (s2 == c->nr) {
			c->map[c->nr] = s1;
			c->nr++;
		}
	}
	/* ensure we process id in increasing order */
	qsort(c->map, c->nr, sizeof(int), cmp_ids);

	refcount_set(&c->refcnt, 1);
	*res = c;
	return 0;
}
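
/*
 * Illustrative example (assumed topology): with cpus == { 0, 1, 2, 3 } where
 * CPUs 0-1 sit on socket 0 and CPUs 2-3 on socket 1, calling
 * cpu_map__build_map(cpus, &sockets, cpu_map__get_socket, NULL) leaves
 * sockets->map == { 0, 1 }: one entry per distinct id, sorted ascending.
 */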
int cpu_map__get_core_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
	return ret ?: value;
}

int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
{
	int cpu, s;

	if (idx > map->nr)
		return -1;

	cpu = map->map[idx];

	cpu = cpu_map__get_core_id(cpu);

	s = cpu_map__get_socket(map, idx, data);
	if (s == -1)
		return -1;

	/*
	 * encode socket in upper 16 bits
	 * core_id is relative to socket, and
	 * we need a global id. So we combine
	 * socket + core id
	 */
	return (s << 16) | (cpu & 0xffff);
}
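
/*
 * Illustrative example: for a CPU on socket 1 with core_id 3,
 * cpu_map__get_core() returns (1 << 16) | 3 == 0x10003, so the id stays
 * unique across sockets even though core_id is only unique per socket.
 */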
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num = 4096;
	max_present_cpu_num = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}
/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}
int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

int cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num))
		set_max_cpu_num();

	return max_cpu_num;
}

int cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num))
		set_max_cpu_num();

	return max_present_cpu_num;
}

int cpu__get_node(int cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	return 0;
}

int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;

		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;

			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}
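
/*
 * Usage sketch (illustrative): cpu__setup_cpunode_map() walks the sysfs
 * node directories once and fills cpunode_map; afterwards cpu__get_node()
 * is a plain array lookup:
 *
 *	if (cpu__setup_cpunode_map())
 *		return -1;
 *	node = cpu__get_node(0);	-> NUMA node of CPU 0, or -1 if unknown
 */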
bool cpu_map__has(struct cpu_map *cpus, int cpu)
{
	return cpu_map__idx(cpus, cpu) != -1;
}

int cpu_map__idx(struct cpu_map *cpus, int cpu)
{
	int i;

	for (i = 0; i < cpus->nr; ++i) {
		if (cpus->map[i] == cpu)
			return i;
	}

	return -1;
}

int cpu_map__cpu(struct cpu_map *cpus, int idx)
{
	return cpus->map[idx];
}

size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
{
	int i, cpu, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < map->nr + 1; i++) {
		bool last = i == map->nr;

		cpu = last ? INT_MAX : map->map[i];

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[i]);
			}
		} else if (((i - start) != (cpu - map->map[start])) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[start]);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						map->map[start], map->map[end]);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug("cpumask list: %s\n", buf);
	return ret;
}
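
/*
 * Illustrative example: for a map holding { 0, 1, 2, 3, 8 },
 * cpu_map__snprint() writes "0-3,8" into buf, collapsing consecutive CPUs
 * into ranges and separating groups with commas.
 */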
static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	int last_cpu = cpu_map__cpu(map, map->nr - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < map->nr; i++) {
		cpu = cpu_map__cpu(map, i);
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}

	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}
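
/*
 * Illustrative example: for a map holding { 0, 4, 5 }, the bitmap byte is
 * 0x31 (bits 0, 4 and 5 set), so cpu_map__snprint_mask() emits the nibbles
 * from high to low and writes "31" into buf.
 */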