  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * tracing_map - lock-free map for tracing
  4. *
  5. * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
  6. *
  7. * tracing_map implementation inspired by lock-free map algorithms
  8. * originated by Dr. Cliff Click:
  9. *
  10. * http://www.azulsystems.com/blog/cliff/2007-03-26-non-blocking-hashtable
  11. * http://www.azulsystems.com/events/javaone_2007/2007_LockFreeHash.pdf
  12. */
  13. #include <linux/vmalloc.h>
  14. #include <linux/jhash.h>
  15. #include <linux/slab.h>
  16. #include <linux/sort.h>
  17. #include <linux/kmemleak.h>
  18. #include "tracing_map.h"
  19. #include "trace.h"
  20. /*
  21. * NOTE: For a detailed description of the data structures used by
  22. * these functions (such as tracing_map_elt) please see the overview
  23. * of tracing_map data structures at the beginning of tracing_map.h.
  24. */
  25. /**
  26. * tracing_map_update_sum - Add a value to a tracing_map_elt's sum field
  27. * @elt: The tracing_map_elt
  28. * @i: The index of the given sum associated with the tracing_map_elt
  29. * @n: The value to add to the sum
  30. *
  31. * Add n to sum i associated with the specified tracing_map_elt
  32. * instance. The index i is the index returned by the call to
  33. * tracing_map_add_sum_field() when the tracing map was set up.
  34. */
void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
        /* Sums are atomic64, so concurrent updaters never race. */
        atomic64_add(n, &elt->fields[i].sum);
}
  39. /**
  40. * tracing_map_read_sum - Return the value of a tracing_map_elt's sum field
  41. * @elt: The tracing_map_elt
  42. * @i: The index of the given sum associated with the tracing_map_elt
  43. *
  44. * Retrieve the value of the sum i associated with the specified
  45. * tracing_map_elt instance. The index i is the index returned by the
  46. * call to tracing_map_add_sum_field() when the tracing map was set
  47. * up.
  48. *
  49. * Return: The sum associated with field i for elt.
  50. */
u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
{
        /* Plain atomic read; no ordering guarantees beyond atomicity. */
        return (u64)atomic64_read(&elt->fields[i].sum);
}
  55. /**
  56. * tracing_map_set_var - Assign a tracing_map_elt's variable field
  57. * @elt: The tracing_map_elt
  58. * @i: The index of the given variable associated with the tracing_map_elt
  59. * @n: The value to assign
  60. *
  61. * Assign n to variable i associated with the specified tracing_map_elt
  62. * instance. The index i is the index returned by the call to
  63. * tracing_map_add_var() when the tracing map was set up.
  64. */
void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
        /* Store the value, then flag the variable as set so readers
         * using tracing_map_var_set() can tell it has a value. */
        atomic64_set(&elt->vars[i], n);
        elt->var_set[i] = true;
}
  70. /**
  71. * tracing_map_var_set - Return whether or not a variable has been set
  72. * @elt: The tracing_map_elt
  73. * @i: The index of the given variable associated with the tracing_map_elt
  74. *
  75. * Return true if the variable has been set, false otherwise. The
  76. * index i is the index returned by the call to tracing_map_add_var()
  77. * when the tracing map was set up.
  78. */
bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i)
{
        /* Flag set by tracing_map_set_var(), cleared by
         * tracing_map_read_var_once() and tracing_map_elt_clear(). */
        return elt->var_set[i];
}
  83. /**
  84. * tracing_map_read_var - Return the value of a tracing_map_elt's variable field
  85. * @elt: The tracing_map_elt
  86. * @i: The index of the given variable associated with the tracing_map_elt
  87. *
  88. * Retrieve the value of the variable i associated with the specified
  89. * tracing_map_elt instance. The index i is the index returned by the
  90. * call to tracing_map_add_var() when the tracing map was set
  91. * up.
  92. *
  93. * Return: The variable value associated with field i for elt.
  94. */
u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i)
{
        /* Non-destructive read: the var_set[] flag is left untouched. */
        return (u64)atomic64_read(&elt->vars[i]);
}
  99. /**
  100. * tracing_map_read_var_once - Return and reset a tracing_map_elt's variable field
  101. * @elt: The tracing_map_elt
  102. * @i: The index of the given variable associated with the tracing_map_elt
  103. *
  104. * Retrieve the value of the variable i associated with the specified
  105. * tracing_map_elt instance, and reset the variable to the 'not set'
  106. * state. The index i is the index returned by the call to
  107. * tracing_map_add_var() when the tracing map was set up. The reset
  108. * essentially makes the variable a read-once variable if it's only
  109. * accessed using this function.
  110. *
  111. * Return: The variable value associated with field i for elt.
  112. */
u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i)
{
        /* Clear the 'set' flag, then return the value - this makes the
         * variable behave as read-once when accessed only through here. */
        elt->var_set[i] = false;
        return (u64)atomic64_read(&elt->vars[i]);
}
  118. int tracing_map_cmp_string(void *val_a, void *val_b)
  119. {
  120. char *a = val_a;
  121. char *b = val_b;
  122. return strcmp(a, b);
  123. }
/* Fallback comparator: treats all values as equal (no meaningful order). */
int tracing_map_cmp_none(void *val_a, void *val_b)
{
        return 0;
}
/*
 * Comparator for sum fields: reads both atomic64 values and returns a
 * qsort-style three-way result. Also doubles as the marker identifying
 * sum fields (see tracing_map_elt_clear()).
 */
static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
{
        u64 a = atomic64_read((atomic64_t *)val_a);
        u64 b = atomic64_read((atomic64_t *)val_b);

        return (a > b) ? 1 : ((a < b) ? -1 : 0);
}
/*
 * Generate a typed three-way comparator: the raw u64 field value is
 * reinterpreted as 'type' before comparing, so width and signedness
 * are honored. Returns 1, -1 or 0 (qsort-style).
 */
#define DEFINE_TRACING_MAP_CMP_FN(type)					\
static int tracing_map_cmp_##type(void *val_a, void *val_b)		\
{									\
	type a = (type)(*(u64 *)val_a);					\
	type b = (type)(*(u64 *)val_b);					\
									\
	return (a > b) ? 1 : ((a < b) ? -1 : 0);			\
}

/* One comparator per signed/unsigned width used by tracing_map_cmp_num(). */
DEFINE_TRACING_MAP_CMP_FN(s64);
DEFINE_TRACING_MAP_CMP_FN(u64);
DEFINE_TRACING_MAP_CMP_FN(s32);
DEFINE_TRACING_MAP_CMP_FN(u32);
DEFINE_TRACING_MAP_CMP_FN(s16);
DEFINE_TRACING_MAP_CMP_FN(u16);
DEFINE_TRACING_MAP_CMP_FN(s8);
DEFINE_TRACING_MAP_CMP_FN(u8);
  150. tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
  151. int field_is_signed)
  152. {
  153. tracing_map_cmp_fn_t fn = tracing_map_cmp_none;
  154. switch (field_size) {
  155. case 8:
  156. if (field_is_signed)
  157. fn = tracing_map_cmp_s64;
  158. else
  159. fn = tracing_map_cmp_u64;
  160. break;
  161. case 4:
  162. if (field_is_signed)
  163. fn = tracing_map_cmp_s32;
  164. else
  165. fn = tracing_map_cmp_u32;
  166. break;
  167. case 2:
  168. if (field_is_signed)
  169. fn = tracing_map_cmp_s16;
  170. else
  171. fn = tracing_map_cmp_u16;
  172. break;
  173. case 1:
  174. if (field_is_signed)
  175. fn = tracing_map_cmp_s8;
  176. else
  177. fn = tracing_map_cmp_u8;
  178. break;
  179. }
  180. return fn;
  181. }
  182. static int tracing_map_add_field(struct tracing_map *map,
  183. tracing_map_cmp_fn_t cmp_fn)
  184. {
  185. int ret = -EINVAL;
  186. if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
  187. ret = map->n_fields;
  188. map->fields[map->n_fields++].cmp_fn = cmp_fn;
  189. }
  190. return ret;
  191. }
  192. /**
  193. * tracing_map_add_sum_field - Add a field describing a tracing_map sum
  194. * @map: The tracing_map
  195. *
  196. * Add a sum field to the key and return the index identifying it in
  197. * the map and associated tracing_map_elts. This is the index used
  198. * for instance to update a sum for a particular tracing_map_elt using
  199. * tracing_map_update_sum() or reading it via tracing_map_read_sum().
  200. *
  201. * Return: The index identifying the field in the map and associated
  202. * tracing_map_elts, or -EINVAL on error.
  203. */
int tracing_map_add_sum_field(struct tracing_map *map)
{
        /* The atomic64 comparator doubles as the sum-field marker. */
        return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}
  208. /**
  209. * tracing_map_add_var - Add a field describing a tracing_map var
  210. * @map: The tracing_map
  211. *
  212. * Add a var to the map and return the index identifying it in the map
  213. * and associated tracing_map_elts. This is the index used for
  214. * instance to update a var for a particular tracing_map_elt using
  215. * tracing_map_update_var() or reading it via tracing_map_read_var().
  216. *
  217. * Return: The index identifying the var in the map and associated
  218. * tracing_map_elts, or -EINVAL on error.
  219. */
  220. int tracing_map_add_var(struct tracing_map *map)
  221. {
  222. int ret = -EINVAL;
  223. if (map->n_vars < TRACING_MAP_VARS_MAX)
  224. ret = map->n_vars++;
  225. return ret;
  226. }
  227. /**
  228. * tracing_map_add_key_field - Add a field describing a tracing_map key
  229. * @map: The tracing_map
  230. * @offset: The offset within the key
  231. * @cmp_fn: The comparison function that will be used to sort on the key
  232. *
  233. * Let the map know there is a key and that if it's used as a sort key
  234. * to use cmp_fn.
  235. *
  236. * A key can be a subset of a compound key; for that purpose, the
  237. * offset param is used to describe where within the compound key
  238. * the key referenced by this key field resides.
  239. *
  240. * Return: The index identifying the field in the map and associated
  241. * tracing_map_elts, or -EINVAL on error.
  242. */
int tracing_map_add_key_field(struct tracing_map *map,
                              unsigned int offset,
                              tracing_map_cmp_fn_t cmp_fn)
{
        int idx = tracing_map_add_field(map, cmp_fn);

        if (idx < 0)
                return idx;

        /* Record where this key lives within the compound key, and
         * remember which field indices are keys. */
        map->fields[idx].offset = offset;

        map->key_idx[map->n_keys++] = idx;

        return idx;
}
  254. static void tracing_map_array_clear(struct tracing_map_array *a)
  255. {
  256. unsigned int i;
  257. if (!a->pages)
  258. return;
  259. for (i = 0; i < a->n_pages; i++)
  260. memset(a->pages[i], 0, PAGE_SIZE);
  261. }
  262. static void tracing_map_array_free(struct tracing_map_array *a)
  263. {
  264. unsigned int i;
  265. if (!a)
  266. return;
  267. if (!a->pages)
  268. goto free;
  269. for (i = 0; i < a->n_pages; i++) {
  270. if (!a->pages[i])
  271. break;
  272. kmemleak_free(a->pages[i]);
  273. free_page((unsigned long)a->pages[i]);
  274. }
  275. kfree(a->pages);
  276. free:
  277. kfree(a);
  278. }
/*
 * Allocate an array of n_elts entries of entry_size bytes, backed by a
 * set of individually allocated pages. Entry size is rounded up to a
 * power of two so that indexing can use shifts and masks instead of
 * division (see the TRACING_MAP_ELT/ENTRY accessors in tracing_map.h).
 * Returns NULL on allocation failure.
 */
static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
                                                         unsigned int entry_size)
{
        struct tracing_map_array *a;
        unsigned int i;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return NULL;

        a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
        a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
        a->n_pages = n_elts / a->entries_per_page;
        if (!a->n_pages)
                a->n_pages = 1;
        a->entry_shift = fls(a->entries_per_page) - 1;
        a->entry_mask = (1 << a->entry_shift) - 1;

        a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
        if (!a->pages)
                goto free;

        for (i = 0; i < a->n_pages; i++) {
                a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!a->pages[i])
                        goto free;
                /* Pages come straight from the page allocator; register
                 * them with kmemleak so they aren't flagged as leaks. */
                kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
        }
 out:
        return a;
 free:
        /* tracing_map_array_free() copes with a partially built array. */
        tracing_map_array_free(a);
        a = NULL;

        goto out;
}
/* Reset an elt's sums and variables, then let the client clear its data. */
static void tracing_map_elt_clear(struct tracing_map_elt *elt)
{
        unsigned i;

        /* Only sum fields own atomic64 storage; key fields are skipped. */
        for (i = 0; i < elt->map->n_fields; i++)
                if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
                        atomic64_set(&elt->fields[i].sum, 0);

        /* Zero all variables and mark them 'not set'. */
        for (i = 0; i < elt->map->n_vars; i++) {
                atomic64_set(&elt->vars[i], 0);
                elt->var_set[i] = false;
        }

        if (elt->map->ops && elt->map->ops->elt_clear)
                elt->map->ops->elt_clear(elt);
}
/* Clear an elt and copy the map's field layout into it. */
static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
{
        unsigned int i;

        tracing_map_elt_clear(elt);

        for (i = 0; i < elt->map->n_fields; i++) {
                elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;

                /* Only non-sum (key) fields carry a key offset. */
                if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
                        elt->fields[i].offset = elt->map->fields[i].offset;
        }
}
/* Free an elt and everything it owns; NULL and partial elts are fine. */
static void tracing_map_elt_free(struct tracing_map_elt *elt)
{
        if (!elt)
                return;

        /* Let the client free its private data before the elt goes away. */
        if (elt->map->ops && elt->map->ops->elt_free)
                elt->map->ops->elt_free(elt);
        kfree(elt->fields);
        kfree(elt->vars);
        kfree(elt->var_set);
        kfree(elt->key);
        kfree(elt);
}
/*
 * Allocate a fully initialized elt for the given map: key buffer,
 * field/var arrays, and any client-private data via ops->elt_alloc().
 * Returns the elt or an ERR_PTR; on any failure everything allocated
 * so far is unwound by tracing_map_elt_free().
 */
static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
{
        struct tracing_map_elt *elt;
        int err = 0;

        elt = kzalloc(sizeof(*elt), GFP_KERNEL);
        if (!elt)
                return ERR_PTR(-ENOMEM);

        /* Back-pointer needed by tracing_map_elt_free() and friends. */
        elt->map = map;

        elt->key = kzalloc(map->key_size, GFP_KERNEL);
        if (!elt->key) {
                err = -ENOMEM;
                goto free;
        }

        elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
        if (!elt->fields) {
                err = -ENOMEM;
                goto free;
        }

        elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL);
        if (!elt->vars) {
                err = -ENOMEM;
                goto free;
        }

        elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL);
        if (!elt->var_set) {
                err = -ENOMEM;
                goto free;
        }

        tracing_map_elt_init_fields(elt);

        /* Optional client hook for per-elt private data. */
        if (map->ops && map->ops->elt_alloc) {
                err = map->ops->elt_alloc(elt);
                if (err)
                        goto free;
        }
        return elt;
 free:
        tracing_map_elt_free(elt);

        return ERR_PTR(err);
}
/*
 * Claim the next unused elt from the pre-allocated pool, or NULL if the
 * pool is exhausted. Safe against concurrent inserters: the add-unless
 * saturates next_elt at max_elts so a slot can never be claimed twice.
 */
static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
{
        struct tracing_map_elt *elt = NULL;
        int idx;

        idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
        if (idx < map->max_elts) {
                elt = *(TRACING_MAP_ELT(map->elts, idx));
                /* Optional client hook run each time an elt is handed out. */
                if (map->ops && map->ops->elt_init)
                        map->ops->elt_init(elt);
        }

        return elt;
}
/* Free every elt in the pool, then the pool array itself. */
static void tracing_map_free_elts(struct tracing_map *map)
{
        unsigned int i;

        if (!map->elts)
                return;

        for (i = 0; i < map->max_elts; i++) {
                tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
                /* Clear the slot so a repeat call can't double-free. */
                *(TRACING_MAP_ELT(map->elts, i)) = NULL;
        }

        tracing_map_array_free(map->elts);
        map->elts = NULL;
}
/*
 * Pre-allocate the full pool of max_elts elts so the insertion path
 * never allocates. Returns 0 or -ENOMEM; on failure everything
 * allocated so far is freed.
 */
static int tracing_map_alloc_elts(struct tracing_map *map)
{
        unsigned int i;

        map->elts = tracing_map_array_alloc(map->max_elts,
                                            sizeof(struct tracing_map_elt *));
        if (!map->elts)
                return -ENOMEM;

        for (i = 0; i < map->max_elts; i++) {
                *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
                if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
                        /* NULL the ERR_PTR slot before the bulk free walks it. */
                        *(TRACING_MAP_ELT(map->elts, i)) = NULL;
                        tracing_map_free_elts(map);

                        return -ENOMEM;
                }
        }

        return 0;
}
  425. static inline bool keys_match(void *key, void *test_key, unsigned key_size)
  426. {
  427. bool match = true;
  428. if (memcmp(key, test_key, key_size))
  429. match = false;
  430. return match;
  431. }
/*
 * Core lock-free open-addressing lookup/insert. Probes linearly from
 * the hash-derived start slot. A slot's 'key' field holds the jhash of
 * the full key (0 means empty, hence the 0 -> 1 remap below); the full
 * key bytes live in the elt and are compared with keys_match(). Slots
 * are claimed with cmpxchg() on the hash, and the elt pointer is
 * published with smp_wmb() + WRITE_ONCE() so readers that see a
 * non-NULL val also see its initialized contents.
 */
static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
        u32 idx, key_hash, test_key;
        int dup_try = 0;
        struct tracing_map_entry *entry;
        struct tracing_map_elt *val;

        key_hash = jhash(key, map->key_size, 0);
        /* 0 is reserved to mean 'empty slot'; remap a real 0 hash. */
        if (key_hash == 0)
                key_hash = 1;
        idx = key_hash >> (32 - (map->map_bits + 1));

        while (1) {
                /* map_size is a power of two, so masking wraps the probe. */
                idx &= (map->map_size - 1);
                entry = TRACING_MAP_ENTRY(map->map, idx);
                test_key = entry->key;

                if (test_key && test_key == key_hash) {
                        val = READ_ONCE(entry->val);
                        if (val &&
                            keys_match(key, val->key, map->key_size)) {
                                if (!lookup_only)
                                        atomic64_inc(&map->hits);
                                return val;
                        } else if (unlikely(!val)) {
                                /*
                                 * The key is present. But, val (pointer to elt
                                 * struct) is still NULL. which means some other
                                 * thread is in the process of inserting an
                                 * element.
                                 *
                                 * On top of that, its key_hash is same as the
                                 * one being inserted right now. So, it's
                                 * possible that the element has the same
                                 * key as well.
                                 */
                                dup_try++;
                                /* Give up after a full table's worth of retries. */
                                if (dup_try > map->map_size) {
                                        atomic64_inc(&map->drops);
                                        break;
                                }
                                continue;
                        }
                }

                if (!test_key) {
                        if (lookup_only)
                                break;

                        /* Claim the empty slot; zero return means we won. */
                        if (!cmpxchg(&entry->key, 0, key_hash)) {
                                struct tracing_map_elt *elt;

                                elt = get_free_elt(map);
                                if (!elt) {
                                        /* Pool exhausted: release the slot. */
                                        atomic64_inc(&map->drops);
                                        entry->key = 0;
                                        break;
                                }

                                memcpy(elt->key, key, map->key_size);
                                /*
                                 * Ensure the initialization is visible and
                                 * publish the elt.
                                 */
                                smp_wmb();
                                WRITE_ONCE(entry->val, elt);
                                atomic64_inc(&map->hits);
                                return entry->val;
                        } else {
                                /*
                                 * cmpxchg() failed. Loop around once
                                 * more to check what key was inserted.
                                 */
                                dup_try++;
                                continue;
                        }
                }

                idx++;
        }

        return NULL;
}
  507. /**
  508. * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
  509. * @map: The tracing_map to insert into
  510. * @key: The key to insert
  511. *
  512. * Inserts a key into a tracing_map and creates and returns a new
  513. * tracing_map_elt for it, or if the key has already been inserted by
  514. * a previous call, returns the tracing_map_elt already associated
  515. * with it. When the map was created, the number of elements to be
  516. * allocated for the map was specified (internally maintained as
  517. * 'max_elts' in struct tracing_map), and that number of
  518. * tracing_map_elts was created by tracing_map_init(). This is the
  519. * pre-allocated pool of tracing_map_elts that tracing_map_insert()
  520. * will allocate from when adding new keys. Once that pool is
  521. * exhausted, tracing_map_insert() is useless and will return NULL to
  522. * signal that state. There are two user-visible tracing_map
  523. * variables, 'hits' and 'drops', which are updated by this function.
  524. * Every time an element is either successfully inserted or retrieved,
  525. * the 'hits' value is incremented. Every time an element insertion
  526. * fails, the 'drops' value is incremented.
  527. *
  528. * This is a lock-free tracing map insertion function implementing a
  529. * modified form of Cliff Click's basic insertion algorithm. It
  530. * requires the table size be a power of two. To prevent any
  531. * possibility of an infinite loop we always make the internal table
  532. * size double the size of the requested table size (max_elts * 2).
  533. * Likewise, we never reuse a slot or resize or delete elements - when
  534. * we've reached max_elts entries, we simply return NULL once we've
  535. * run out of entries. Readers can at any point in time traverse the
  536. * tracing map and safely access the key/val pairs.
  537. *
  538. * Return: the tracing_map_elt pointer val associated with the key.
  539. * If this was a newly inserted key, the val will be a newly allocated
  540. * and associated tracing_map_elt pointer val. If the key wasn't
  541. * found and the pool of tracing_map_elts has been exhausted, NULL is
  542. * returned and no further insertions will succeed.
  543. */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
        /* lookup_only = false: claim a new elt if the key isn't present. */
        return __tracing_map_insert(map, key, false);
}
  548. /**
  549. * tracing_map_lookup - Retrieve val from a tracing_map
  550. * @map: The tracing_map to perform the lookup on
  551. * @key: The key to look up
  552. *
  553. * Looks up key in tracing_map and if found returns the matching
  554. * tracing_map_elt. This is a lock-free lookup; see
  555. * tracing_map_insert() for details on tracing_map and how it works.
  556. * Every time an element is retrieved, the 'hits' value is
  557. * incremented. There is one user-visible tracing_map variable,
  558. * 'hits', which is updated by this function. Every time an element
  559. * is successfully retrieved, the 'hits' value is incremented. The
  560. * 'drops' value is never updated by this function.
  561. *
  562. * Return: the tracing_map_elt pointer val associated with the key.
  563. * If the key wasn't found, NULL is returned.
  564. */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
        /* lookup_only = true: never inserts, never bumps 'drops'. */
        return __tracing_map_insert(map, key, true);
}
  569. /**
  570. * tracing_map_destroy - Destroy a tracing_map
  571. * @map: The tracing_map to destroy
  572. *
  573. * Frees a tracing_map along with its associated array of
  574. * tracing_map_elts.
  575. *
  576. * Callers should make sure there are no readers or writers actively
  577. * reading or inserting into the map before calling this.
  578. */
void tracing_map_destroy(struct tracing_map *map)
{
        if (!map)
                return;

        /* Free the elt pool first, then the entry table, then the map. */
        tracing_map_free_elts(map);

        tracing_map_array_free(map->map);
        kfree(map);
}
  587. /**
  588. * tracing_map_clear - Clear a tracing_map
  589. * @map: The tracing_map to clear
  590. *
  591. * Resets the tracing map to a cleared or initial state. The
  592. * tracing_map_elts are all cleared, and the array of struct
  593. * tracing_map_entry is reset to an initialized state.
  594. *
  595. * Callers should make sure there are no writers actively inserting
  596. * into the map before calling this.
  597. */
void tracing_map_clear(struct tracing_map *map)
{
        unsigned int i;

        /* Restart elt allocation from the beginning of the pool. */
        atomic_set(&map->next_elt, 0);
        atomic64_set(&map->hits, 0);
        atomic64_set(&map->drops, 0);

        /* Wipe the hash entries, then reset every pre-allocated elt. */
        tracing_map_array_clear(map->map);

        for (i = 0; i < map->max_elts; i++)
                tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}
/* Stash the sort key on the map so the sort comparators can reach it. */
static void set_sort_key(struct tracing_map *map,
                         struct tracing_map_sort_key *sort_key)
{
        map->sort_key = *sort_key;
}
  613. /**
  614. * tracing_map_create - Create a lock-free map and element pool
  615. * @map_bits: The size of the map (2 ** map_bits)
  616. * @key_size: The size of the key for the map in bytes
  617. * @ops: Optional client-defined tracing_map_ops instance
  618. * @private_data: Client data associated with the map
  619. *
  620. * Creates and sets up a map to contain 2 ** map_bits number of
  621. * elements (internally maintained as 'max_elts' in struct
  622. * tracing_map). Before using, map fields should be added to the map
  623. * with tracing_map_add_sum_field() and tracing_map_add_key_field().
  624. * tracing_map_init() should then be called to allocate the array of
  625. * tracing_map_elts, in order to avoid allocating anything in the map
  626. * insertion path. The user-specified map size reflects the maximum
  627. * number of elements that can be contained in the table requested by
  628. * the user - internally we double that in order to keep the table
  629. * sparse and keep collisions manageable.
  630. *
  631. * A tracing_map is a special-purpose map designed to aggregate or
  632. * 'sum' one or more values associated with a specific object of type
  633. * tracing_map_elt, which is attached by the map to a given key.
  634. *
  635. * tracing_map_create() sets up the map itself, and provides
  636. * operations for inserting tracing_map_elts, but doesn't allocate the
  637. * tracing_map_elts themselves, or provide a means for describing the
  638. * keys or sums associated with the tracing_map_elts. All
  639. * tracing_map_elts for a given map have the same set of sums and
  640. * keys, which are defined by the client using the functions
  641. * tracing_map_add_key_field() and tracing_map_add_sum_field(). Once
  642. * the fields are defined, the pool of elements allocated for the map
  643. * can be created, which occurs when the client code calls
  644. * tracing_map_init().
  645. *
  646. * When tracing_map_init() returns, tracing_map_elt elements can be
  647. * inserted into the map using tracing_map_insert(). When called,
  648. * tracing_map_insert() grabs a free tracing_map_elt from the pool, or
  649. * finds an existing match in the map and in either case returns it.
  650. * The client can then use tracing_map_update_sum() and
  651. * tracing_map_read_sum() to update or read a given sum field for the
  652. * tracing_map_elt.
  653. *
  654. * The client can at any point retrieve and traverse the current set
  655. * of inserted tracing_map_elts in a tracing_map, via
  656. * tracing_map_sort_entries(). Sorting can be done on any field,
  657. * including keys.
  658. *
  659. * See tracing_map.h for a description of tracing_map_ops.
  660. *
  661. * Return: the tracing_map pointer if successful, ERR_PTR if not.
  662. */
struct tracing_map *tracing_map_create(unsigned int map_bits,
                                       unsigned int key_size,
                                       const struct tracing_map_ops *ops,
                                       void *private_data)
{
        struct tracing_map *map;
        unsigned int i;

        if (map_bits < TRACING_MAP_BITS_MIN ||
            map_bits > TRACING_MAP_BITS_MAX)
                return ERR_PTR(-EINVAL);

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return ERR_PTR(-ENOMEM);

        map->map_bits = map_bits;
        map->max_elts = (1 << map_bits);
        atomic_set(&map->next_elt, 0);

        /* Table is double the requested size to keep collisions manageable
         * and guarantee the insert probe loop terminates. */
        map->map_size = (1 << (map_bits + 1));
        map->ops = ops;

        map->private_data = private_data;

        map->map = tracing_map_array_alloc(map->map_size,
                                           sizeof(struct tracing_map_entry));
        if (!map->map)
                goto free;

        map->key_size = key_size;
        /* -1 marks unused key slots (filled by tracing_map_add_key_field()). */
        for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
                map->key_idx[i] = -1;
 out:
        return map;
 free:
        tracing_map_destroy(map);
        map = ERR_PTR(-ENOMEM);

        goto out;
}
  696. /**
  697. * tracing_map_init - Allocate and clear a map's tracing_map_elts
  698. * @map: The tracing_map to initialize
  699. *
* Allocates and clears a pool of tracing_map_elts equal to the
  701. * user-specified size of 2 ** map_bits (internally maintained as
  702. * 'max_elts' in struct tracing_map). Before using, the map fields
  703. * should be added to the map with tracing_map_add_sum_field() and
  704. * tracing_map_add_key_field(). tracing_map_init() should then be
  705. * called to allocate the array of tracing_map_elts, in order to avoid
  706. * allocating anything in the map insertion path. The user-specified
  707. * map size reflects the max number of elements requested by the user
  708. * - internally we double that in order to keep the table sparse and
  709. * keep collisions manageable.
  710. *
  711. * See tracing_map.h for a description of tracing_map_ops.
  712. *
  713. * Return: the tracing_map pointer if successful, ERR_PTR if not.
  714. */
int tracing_map_init(struct tracing_map *map)
{
        int err;

        if (map->n_fields < 2)
                return -EINVAL; /* need at least 1 key and 1 val */

        /* Pre-allocate the whole elt pool so inserts never allocate. */
        err = tracing_map_alloc_elts(map);
        if (err)
                return err;

        tracing_map_clear(map);

        return err;
}
  726. static int cmp_entries_dup(const void *A, const void *B)
  727. {
  728. const struct tracing_map_sort_entry *a, *b;
  729. a = *(const struct tracing_map_sort_entry **)A;
  730. b = *(const struct tracing_map_sort_entry **)B;
  731. return memcmp(a->key, b->key, a->elt->map->key_size);
  732. }
  733. static int cmp_entries_sum(const void *A, const void *B)
  734. {
  735. const struct tracing_map_elt *elt_a, *elt_b;
  736. const struct tracing_map_sort_entry *a, *b;
  737. struct tracing_map_sort_key *sort_key;
  738. struct tracing_map_field *field;
  739. tracing_map_cmp_fn_t cmp_fn;
  740. void *val_a, *val_b;
  741. int ret = 0;
  742. a = *(const struct tracing_map_sort_entry **)A;
  743. b = *(const struct tracing_map_sort_entry **)B;
  744. elt_a = a->elt;
  745. elt_b = b->elt;
  746. sort_key = &elt_a->map->sort_key;
  747. field = &elt_a->fields[sort_key->field_idx];
  748. cmp_fn = field->cmp_fn;
  749. val_a = &elt_a->fields[sort_key->field_idx].sum;
  750. val_b = &elt_b->fields[sort_key->field_idx].sum;
  751. ret = cmp_fn(val_a, val_b);
  752. if (sort_key->descending)
  753. ret = -ret;
  754. return ret;
  755. }
  756. static int cmp_entries_key(const void *A, const void *B)
  757. {
  758. const struct tracing_map_elt *elt_a, *elt_b;
  759. const struct tracing_map_sort_entry *a, *b;
  760. struct tracing_map_sort_key *sort_key;
  761. struct tracing_map_field *field;
  762. tracing_map_cmp_fn_t cmp_fn;
  763. void *val_a, *val_b;
  764. int ret = 0;
  765. a = *(const struct tracing_map_sort_entry **)A;
  766. b = *(const struct tracing_map_sort_entry **)B;
  767. elt_a = a->elt;
  768. elt_b = b->elt;
  769. sort_key = &elt_a->map->sort_key;
  770. field = &elt_a->fields[sort_key->field_idx];
  771. cmp_fn = field->cmp_fn;
  772. val_a = elt_a->key + field->offset;
  773. val_b = elt_b->key + field->offset;
  774. ret = cmp_fn(val_a, val_b);
  775. if (sort_key->descending)
  776. ret = -ret;
  777. return ret;
  778. }
  779. static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
  780. {
  781. if (!entry)
  782. return;
  783. if (entry->elt_copied)
  784. tracing_map_elt_free(entry->elt);
  785. kfree(entry);
  786. }
  787. /**
  788. * tracing_map_destroy_sort_entries - Destroy an array of sort entries
  789. * @entries: The entries to destroy
  790. * @n_entries: The number of entries in the array
  791. *
  792. * Destroy the elements returned by a tracing_map_sort_entries() call.
  793. */
  794. void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
  795. unsigned int n_entries)
  796. {
  797. unsigned int i;
  798. for (i = 0; i < n_entries; i++)
  799. destroy_sort_entry(entries[i]);
  800. vfree(entries);
  801. }
  802. static struct tracing_map_sort_entry *
  803. create_sort_entry(void *key, struct tracing_map_elt *elt)
  804. {
  805. struct tracing_map_sort_entry *sort_entry;
  806. sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
  807. if (!sort_entry)
  808. return NULL;
  809. sort_entry->key = key;
  810. sort_entry->elt = elt;
  811. return sort_entry;
  812. }
  813. static void detect_dups(struct tracing_map_sort_entry **sort_entries,
  814. int n_entries, unsigned int key_size)
  815. {
  816. unsigned int total_dups = 0;
  817. int i;
  818. void *key;
  819. if (n_entries < 2)
  820. return;
  821. sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
  822. (int (*)(const void *, const void *))cmp_entries_dup, NULL);
  823. key = sort_entries[0]->key;
  824. for (i = 1; i < n_entries; i++) {
  825. if (!memcmp(sort_entries[i]->key, key, key_size)) {
  826. total_dups++;
  827. continue;
  828. }
  829. key = sort_entries[i]->key;
  830. }
  831. WARN_ONCE(total_dups > 0,
  832. "Duplicates detected: %d\n", total_dups);
  833. }
  834. static bool is_key(struct tracing_map *map, unsigned int field_idx)
  835. {
  836. unsigned int i;
  837. for (i = 0; i < map->n_keys; i++)
  838. if (map->key_idx[i] == field_idx)
  839. return true;
  840. return false;
  841. }
  842. static void sort_secondary(struct tracing_map *map,
  843. const struct tracing_map_sort_entry **entries,
  844. unsigned int n_entries,
  845. struct tracing_map_sort_key *primary_key,
  846. struct tracing_map_sort_key *secondary_key)
  847. {
  848. int (*primary_fn)(const void *, const void *);
  849. int (*secondary_fn)(const void *, const void *);
  850. unsigned i, start = 0, n_sub = 1;
  851. if (is_key(map, primary_key->field_idx))
  852. primary_fn = cmp_entries_key;
  853. else
  854. primary_fn = cmp_entries_sum;
  855. if (is_key(map, secondary_key->field_idx))
  856. secondary_fn = cmp_entries_key;
  857. else
  858. secondary_fn = cmp_entries_sum;
  859. for (i = 0; i < n_entries - 1; i++) {
  860. const struct tracing_map_sort_entry **a = &entries[i];
  861. const struct tracing_map_sort_entry **b = &entries[i + 1];
  862. if (primary_fn(a, b) == 0) {
  863. n_sub++;
  864. if (i < n_entries - 2)
  865. continue;
  866. }
  867. if (n_sub < 2) {
  868. start = i + 1;
  869. n_sub = 1;
  870. continue;
  871. }
  872. set_sort_key(map, secondary_key);
  873. sort(&entries[start], n_sub,
  874. sizeof(struct tracing_map_sort_entry *),
  875. (int (*)(const void *, const void *))secondary_fn, NULL);
  876. set_sort_key(map, primary_key);
  877. start = i + 1;
  878. n_sub = 1;
  879. }
  880. }
  881. /**
  882. * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
  883. * @map: The tracing_map
  884. * @sort_keys: The sort key to use for sorting
  885. * @n_sort_keys: hitcount, always have at least one
  886. * @sort_entries: outval: pointer to allocated and sorted array of entries
  887. *
  888. * tracing_map_sort_entries() sorts the current set of entries in the
  889. * map and returns the list of tracing_map_sort_entries containing
  890. * them to the client in the sort_entries param. The client can
  891. * access the struct tracing_map_elt element of interest directly as
  892. * the 'elt' field of a returned struct tracing_map_sort_entry object.
  893. *
  894. * The sort_key has only two fields: idx and descending. 'idx' refers
  895. * to the index of the field added via tracing_map_add_sum_field() or
  896. * tracing_map_add_key_field() when the tracing_map was initialized.
  897. * 'descending' is a flag that if set reverses the sort order, which
  898. * by default is ascending.
  899. *
  900. * The client should not hold on to the returned array but should use
  901. * it and call tracing_map_destroy_sort_entries() when done.
  902. *
  903. * Return: the number of sort_entries in the struct tracing_map_sort_entry
  904. * array, negative on error
  905. */
  906. int tracing_map_sort_entries(struct tracing_map *map,
  907. struct tracing_map_sort_key *sort_keys,
  908. unsigned int n_sort_keys,
  909. struct tracing_map_sort_entry ***sort_entries)
  910. {
  911. int (*cmp_entries_fn)(const void *, const void *);
  912. struct tracing_map_sort_entry *sort_entry, **entries;
  913. int i, n_entries, ret;
  914. entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
  915. if (!entries)
  916. return -ENOMEM;
  917. for (i = 0, n_entries = 0; i < map->map_size; i++) {
  918. struct tracing_map_entry *entry;
  919. entry = TRACING_MAP_ENTRY(map->map, i);
  920. if (!entry->key || !entry->val)
  921. continue;
  922. entries[n_entries] = create_sort_entry(entry->val->key,
  923. entry->val);
  924. if (!entries[n_entries++]) {
  925. ret = -ENOMEM;
  926. goto free;
  927. }
  928. }
  929. if (n_entries == 0) {
  930. ret = 0;
  931. goto free;
  932. }
  933. if (n_entries == 1) {
  934. *sort_entries = entries;
  935. return 1;
  936. }
  937. detect_dups(entries, n_entries, map->key_size);
  938. if (is_key(map, sort_keys[0].field_idx))
  939. cmp_entries_fn = cmp_entries_key;
  940. else
  941. cmp_entries_fn = cmp_entries_sum;
  942. set_sort_key(map, &sort_keys[0]);
  943. sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
  944. (int (*)(const void *, const void *))cmp_entries_fn, NULL);
  945. if (n_sort_keys > 1)
  946. sort_secondary(map,
  947. (const struct tracing_map_sort_entry **)entries,
  948. n_entries,
  949. &sort_keys[0],
  950. &sort_keys[1]);
  951. *sort_entries = entries;
  952. return n_entries;
  953. free:
  954. tracing_map_destroy_sort_entries(entries, n_entries);
  955. return ret;
  956. }