btf_dump.c 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552
  1. // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2. /*
  3. * BTF-to-C type converter.
  4. *
  5. * Copyright (c) 2019 Facebook
  6. */
  7. #include <stdbool.h>
  8. #include <stddef.h>
  9. #include <stdlib.h>
  10. #include <string.h>
  11. #include <ctype.h>
  12. #include <endian.h>
  13. #include <errno.h>
  14. #include <limits.h>
  15. #include <linux/err.h>
  16. #include <linux/btf.h>
  17. #include <linux/kernel.h>
  18. #include "btf.h"
  19. #include "hashmap.h"
  20. #include "libbpf.h"
  21. #include "libbpf_internal.h"
  22. static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
  23. static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
  24. static const char *pfx(int lvl)
  25. {
  26. return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl];
  27. }
/* per-type state of topological sorting (ORDERING marks an in-progress DFS
 * visit, which is how strong type cycles are detected)
 */
enum btf_dump_type_order_state {
	NOT_ORDERED,
	ORDERING,
	ORDERED,
};

/* per-type emit state (EMITTING marks an in-progress emit, signalling that a
 * forward declaration may be needed to break the cycle)
 */
enum btf_dump_type_emit_state {
	NOT_EMITTED,
	EMITTING,
	EMITTED,
};
/* per-type auxiliary state; bit-fields keep one entry to a single byte */
struct btf_dump_type_aux_state {
	/* topological sorting state */
	enum btf_dump_type_order_state order_state: 2;
	/* emitting state used to determine the need for forward declaration */
	enum btf_dump_type_emit_state emit_state: 2;
	/* whether forward declaration was already emitted */
	__u8 fwd_emitted: 1;
	/* whether unique non-duplicate name was already assigned */
	__u8 name_resolved: 1;
	/* whether type is referenced from any other type */
	__u8 referenced: 1;
};
/* indent string length; one indent string is added for each indent level */
#define BTF_DATA_INDENT_STR_LEN 32

/*
 * Common internal data for BTF type data dump operations.
 */
struct btf_dump_data {
	const void *data_end; /* end of valid data to show */
	/* formatting options; presumably set from typed-dump opts — confirm
	 * at the (not visible here) typed-dump entry point
	 */
	bool compact;
	bool skip_names;
	bool emit_zeroes;
	__u8 indent_lvl; /* base indent level */
	char indent_str[BTF_DATA_INDENT_STR_LEN];
	/* below are used during iteration */
	int depth;
	bool is_array_member;
	bool is_array_terminated;
	bool is_array_char;
};
/* main dumper state; created by btf_dump__new(), freed by btf_dump__free() */
struct btf_dump {
	/* BTF data being dumped; borrowed, not owned */
	const struct btf *btf;
	/* user-supplied output callback and its opaque context */
	btf_dump_printf_fn_t printf_fn;
	void *cb_ctx;
	/* pointer size in bytes, from BTF or sizeof(void *) fallback */
	int ptr_sz;
	bool strip_mods;
	bool skip_anon_defs;
	/* highest type ID already covered by the state arrays below */
	int last_id;
	/* per-type auxiliary state */
	struct btf_dump_type_aux_state *type_states;
	size_t type_states_cap;
	/* per-type optional cached unique name, must be freed, if present */
	const char **cached_names;
	size_t cached_names_cap;
	/* topo-sorted list of dependent type definitions */
	__u32 *emit_queue;
	int emit_queue_cap;
	int emit_queue_cnt;
	/*
	 * stack of type declarations (e.g., chain of modifiers, arrays,
	 * funcs, etc)
	 */
	__u32 *decl_stack;
	int decl_stack_cap;
	int decl_stack_cnt;
	/* maps struct/union/enum name to a number of name occurrences */
	struct hashmap *type_names;
	/*
	 * maps typedef identifiers and enum value names to a number of such
	 * name occurrences
	 */
	struct hashmap *ident_names;
	/*
	 * data for typed display; allocated if needed.
	 */
	struct btf_dump_data *typed_dump;
};
  106. static size_t str_hash_fn(long key, void *ctx)
  107. {
  108. return str_hash((void *)key);
  109. }
  110. static bool str_equal_fn(long a, long b, void *ctx)
  111. {
  112. return strcmp((void *)a, (void *)b) == 0;
  113. }
  114. static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
  115. {
  116. return btf__name_by_offset(d->btf, name_off);
  117. }
/* forward formatted output to the user-supplied callback; note that
 * printf_fn receives a va_list, not variadic arguments
 */
static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	d->printf_fn(d->cb_ctx, fmt, args);
	va_end(args);
}
  125. static int btf_dump_mark_referenced(struct btf_dump *d);
  126. static int btf_dump_resize(struct btf_dump *d);
  127. struct btf_dump *btf_dump__new(const struct btf *btf,
  128. btf_dump_printf_fn_t printf_fn,
  129. void *ctx,
  130. const struct btf_dump_opts *opts)
  131. {
  132. struct btf_dump *d;
  133. int err;
  134. if (!OPTS_VALID(opts, btf_dump_opts))
  135. return libbpf_err_ptr(-EINVAL);
  136. if (!printf_fn)
  137. return libbpf_err_ptr(-EINVAL);
  138. d = calloc(1, sizeof(struct btf_dump));
  139. if (!d)
  140. return libbpf_err_ptr(-ENOMEM);
  141. d->btf = btf;
  142. d->printf_fn = printf_fn;
  143. d->cb_ctx = ctx;
  144. d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
  145. d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
  146. if (IS_ERR(d->type_names)) {
  147. err = PTR_ERR(d->type_names);
  148. d->type_names = NULL;
  149. goto err;
  150. }
  151. d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
  152. if (IS_ERR(d->ident_names)) {
  153. err = PTR_ERR(d->ident_names);
  154. d->ident_names = NULL;
  155. goto err;
  156. }
  157. err = btf_dump_resize(d);
  158. if (err)
  159. goto err;
  160. return d;
  161. err:
  162. btf_dump__free(d);
  163. return libbpf_err_ptr(err);
  164. }
/*
 * Grow per-type state arrays to cover all types currently present in BTF
 * and pre-compute "referenced" flags for any newly added types. Idempotent:
 * does nothing if no types were added since the previous call. Returns 0 on
 * success, -ENOMEM on allocation failure, or an error from
 * btf_dump_mark_referenced().
 */
static int btf_dump_resize(struct btf_dump *d)
{
	int err, last_id = btf__type_cnt(d->btf) - 1;

	if (last_id <= d->last_id)
		return 0;

	if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
			      sizeof(*d->type_states), last_id + 1))
		return -ENOMEM;
	if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
			      sizeof(*d->cached_names), last_id + 1))
		return -ENOMEM;

	if (d->last_id == 0) {
		/* VOID is special */
		d->type_states[0].order_state = ORDERED;
		d->type_states[0].emit_state = EMITTED;
	}

	/* eagerly determine referenced types for anon enums */
	err = btf_dump_mark_referenced(d);
	if (err)
		return err;

	/* only advance last_id once all state for new types is in place */
	d->last_id = last_id;
	return 0;
}
  188. static void btf_dump_free_names(struct hashmap *map)
  189. {
  190. size_t bkt;
  191. struct hashmap_entry *cur;
  192. if (!map)
  193. return;
  194. hashmap__for_each_entry(map, cur, bkt)
  195. free((void *)cur->pkey);
  196. hashmap__free(map);
  197. }
  198. void btf_dump__free(struct btf_dump *d)
  199. {
  200. int i;
  201. if (IS_ERR_OR_NULL(d))
  202. return;
  203. free(d->type_states);
  204. if (d->cached_names) {
  205. /* any set cached name is owned by us and should be freed */
  206. for (i = 0; i <= d->last_id; i++) {
  207. if (d->cached_names[i])
  208. free((void *)d->cached_names[i]);
  209. }
  210. }
  211. free(d->cached_names);
  212. free(d->emit_queue);
  213. free(d->decl_stack);
  214. btf_dump_free_names(d->type_names);
  215. btf_dump_free_names(d->ident_names);
  216. free(d);
  217. }
  218. static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
  219. static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
/*
 * Dump BTF type in a compilable C syntax, including all the necessary
 * dependent types, necessary for compilation. If some of the dependent types
 * were already emitted as part of previous btf_dump__dump_type() invocation
 * for another type, they won't be emitted again. This API allows callers to
 * filter out BTF types according to user-defined criterias and emitted only
 * minimal subset of types, necessary to compile everything. Full struct/union
 * definitions will still be emitted, even if the only usage is through
 * pointer and could be satisfied with just a forward declaration.
 *
 * Dumping is done in two high-level passes:
 *   1. Topologically sort type definitions to satisfy C rules of compilation.
 *   2. Emit type definitions in C syntax.
 *
 * Returns 0 on success; <0, otherwise.
 */
int btf_dump__dump_type(struct btf_dump *d, __u32 id)
{
	int err, i;

	if (id >= btf__type_cnt(d->btf))
		return libbpf_err(-EINVAL);

	/* make sure per-type state covers any types added since last dump */
	err = btf_dump_resize(d);
	if (err)
		return libbpf_err(err);

	/* pass 1: topo-sort @id and its dependencies into emit_queue */
	d->emit_queue_cnt = 0;
	err = btf_dump_order_type(d, id, false);
	if (err < 0)
		return libbpf_err(err);

	/* pass 2: emit queued definitions in dependency order */
	for (i = 0; i < d->emit_queue_cnt; i++)
		btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);

	return 0;
}
/*
 * Mark all types that are referenced from any other type. This is used to
 * determine top-level anonymous enums that need to be emitted as an
 * independent type declarations.
 * Anonymous enums come in two flavors: either embedded in a struct's field
 * definition, in which case they have to be declared inline as part of field
 * type declaration; or as a top-level anonymous enum, typically used for
 * declaring global constants. It's impossible to distinguish between two
 * without knowing whether given enum type was referenced from other type:
 * top-level anonymous enum won't be referenced by anything, while embedded
 * one will.
 */
static int btf_dump_mark_referenced(struct btf_dump *d)
{
	int i, j, n = btf__type_cnt(d->btf);
	const struct btf_type *t;
	__u16 vlen;

	/* only walk types added since the previous btf_dump_resize() pass */
	for (i = d->last_id + 1; i < n; i++) {
		t = btf__type_by_id(d->btf, i);
		vlen = btf_vlen(t);

		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_FWD:
		case BTF_KIND_FLOAT:
			/* leaf kinds: reference no other types */
			break;

		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* single referenced type stored in t->type */
			d->type_states[t->type].referenced = 1;
			break;

		case BTF_KIND_ARRAY: {
			const struct btf_array *a = btf_array(t);

			/* both element and index types are referenced */
			d->type_states[a->index_type].referenced = 1;
			d->type_states[a->type].referenced = 1;
			break;
		}
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION: {
			const struct btf_member *m = btf_members(t);

			/* every member's type is referenced */
			for (j = 0; j < vlen; j++, m++)
				d->type_states[m->type].referenced = 1;
			break;
		}
		case BTF_KIND_FUNC_PROTO: {
			const struct btf_param *p = btf_params(t);

			/* every parameter's type is referenced */
			for (j = 0; j < vlen; j++, p++)
				d->type_states[p->type].referenced = 1;
			break;
		}
		case BTF_KIND_DATASEC: {
			const struct btf_var_secinfo *v = btf_var_secinfos(t);

			/* every contained variable's type is referenced */
			for (j = 0; j < vlen; j++, v++)
				d->type_states[v->type].referenced = 1;
			break;
		}
		default:
			return -EINVAL;
		}
	}
	return 0;
}
  321. static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
  322. {
  323. __u32 *new_queue;
  324. size_t new_cap;
  325. if (d->emit_queue_cnt >= d->emit_queue_cap) {
  326. new_cap = max(16, d->emit_queue_cap * 3 / 2);
  327. new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0]));
  328. if (!new_queue)
  329. return -ENOMEM;
  330. d->emit_queue = new_queue;
  331. d->emit_queue_cap = new_cap;
  332. }
  333. d->emit_queue[d->emit_queue_cnt++] = id;
  334. return 0;
  335. }
  336. /*
  337. * Determine order of emitting dependent types and specified type to satisfy
  338. * C compilation rules. This is done through topological sorting with an
  339. * additional complication which comes from C rules. The main idea for C is
  340. * that if some type is "embedded" into a struct/union, it's size needs to be
  341. * known at the time of definition of containing type. E.g., for:
  342. *
  343. * struct A {};
  344. * struct B { struct A x; }
  345. *
  346. * struct A *HAS* to be defined before struct B, because it's "embedded",
  347. * i.e., it is part of struct B layout. But in the following case:
  348. *
  349. * struct A;
  350. * struct B { struct A *x; }
  351. * struct A {};
  352. *
  353. * it's enough to just have a forward declaration of struct A at the time of
  354. * struct B definition, as struct B has a pointer to struct A, so the size of
  355. * field x is known without knowing struct A size: it's sizeof(void *).
  356. *
  357. * Unfortunately, there are some trickier cases we need to handle, e.g.:
  358. *
  359. * struct A {}; // if this was forward-declaration: compilation error
  360. * struct B {
  361. * struct { // anonymous struct
  362. * struct A y;
  363. * } *x;
  364. * };
  365. *
  366. * In this case, struct B's field x is a pointer, so it's size is known
  367. * regardless of the size of (anonymous) struct it points to. But because this
  368. * struct is anonymous and thus defined inline inside struct B, *and* it
  369. * embeds struct A, compiler requires full definition of struct A to be known
  370. * before struct B can be defined. This creates a transitive dependency
  371. * between struct A and struct B. If struct A was forward-declared before
  372. * struct B definition and fully defined after struct B definition, that would
  373. * trigger compilation error.
  374. *
  375. * All this means that while we are doing topological sorting on BTF type
  376. * graph, we need to determine relationships between different types (graph
  377. * nodes):
  378. * - weak link (relationship) between X and Y, if Y *CAN* be
  379. * forward-declared at the point of X definition;
  380. * - strong link, if Y *HAS* to be fully-defined before X can be defined.
  381. *
  382. * The rule is as follows. Given a chain of BTF types from X to Y, if there is
  383. * BTF_KIND_PTR type in the chain and at least one non-anonymous type
  384. * Z (excluding X, including Y), then link is weak. Otherwise, it's strong.
  385. * Weak/strong relationship is determined recursively during DFS traversal and
  386. * is returned as a result from btf_dump_order_type().
  387. *
  388. * btf_dump_order_type() is trying to avoid unnecessary forward declarations,
  389. * but it is not guaranteeing that no extraneous forward declarations will be
  390. * emitted.
  391. *
  392. * To avoid extra work, algorithm marks some of BTF types as ORDERED, when
  393. * it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT,
  394. * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the
  395. * entire graph path, so depending where from one came to that BTF type, it
  396. * might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM,
  397. * once they are processed, there is no need to do it again, so they are
  398. * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces
  399. * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But
  400. * in any case, once those are processed, no need to do it again, as the
  401. * result won't change.
  402. *
  403. * Returns:
  404. * - 1, if type is part of strong link (so there is strong topological
  405. * ordering requirements);
  406. * - 0, if type is part of weak link (so can be satisfied through forward
  407. * declaration);
  408. * - <0, on error (e.g., unsatisfiable type loop detected).
  409. */
  410. static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
  411. {
  412. /*
  413. * Order state is used to detect strong link cycles, but only for BTF
  414. * kinds that are or could be an independent definition (i.e.,
  415. * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
  416. * func_protos, modifiers are just means to get to these definitions.
  417. * Int/void don't need definitions, they are assumed to be always
  418. * properly defined. We also ignore datasec, var, and funcs for now.
  419. * So for all non-defining kinds, we never even set ordering state,
  420. * for defining kinds we set ORDERING and subsequently ORDERED if it
  421. * forms a strong link.
  422. */
  423. struct btf_dump_type_aux_state *tstate = &d->type_states[id];
  424. const struct btf_type *t;
  425. __u16 vlen;
  426. int err, i;
  427. /* return true, letting typedefs know that it's ok to be emitted */
  428. if (tstate->order_state == ORDERED)
  429. return 1;
  430. t = btf__type_by_id(d->btf, id);
  431. if (tstate->order_state == ORDERING) {
  432. /* type loop, but resolvable through fwd declaration */
  433. if (btf_is_composite(t) && through_ptr && t->name_off != 0)
  434. return 0;
  435. pr_warn("unsatisfiable type cycle, id:[%u]\n", id);
  436. return -ELOOP;
  437. }
  438. switch (btf_kind(t)) {
  439. case BTF_KIND_INT:
  440. case BTF_KIND_FLOAT:
  441. tstate->order_state = ORDERED;
  442. return 0;
  443. case BTF_KIND_PTR:
  444. err = btf_dump_order_type(d, t->type, true);
  445. tstate->order_state = ORDERED;
  446. return err;
  447. case BTF_KIND_ARRAY:
  448. return btf_dump_order_type(d, btf_array(t)->type, false);
  449. case BTF_KIND_STRUCT:
  450. case BTF_KIND_UNION: {
  451. const struct btf_member *m = btf_members(t);
  452. /*
  453. * struct/union is part of strong link, only if it's embedded
  454. * (so no ptr in a path) or it's anonymous (so has to be
  455. * defined inline, even if declared through ptr)
  456. */
  457. if (through_ptr && t->name_off != 0)
  458. return 0;
  459. tstate->order_state = ORDERING;
  460. vlen = btf_vlen(t);
  461. for (i = 0; i < vlen; i++, m++) {
  462. err = btf_dump_order_type(d, m->type, false);
  463. if (err < 0)
  464. return err;
  465. }
  466. if (t->name_off != 0) {
  467. err = btf_dump_add_emit_queue_id(d, id);
  468. if (err < 0)
  469. return err;
  470. }
  471. tstate->order_state = ORDERED;
  472. return 1;
  473. }
  474. case BTF_KIND_ENUM:
  475. case BTF_KIND_ENUM64:
  476. case BTF_KIND_FWD:
  477. /*
  478. * non-anonymous or non-referenced enums are top-level
  479. * declarations and should be emitted. Same logic can be
  480. * applied to FWDs, it won't hurt anyways.
  481. */
  482. if (t->name_off != 0 || !tstate->referenced) {
  483. err = btf_dump_add_emit_queue_id(d, id);
  484. if (err)
  485. return err;
  486. }
  487. tstate->order_state = ORDERED;
  488. return 1;
  489. case BTF_KIND_TYPEDEF: {
  490. int is_strong;
  491. is_strong = btf_dump_order_type(d, t->type, through_ptr);
  492. if (is_strong < 0)
  493. return is_strong;
  494. /* typedef is similar to struct/union w.r.t. fwd-decls */
  495. if (through_ptr && !is_strong)
  496. return 0;
  497. /* typedef is always a named definition */
  498. err = btf_dump_add_emit_queue_id(d, id);
  499. if (err)
  500. return err;
  501. d->type_states[id].order_state = ORDERED;
  502. return 1;
  503. }
  504. case BTF_KIND_VOLATILE:
  505. case BTF_KIND_CONST:
  506. case BTF_KIND_RESTRICT:
  507. case BTF_KIND_TYPE_TAG:
  508. return btf_dump_order_type(d, t->type, through_ptr);
  509. case BTF_KIND_FUNC_PROTO: {
  510. const struct btf_param *p = btf_params(t);
  511. bool is_strong;
  512. err = btf_dump_order_type(d, t->type, through_ptr);
  513. if (err < 0)
  514. return err;
  515. is_strong = err > 0;
  516. vlen = btf_vlen(t);
  517. for (i = 0; i < vlen; i++, p++) {
  518. err = btf_dump_order_type(d, p->type, through_ptr);
  519. if (err < 0)
  520. return err;
  521. if (err > 0)
  522. is_strong = true;
  523. }
  524. return is_strong;
  525. }
  526. case BTF_KIND_FUNC:
  527. case BTF_KIND_VAR:
  528. case BTF_KIND_DATASEC:
  529. case BTF_KIND_DECL_TAG:
  530. d->type_states[id].order_state = ORDERED;
  531. return 0;
  532. default:
  533. return -EINVAL;
  534. }
  535. }
  536. static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
  537. const struct btf_type *t);
  538. static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
  539. const struct btf_type *t);
  540. static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
  541. const struct btf_type *t, int lvl);
  542. static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
  543. const struct btf_type *t);
  544. static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
  545. const struct btf_type *t, int lvl);
  546. static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
  547. const struct btf_type *t);
  548. static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
  549. const struct btf_type *t, int lvl);
/* a local view into a shared stack; borrows (does not own) the id array —
 * presumably a window into btf_dump's decl_stack; confirm at call sites
 */
struct id_stack {
	const __u32 *ids;
	int cnt;
};
  555. static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
  556. const char *fname, int lvl);
  557. static void btf_dump_emit_type_chain(struct btf_dump *d,
  558. struct id_stack *decl_stack,
  559. const char *fname, int lvl);
  560. static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
  561. static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
  562. static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
  563. const char *orig_name);
  564. static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
  565. {
  566. const struct btf_type *t = btf__type_by_id(d->btf, id);
  567. /* __builtin_va_list is a compiler built-in, which causes compilation
  568. * errors, when compiling w/ different compiler, then used to compile
  569. * original code (e.g., GCC to compile kernel, Clang to use generated
  570. * C header from BTF). As it is built-in, it should be already defined
  571. * properly internally in compiler.
  572. */
  573. if (t->name_off == 0)
  574. return false;
  575. return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
  576. }
/*
 * Emit C-syntax definitions of types from chains of BTF types.
 *
 * High-level handling of determining necessary forward declarations is
 * handled by btf_dump_emit_type() itself, but all nitty-gritty details of
 * emitting type declarations/definitions in C syntax are handled by a combo
 * of btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
 * corresponding btf_dump_emit_*_{def,fwd}() functions.
 *
 * We also keep track of "containing struct/union type ID" (@cont_id, 0 for
 * a top-level definition) to determine when we reference it from inside and
 * thus can avoid emitting unnecessary forward declaration.
 *
 * This algorithm is designed in such a way, that even if some error occurs
 * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
 * that doesn't comply to C rules completely), algorithm will try to proceed
 * and produce as much meaningful output as possible.
 */
static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
{
	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
	bool top_level_def = cont_id == 0;
	const struct btf_type *t;
	__u16 kind;

	if (tstate->emit_state == EMITTED)
		return;

	t = btf__type_by_id(d->btf, id);
	kind = btf_kind(t);

	/* EMITTING means we re-entered a type that is in the middle of being
	 * emitted, i.e., we hit a reference cycle; break it with a forward
	 * declaration, where the C language permits one
	 */
	if (tstate->emit_state == EMITTING) {
		if (tstate->fwd_emitted)
			return;

		switch (kind) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/*
			 * if we are referencing a struct/union that we are
			 * part of - then no need for fwd declaration
			 */
			if (id == cont_id)
				return;
			if (t->name_off == 0) {
				/* anonymous types can't be forward-declared */
				pr_warn("anonymous struct/union loop, id:[%u]\n",
					id);
				return;
			}
			btf_dump_emit_struct_fwd(d, id, t);
			btf_dump_printf(d, ";\n\n");
			tstate->fwd_emitted = 1;
			break;
		case BTF_KIND_TYPEDEF:
			/*
			 * for typedef fwd_emitted means typedef definition
			 * was emitted, but it can be used only for "weak"
			 * references through pointer only, not for embedding
			 */
			if (!btf_dump_is_blacklisted(d, id)) {
				btf_dump_emit_typedef_def(d, id, t, 0);
				btf_dump_printf(d, ";\n\n");
			}
			tstate->fwd_emitted = 1;
			break;
		default:
			break;
		}

		return;
	}

	switch (kind) {
	case BTF_KIND_INT:
		/* Emit type alias definitions if necessary */
		btf_dump_emit_missing_aliases(d, id, t);

		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		if (top_level_def) {
			btf_dump_emit_enum_def(d, id, t, 0);
			btf_dump_printf(d, ";\n\n");
		}
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		/* modifiers/pointers just pass through to the pointee type */
		btf_dump_emit_type(d, t->type, cont_id);
		break;
	case BTF_KIND_ARRAY:
		btf_dump_emit_type(d, btf_array(t)->type, cont_id);
		break;
	case BTF_KIND_FWD:
		btf_dump_emit_fwd_def(d, id, t);
		btf_dump_printf(d, ";\n\n");
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_TYPEDEF:
		tstate->emit_state = EMITTING;
		btf_dump_emit_type(d, t->type, id);
		/*
		 * typedef can serve as both definition and forward
		 * declaration; at this stage someone depends on
		 * typedef as a forward declaration (refers to it
		 * through pointer), so unless we already did it,
		 * emit typedef as a forward declaration
		 */
		if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
			btf_dump_emit_typedef_def(d, id, t, 0);
			btf_dump_printf(d, ";\n\n");
		}
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		tstate->emit_state = EMITTING;
		/* if it's a top-level struct/union definition or struct/union
		 * is anonymous, then in C we'll be emitting all fields and
		 * their types (as opposed to just `struct X`), so we need to
		 * make sure that all types, referenced from struct/union
		 * members have necessary forward-declarations, where
		 * applicable
		 */
		if (top_level_def || t->name_off == 0) {
			const struct btf_member *m = btf_members(t);
			__u16 vlen = btf_vlen(t);
			int i, new_cont_id;

			/* anonymous struct is still logically "inside" the
			 * current container, so keep cont_id in that case */
			new_cont_id = t->name_off == 0 ? cont_id : id;
			for (i = 0; i < vlen; i++, m++)
				btf_dump_emit_type(d, m->type, new_cont_id);
		} else if (!tstate->fwd_emitted && id != cont_id) {
			btf_dump_emit_struct_fwd(d, id, t);
			btf_dump_printf(d, ";\n\n");
			tstate->fwd_emitted = 1;
		}

		if (top_level_def) {
			btf_dump_emit_struct_def(d, id, t, 0);
			btf_dump_printf(d, ";\n\n");
			tstate->emit_state = EMITTED;
		} else {
			/* non-top-level definition still has to be emitted
			 * in full later, so reset the state */
			tstate->emit_state = NOT_EMITTED;
		}
		break;
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *p = btf_params(t);
		__u16 n = btf_vlen(t);
		int i;

		/* recurse into return type and every parameter type */
		btf_dump_emit_type(d, t->type, cont_id);
		for (i = 0; i < n; i++, p++)
			btf_dump_emit_type(d, p->type, cont_id);
		break;
	}
	default:
		break;
	}
}
/*
 * Heuristically determine whether a struct needs __attribute__((packed)):
 * it does if any non-bitfield member is not naturally aligned, or if the
 * struct's total size is not a multiple of its largest member alignment.
 * NOTE(review): @id appears unused here — presumably kept for signature
 * symmetry with other helpers; confirm against callers.
 */
static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
				 const struct btf_type *t)
{
	const struct btf_member *m;
	int max_align = 1, align, i, bit_sz;
	__u16 vlen;

	m = btf_members(t);
	vlen = btf_vlen(t);
	/* all non-bitfield fields have to be naturally aligned */
	for (i = 0; i < vlen; i++, m++) {
		align = btf__align_of(btf, m->type);
		bit_sz = btf_member_bitfield_size(t, i);
		/* align == 0 means alignment couldn't be determined; skip */
		if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
			return true;
		max_align = max(align, max_align);
	}
	/* size of a non-packed struct has to be a multiple of its alignment */
	if (t->size % max_align != 0)
		return true;
	/*
	 * if original struct was marked as packed, but its layout is
	 * naturally aligned, we'll detect that it's not packed
	 */
	return false;
}
/*
 * Emit explicit padding between @cur_off and @next_off (both in bits) as
 * unnamed bitfield markers (`<type>: <bits>;`). @next_align is the next
 * field's byte alignment; @in_bitfield signals we are between two adjacent
 * bitfields, in which case exact bit counts are emitted.
 */
static void btf_dump_emit_bit_padding(const struct btf_dump *d,
				      int cur_off, int next_off, int next_align,
				      bool in_bitfield, int lvl)
{
	/* candidate padding types, widest first; "long" width follows the
	 * target's pointer size */
	const struct {
		const char *name;
		int bits;
	} pads[] = {
		{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
	};
	int new_off = 0, pad_bits = 0, bits, i;
	const char *pad_type = NULL;

	if (cur_off >= next_off)
		return; /* no gap */

	/* For filling out padding we want to take advantage of
	 * natural alignment rules to minimize unnecessary explicit
	 * padding. First, we find the largest type (among long, int,
	 * short, or char) that can be used to force naturally aligned
	 * boundary. Once determined, we'll use such type to fill in
	 * the remaining padding gap. In some cases we can rely on
	 * compiler filling some gaps, but sometimes we need to force
	 * alignment to close natural alignment with markers like
	 * `long: 0` (this is always the case for bitfields). Note
	 * that even if struct itself has, let's say 4-byte alignment
	 * (i.e., it only uses up to int-aligned types), using `long:
	 * X;` explicit padding doesn't actually change struct's
	 * overall alignment requirements, but compiler does take into
	 * account that type's (long, in this example) natural
	 * alignment requirements when adding implicit padding. We use
	 * this fact heavily and don't worry about ruining correct
	 * struct alignment requirement.
	 */
	for (i = 0; i < ARRAY_SIZE(pads); i++) {
		pad_bits = pads[i].bits;
		pad_type = pads[i].name;

		new_off = roundup(cur_off, pad_bits);
		if (new_off <= next_off)
			break;
	}

	if (new_off > cur_off && new_off <= next_off) {
		/* We need explicit `<type>: 0` aligning mark if next
		 * field is right on alignment offset and its
		 * alignment requirement is less strict than <type>'s
		 * alignment (so compiler won't naturally align to the
		 * offset we expect), or if subsequent `<type>: X`,
		 * will actually completely fit in the remaining hole,
		 * making compiler basically ignore `<type>: X`
		 * completely.
		 */
		if (in_bitfield ||
		    (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
		    (new_off != next_off && next_off - new_off <= new_off - cur_off))
			/* but for bitfields we'll emit explicit bit count */
			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
					in_bitfield ? new_off - cur_off : 0);
		cur_off = new_off;
	}

	/* Now we know we start at naturally aligned offset for a chosen
	 * padding type (long, int, short, or char), and so the rest is just
	 * a straightforward filling of remaining padding gap with full
	 * `<type>: sizeof(<type>);` markers, except for the last one, which
	 * might need smaller than sizeof(<type>) padding.
	 */
	while (cur_off != next_off) {
		bits = min(next_off - cur_off, pad_bits);
		if (bits == pad_bits) {
			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
			cur_off += bits;
			continue;
		}
		/* For the remainder padding that doesn't cover entire
		 * pad_type bit length, we pick the smallest necessary type.
		 * This is pure aesthetics, we could have just used `long`,
		 * but having smallest necessary one communicates better the
		 * scale of the padding gap.
		 */
		for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
			pad_type = pads[i].name;
			pad_bits = pads[i].bits;
			if (pad_bits < bits)
				continue;

			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
			cur_off += bits;
			break;
		}
	}
}
  843. static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
  844. const struct btf_type *t)
  845. {
  846. btf_dump_printf(d, "%s%s%s",
  847. btf_is_struct(t) ? "struct" : "union",
  848. t->name_off ? " " : "",
  849. btf_dump_type_name(d, id));
  850. }
/*
 * Emit complete struct/union definition with all members, tracking member
 * offsets in bits so explicit padding can be inserted where the compiler
 * wouldn't naturally reproduce the original layout. Appends
 * __attribute__((packed)) if the layout requires it.
 */
static void btf_dump_emit_struct_def(struct btf_dump *d,
				     __u32 id,
				     const struct btf_type *t,
				     int lvl)
{
	const struct btf_member *m = btf_members(t);
	bool is_struct = btf_is_struct(t);
	bool packed, prev_bitfield = false;
	int align, i, off = 0; /* off: current offset in bits */
	__u16 vlen = btf_vlen(t);

	align = btf__align_of(d->btf, id);
	packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;

	btf_dump_printf(d, "%s%s%s {",
			is_struct ? "struct" : "union",
			t->name_off ? " " : "",
			btf_dump_type_name(d, id));

	for (i = 0; i < vlen; i++, m++) {
		const char *fname;
		int m_off, m_sz, m_align;
		bool in_bitfield;

		fname = btf_name_of(d, m->name_off);
		m_sz = btf_member_bitfield_size(t, i); /* 0 for non-bitfield */
		m_off = btf_member_bit_offset(t, i);
		/* packed struct members are treated as 1-byte aligned */
		m_align = packed ? 1 : btf__align_of(d->btf, m->type);

		in_bitfield = prev_bitfield && m_sz != 0;

		btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
		btf_dump_printf(d, "\n%s", pfx(lvl + 1));
		btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);

		if (m_sz) {
			btf_dump_printf(d, ": %d", m_sz);
			off = m_off + m_sz;
			prev_bitfield = true;
		} else {
			/* negative resolve_size (error) is clamped to 0 */
			m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
			off = m_off + m_sz * 8;
			prev_bitfield = false;
		}

		btf_dump_printf(d, ";");
	}

	/* pad at the end, if necessary */
	if (is_struct)
		btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);

	/*
	 * Keep `struct empty {}` on a single line,
	 * only print newline when there are regular or padding fields.
	 */
	if (vlen || t->size) {
		btf_dump_printf(d, "\n");
		btf_dump_printf(d, "%s}", pfx(lvl));
	} else {
		btf_dump_printf(d, "}");
	}
	if (packed)
		btf_dump_printf(d, " __attribute__((packed))");
}
/* Mapping of compiler-internal typedef names to standard base type
 * replacements; consumed by btf_dump_emit_missing_aliases(). */
static const char *missing_base_types[][2] = {
	/*
	 * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
	 * SIMD intrinsics. Alias them to standard base types.
	 */
	{ "__Poly8_t",		"unsigned char" },
	{ "__Poly16_t",		"unsigned short" },
	{ "__Poly64_t",		"unsigned long long" },
	{ "__Poly128_t",	"unsigned __int128" },
};
  916. static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
  917. const struct btf_type *t)
  918. {
  919. const char *name = btf_dump_type_name(d, id);
  920. int i;
  921. for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
  922. if (strcmp(name, missing_base_types[i][0]) == 0) {
  923. btf_dump_printf(d, "typedef %s %s;\n\n",
  924. missing_base_types[i][1], name);
  925. break;
  926. }
  927. }
  928. }
  929. static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
  930. const struct btf_type *t)
  931. {
  932. btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
  933. }
/*
 * Emit all enumerators of a 32-bit enum, one per line, as
 * `<name>[___<N>] = <value>,`. kflag of the enum selects signed vs
 * unsigned value formatting.
 */
static void btf_dump_emit_enum32_val(struct btf_dump *d,
				     const struct btf_type *t,
				     int lvl, __u16 vlen)
{
	const struct btf_enum *v = btf_enum(t);
	bool is_signed = btf_kflag(t);
	const char *fmt_str;
	const char *name;
	size_t dup_cnt;
	int i;

	for (i = 0; i < vlen; i++, v++) {
		name = btf_name_of(d, v->name_off);
		/* enumerators share namespace with typedef idents */
		dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
		if (dup_cnt > 1) {
			/* duplicated names get a ___<N> suffix to stay unique */
			fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
			btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val);
		} else {
			fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,";
			btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val);
		}
	}
}
/*
 * Emit all enumerators of a 64-bit enum (ENUM64), one per line, with
 * LL/ULL suffixes so values survive round-trip through a C compiler.
 * kflag of the enum selects signed vs unsigned formatting.
 */
static void btf_dump_emit_enum64_val(struct btf_dump *d,
				     const struct btf_type *t,
				     int lvl, __u16 vlen)
{
	const struct btf_enum64 *v = btf_enum64(t);
	bool is_signed = btf_kflag(t);
	const char *fmt_str;
	const char *name;
	size_t dup_cnt;
	__u64 val;
	int i;

	for (i = 0; i < vlen; i++, v++) {
		name = btf_name_of(d, v->name_off);
		/* enumerators share namespace with typedef idents */
		dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
		val = btf_enum64_value(v);
		if (dup_cnt > 1) {
			fmt_str = is_signed ? "\n%s%s___%zd = %lldLL,"
					    : "\n%s%s___%zd = %lluULL,";
			btf_dump_printf(d, fmt_str,
					pfx(lvl + 1), name, dup_cnt,
					(unsigned long long)val);
		} else {
			fmt_str = is_signed ? "\n%s%s = %lldLL,"
					    : "\n%s%s = %lluULL,";
			btf_dump_printf(d, fmt_str,
					pfx(lvl + 1), name,
					(unsigned long long)val);
		}
	}
}
/*
 * Emit full enum definition (`enum <name> { ... }`), dispatching to the
 * 32-bit or 64-bit enumerator emitter, plus size-forcing attributes for
 * enums whose BTF size differs from the default int size.
 */
static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
				   const struct btf_type *t,
				   int lvl)
{
	__u16 vlen = btf_vlen(t);

	btf_dump_printf(d, "enum%s%s",
			t->name_off ? " " : "",
			btf_dump_type_name(d, id));

	/* enum fwd (no enumerators) ends here, without braces */
	if (!vlen)
		return;

	btf_dump_printf(d, " {");
	if (btf_is_enum(t))
		btf_dump_emit_enum32_val(d, t, lvl, vlen);
	else
		btf_dump_emit_enum64_val(d, t, lvl, vlen);
	btf_dump_printf(d, "\n%s}", pfx(lvl));

	/* special case enums with special sizes */
	if (t->size == 1) {
		/* one-byte enums can be forced with mode(byte) attribute */
		btf_dump_printf(d, " __attribute__((mode(byte)))");
	} else if (t->size == 8 && d->ptr_sz == 8) {
		/* enum can be 8-byte sized if one of the enumerator values
		 * doesn't fit in 32-bit integer, or by adding mode(word)
		 * attribute (but probably only on 64-bit architectures); do
		 * our best here to try to satisfy the contract without adding
		 * unnecessary attributes
		 */
		bool needs_word_mode;

		if (btf_is_enum(t)) {
			/* enum can't represent 64-bit values, so we need word mode */
			needs_word_mode = true;
		} else {
			/* enum64 needs mode(word) if none of its values has
			 * non-zero upper 32-bits (which means that all values
			 * fit in 32-bit integers and won't cause compiler to
			 * bump enum to be 64-bit naturally)
			 */
			int i;

			needs_word_mode = true;
			for (i = 0; i < vlen; i++) {
				if (btf_enum64(t)[i].val_hi32 != 0) {
					needs_word_mode = false;
					break;
				}
			}
		}
		if (needs_word_mode)
			btf_dump_printf(d, " __attribute__((mode(word)))");
	}
}
  1037. static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
  1038. const struct btf_type *t)
  1039. {
  1040. const char *name = btf_dump_type_name(d, id);
  1041. if (btf_kflag(t))
  1042. btf_dump_printf(d, "union %s", name);
  1043. else
  1044. btf_dump_printf(d, "struct %s", name);
  1045. }
  1046. static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
  1047. const struct btf_type *t, int lvl)
  1048. {
  1049. const char *name = btf_dump_ident_name(d, id);
  1050. /*
  1051. * Old GCC versions are emitting invalid typedef for __gnuc_va_list
  1052. * pointing to VOID. This generates warnings from btf_dump() and
  1053. * results in uncompilable header file, so we are fixing it up here
  1054. * with valid typedef into __builtin_va_list.
  1055. */
  1056. if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) {
  1057. btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list");
  1058. return;
  1059. }
  1060. btf_dump_printf(d, "typedef ");
  1061. btf_dump_emit_type_decl(d, t->type, name, lvl);
  1062. }
  1063. static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
  1064. {
  1065. __u32 *new_stack;
  1066. size_t new_cap;
  1067. if (d->decl_stack_cnt >= d->decl_stack_cap) {
  1068. new_cap = max(16, d->decl_stack_cap * 3 / 2);
  1069. new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0]));
  1070. if (!new_stack)
  1071. return -ENOMEM;
  1072. d->decl_stack = new_stack;
  1073. d->decl_stack_cap = new_cap;
  1074. }
  1075. d->decl_stack[d->decl_stack_cnt++] = id;
  1076. return 0;
  1077. }
  1078. /*
  1079. * Emit type declaration (e.g., field type declaration in a struct or argument
  1080. * declaration in function prototype) in correct C syntax.
  1081. *
  1082. * For most types it's trivial, but there are few quirky type declaration
  1083. * cases worth mentioning:
  1084. * - function prototypes (especially nesting of function prototypes);
  1085. * - arrays;
  1086. * - const/volatile/restrict for pointers vs other types.
  1087. *
  1088. * For a good discussion of *PARSING* C syntax (as a human), see
  1089. * Peter van der Linden's "Expert C Programming: Deep C Secrets",
  1090. * Ch.3 "Unscrambling Declarations in C".
  1091. *
  1092. * It won't help with BTF to C conversion much, though, as it's an opposite
  1093. * problem. So we came up with this algorithm in reverse to van der Linden's
  1094. * parsing algorithm. It goes from structured BTF representation of type
  1095. * declaration to a valid compilable C syntax.
  1096. *
  1097. * For instance, consider this C typedef:
 * typedef const int * const * arr_t[10];
  1099. * It will be represented in BTF with this chain of BTF types:
  1100. * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
  1101. *
  1102. * Notice how [const] modifier always goes before type it modifies in BTF type
  1103. * graph, but in C syntax, const/volatile/restrict modifiers are written to
  1104. * the right of pointers, but to the left of other types. There are also other
  1105. * quirks, like function pointers, arrays of them, functions returning other
  1106. * functions, etc.
  1107. *
  1108. * We handle that by pushing all the types to a stack, until we hit "terminal"
  1109. * type (int/enum/struct/union/fwd). Then depending on the kind of a type on
  1110. * top of a stack, modifiers are handled differently. Array/function pointers
  1111. * have also wildly different syntax and how nesting of them are done. See
  1112. * code for authoritative definition.
  1113. *
  1114. * To avoid allocating new stack for each independent chain of BTF types, we
  1115. * share one bigger stack, with each chain working only on its own local view
  1116. * of a stack frame. Some care is required to "pop" stack frames after
  1117. * processing type declaration chain.
  1118. */
  1119. int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
  1120. const struct btf_dump_emit_type_decl_opts *opts)
  1121. {
  1122. const char *fname;
  1123. int lvl, err;
  1124. if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
  1125. return libbpf_err(-EINVAL);
  1126. err = btf_dump_resize(d);
  1127. if (err)
  1128. return libbpf_err(err);
  1129. fname = OPTS_GET(opts, field_name, "");
  1130. lvl = OPTS_GET(opts, indent_level, 0);
  1131. d->strip_mods = OPTS_GET(opts, strip_mods, false);
  1132. btf_dump_emit_type_decl(d, id, fname, lvl);
  1133. d->strip_mods = false;
  1134. return 0;
  1135. }
/*
 * Emit C declaration of type @id with field name @fname at indentation
 * level @lvl. Walks the reference chain down to a terminal type, pushing
 * each link onto the shared decl stack, then hands the collected chain to
 * btf_dump_emit_type_chain() for actual C syntax emission.
 */
static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
				    const char *fname, int lvl)
{
	struct id_stack decl_stack;
	const struct btf_type *t;
	int err, stack_start;

	stack_start = d->decl_stack_cnt;
	for (;;) {
		t = btf__type_by_id(d->btf, id);

		/* optionally omit const/volatile/restrict from the output */
		if (d->strip_mods && btf_is_mod(t))
			goto skip_mod;

		err = btf_dump_push_decl_stack_id(d, id);
		if (err < 0) {
			/*
			 * if we don't have enough memory for entire type decl
			 * chain, restore stack, emit warning, and try to
			 * proceed nevertheless
			 */
			pr_warn("not enough memory for decl stack:%d", err);
			d->decl_stack_cnt = stack_start;
			return;
		}
skip_mod:
		/* VOID */
		if (id == 0)
			break;

		switch (btf_kind(t)) {
		case BTF_KIND_PTR:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_FUNC_PROTO:
		case BTF_KIND_TYPE_TAG:
			/* follow the reference to the next chain link */
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			id = btf_array(t)->type;
			break;
		case BTF_KIND_INT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_FWD:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FLOAT:
			/* terminal type: chain is complete */
			goto done;
		default:
			pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
				btf_kind(t), id);
			goto done;
		}
	}
done:
	/*
	 * We might be inside a chain of declarations (e.g., array of function
	 * pointers returning anonymous (so inlined) structs, having another
	 * array field). Each of those needs its own "stack frame" to handle
	 * emitting of declarations. Those stack frames are non-overlapping
	 * portions of shared btf_dump->decl_stack. To make it a bit nicer to
	 * handle this set of nested stacks, we create a view corresponding to
	 * our own "stack frame" and work with it as an independent stack.
	 * We'll need to clean up after emit_type_chain() returns, though.
	 */
	decl_stack.ids = d->decl_stack + stack_start;
	decl_stack.cnt = d->decl_stack_cnt - stack_start;
	btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
	/*
	 * emit_type_chain() guarantees that it will pop its entire decl_stack
	 * frame before returning. But it works with a read-only view into
	 * decl_stack, so it doesn't actually pop anything from the
	 * perspective of shared btf_dump->decl_stack, per se. We need to
	 * reset decl_stack state to how it was before us to avoid it growing
	 * all the time.
	 */
	d->decl_stack_cnt = stack_start;
}
  1213. static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
  1214. {
  1215. const struct btf_type *t;
  1216. __u32 id;
  1217. while (decl_stack->cnt) {
  1218. id = decl_stack->ids[decl_stack->cnt - 1];
  1219. t = btf__type_by_id(d->btf, id);
  1220. switch (btf_kind(t)) {
  1221. case BTF_KIND_VOLATILE:
  1222. btf_dump_printf(d, "volatile ");
  1223. break;
  1224. case BTF_KIND_CONST:
  1225. btf_dump_printf(d, "const ");
  1226. break;
  1227. case BTF_KIND_RESTRICT:
  1228. btf_dump_printf(d, "restrict ");
  1229. break;
  1230. default:
  1231. return;
  1232. }
  1233. decl_stack->cnt--;
  1234. }
  1235. }
  1236. static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack)
  1237. {
  1238. const struct btf_type *t;
  1239. __u32 id;
  1240. while (decl_stack->cnt) {
  1241. id = decl_stack->ids[decl_stack->cnt - 1];
  1242. t = btf__type_by_id(d->btf, id);
  1243. if (!btf_is_mod(t))
  1244. return;
  1245. decl_stack->cnt--;
  1246. }
  1247. }
  1248. static void btf_dump_emit_name(const struct btf_dump *d,
  1249. const char *name, bool last_was_ptr)
  1250. {
  1251. bool separate = name[0] && !last_was_ptr;
  1252. btf_dump_printf(d, "%s%s", separate ? " " : "", name);
  1253. }
/*
 * Emit the collected declaration chain (@decls, terminal type on top) as
 * valid C declaration syntax for field/variable @fname, recursing for
 * nested array and function-prototype declarators. Pops its entire stack
 * frame before returning.
 */
static void btf_dump_emit_type_chain(struct btf_dump *d,
				     struct id_stack *decls,
				     const char *fname, int lvl)
{
	/*
	 * last_was_ptr is used to determine if we need to separate pointer
	 * asterisk (*) from previous part of type signature with space, so
	 * that we get `int ***`, instead of `int * * *`. We default to true
	 * for cases where we have single pointer in a chain. E.g., in ptr ->
	 * func_proto case. func_proto will start a new emit_type_chain call
	 * with just ptr, which should be emitted as (*) or (*<fname>), so we
	 * don't want to prepend space for that last pointer.
	 */
	bool last_was_ptr = true;
	const struct btf_type *t;
	const char *name;
	__u16 kind;
	__u32 id;

	while (decls->cnt) {
		id = decls->ids[--decls->cnt];
		if (id == 0) {
			/* VOID is a special snowflake */
			btf_dump_emit_mods(d, decls);
			btf_dump_printf(d, "void");
			last_was_ptr = false;
			continue;
		}

		t = btf__type_by_id(d->btf, id);
		kind = btf_kind(t);

		switch (kind) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
			btf_dump_emit_mods(d, decls);
			name = btf_name_of(d, t->name_off);
			btf_dump_printf(d, "%s", name);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			btf_dump_emit_mods(d, decls);
			/* inline anonymous struct/union */
			if (t->name_off == 0 && !d->skip_anon_defs)
				btf_dump_emit_struct_def(d, id, t, lvl);
			else
				btf_dump_emit_struct_fwd(d, id, t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			btf_dump_emit_mods(d, decls);
			/* inline anonymous enum */
			if (t->name_off == 0 && !d->skip_anon_defs)
				btf_dump_emit_enum_def(d, id, t, lvl);
			else
				btf_dump_emit_enum_fwd(d, id, t);
			break;
		case BTF_KIND_FWD:
			btf_dump_emit_mods(d, decls);
			btf_dump_emit_fwd_def(d, id, t);
			break;
		case BTF_KIND_TYPEDEF:
			btf_dump_emit_mods(d, decls);
			btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
			break;
		case BTF_KIND_PTR:
			/* space before '*' only when previous token wasn't '*' */
			btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
			break;
		case BTF_KIND_VOLATILE:
			btf_dump_printf(d, " volatile");
			break;
		case BTF_KIND_CONST:
			btf_dump_printf(d, " const");
			break;
		case BTF_KIND_RESTRICT:
			btf_dump_printf(d, " restrict");
			break;
		case BTF_KIND_TYPE_TAG:
			btf_dump_emit_mods(d, decls);
			name = btf_name_of(d, t->name_off);
			btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
			break;
		case BTF_KIND_ARRAY: {
			const struct btf_array *a = btf_array(t);
			const struct btf_type *next_t;
			__u32 next_id;
			bool multidim;
			/*
			 * GCC has a bug
			 * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
			 * which causes it to emit extra const/volatile
			 * modifiers for an array, if array's element type has
			 * const/volatile modifiers. Clang doesn't do that.
			 * In general, it doesn't seem very meaningful to have
			 * a const/volatile modifier for array, so we are
			 * going to silently skip them here.
			 */
			btf_dump_drop_mods(d, decls);

			if (decls->cnt == 0) {
				btf_dump_emit_name(d, fname, last_was_ptr);
				btf_dump_printf(d, "[%u]", a->nelems);
				return;
			}

			next_id = decls->ids[decls->cnt - 1];
			next_t = btf__type_by_id(d->btf, next_id);
			multidim = btf_is_array(next_t);
			/* we need space if we have named non-pointer */
			if (fname[0] && !last_was_ptr)
				btf_dump_printf(d, " ");
			/* no parentheses for multi-dimensional array */
			if (!multidim)
				btf_dump_printf(d, "(");
			btf_dump_emit_type_chain(d, decls, fname, lvl);
			if (!multidim)
				btf_dump_printf(d, ")");
			btf_dump_printf(d, "[%u]", a->nelems);
			return;
		}
		case BTF_KIND_FUNC_PROTO: {
			const struct btf_param *p = btf_params(t);
			__u16 vlen = btf_vlen(t);
			int i;

			/*
			 * GCC emits extra volatile qualifier for
			 * __attribute__((noreturn)) function pointers. Clang
			 * doesn't do it. It's a GCC quirk for backwards
			 * compatibility with code written for GCC <2.5. So,
			 * similarly to extra qualifiers for array, just drop
			 * them, instead of handling them.
			 */
			btf_dump_drop_mods(d, decls);
			if (decls->cnt) {
				btf_dump_printf(d, " (");
				btf_dump_emit_type_chain(d, decls, fname, lvl);
				btf_dump_printf(d, ")");
			} else {
				btf_dump_emit_name(d, fname, last_was_ptr);
			}
			btf_dump_printf(d, "(");
			/*
			 * Clang for BPF target generates func_proto with no
			 * args as a func_proto with a single void arg (e.g.,
			 * `int (*f)(void)` vs just `int (*f)()`). We are
			 * going to emit valid empty args (void) syntax for
			 * such case. Similarly and conveniently, valid
			 * no args case can be special-cased here as well.
			 */
			if (vlen == 0 || (vlen == 1 && p->type == 0)) {
				btf_dump_printf(d, "void)");
				return;
			}

			for (i = 0; i < vlen; i++, p++) {
				if (i > 0)
					btf_dump_printf(d, ", ");

				/* last arg of type void is vararg */
				if (i == vlen - 1 && p->type == 0) {
					btf_dump_printf(d, "...");
					break;
				}

				name = btf_name_of(d, p->name_off);
				btf_dump_emit_type_decl(d, p->type, name, lvl);
			}

			btf_dump_printf(d, ")");
			return;
		}
		default:
			pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
				kind, id);
			return;
		}

		last_was_ptr = kind == BTF_KIND_PTR;
	}

	btf_dump_emit_name(d, fname, last_was_ptr);
}
/* show type name as (type_name) */
static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id,
				    bool top_level)
{
	const struct btf_type *t;

	/* for array members, we don't bother emitting type name for each
	 * member to avoid the redundancy of
	 * .name = (char[4])[(char)'f',(char)'o',(char)'o',]
	 */
	if (d->typed_dump->is_array_member)
		return;

	/* avoid type name specification for variable/section; it will be done
	 * for the associated variable value(s).
	 */
	t = btf__type_by_id(d->btf, id);
	if (btf_is_var(t) || btf_is_datasec(t))
		return;

	if (top_level)
		btf_dump_printf(d, "(");

	/* temporarily flip dump-wide flags around the recursive emission:
	 * skip anonymous struct/enum bodies and drop cv-qualifiers so the
	 * cast stays a compact single-token-ish type name
	 */
	d->skip_anon_defs = true;
	d->strip_mods = true;
	btf_dump_emit_type_decl(d, id, "", 0);
	d->strip_mods = false;
	d->skip_anon_defs = false;

	if (top_level)
		btf_dump_printf(d, ")");
}
/* return number of duplicates (occurrences) of a given name */
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
				 const char *orig_name)
{
	char *old_name, *new_name;
	size_t dup_cnt = 0;
	int err;

	/* the map owns its key, so insert a private copy of the name */
	new_name = strdup(orig_name);
	if (!new_name)
		return 1;

	/* dup_cnt stays 0 if the name was never seen; lookup failure is
	 * deliberately ignored
	 */
	(void)hashmap__find(name_map, orig_name, &dup_cnt);
	dup_cnt++;

	/* replace any previous mapping; the displaced key, if any, is
	 * returned via old_name so it can be freed
	 */
	err = hashmap__set(name_map, new_name, dup_cnt, &old_name, NULL);
	if (err)
		free(new_name);
	/* NOTE(review): assumes hashmap__set() sets old_name to NULL when
	 * there was no prior entry and on error paths — verify, otherwise
	 * free(old_name) would act on an indeterminate pointer
	 */
	free(old_name);

	return dup_cnt;
}
  1470. static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
  1471. struct hashmap *name_map)
  1472. {
  1473. struct btf_dump_type_aux_state *s = &d->type_states[id];
  1474. const struct btf_type *t = btf__type_by_id(d->btf, id);
  1475. const char *orig_name = btf_name_of(d, t->name_off);
  1476. const char **cached_name = &d->cached_names[id];
  1477. size_t dup_cnt;
  1478. if (t->name_off == 0)
  1479. return "";
  1480. if (s->name_resolved)
  1481. return *cached_name ? *cached_name : orig_name;
  1482. if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) {
  1483. s->name_resolved = 1;
  1484. return orig_name;
  1485. }
  1486. dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
  1487. if (dup_cnt > 1) {
  1488. const size_t max_len = 256;
  1489. char new_name[max_len];
  1490. snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt);
  1491. *cached_name = strdup(new_name);
  1492. }
  1493. s->name_resolved = 1;
  1494. return *cached_name ? *cached_name : orig_name;
  1495. }
/* unique name for a type in the struct/union/enum/typedef name space */
static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
{
	return btf_dump_resolve_name(d, id, d->type_names);
}
/* unique name for an identifier (func/var) in the ordinary name space */
static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
{
	return btf_dump_resolve_name(d, id, d->ident_names);
}
/* forward declaration: the per-kind dumpers below (array, struct, var,
 * datasec) recurse back into this generic typed-data dumper
 */
static int btf_dump_dump_type_data(struct btf_dump *d,
				   const char *fname,
				   const struct btf_type *t,
				   __u32 id,
				   const void *data,
				   __u8 bits_offset,
				   __u8 bit_sz);
  1511. static const char *btf_dump_data_newline(struct btf_dump *d)
  1512. {
  1513. return d->typed_dump->compact || d->typed_dump->depth == 0 ? "" : "\n";
  1514. }
  1515. static const char *btf_dump_data_delim(struct btf_dump *d)
  1516. {
  1517. return d->typed_dump->depth == 0 ? "" : ",";
  1518. }
  1519. static void btf_dump_data_pfx(struct btf_dump *d)
  1520. {
  1521. int i, lvl = d->typed_dump->indent_lvl + d->typed_dump->depth;
  1522. if (d->typed_dump->compact)
  1523. return;
  1524. for (i = 0; i < lvl; i++)
  1525. btf_dump_printf(d, "%s", d->typed_dump->indent_str);
  1526. }
/* A macro is used here as btf_type_value[s]() appends format specifiers
 * to the format specifier passed in; these do the work of appending
 * delimiters etc while the caller simply has to specify the type values
 * in the format specifier + value(s).  Delimiter and newline are computed
 * per call so compact mode and depth-0 suppression apply automatically.
 */
#define btf_dump_type_values(d, fmt, ...)				\
	btf_dump_printf(d, fmt "%s%s",					\
			##__VA_ARGS__,					\
			btf_dump_data_delim(d),				\
			btf_dump_data_newline(d))
  1537. static int btf_dump_unsupported_data(struct btf_dump *d,
  1538. const struct btf_type *t,
  1539. __u32 id)
  1540. {
  1541. btf_dump_printf(d, "<unsupported kind:%u>", btf_kind(t));
  1542. return -ENOTSUP;
  1543. }
/* Extract into *@value the (at most 64-bit) bitfield stored at @data with
 * bit offset @bits_offset and width @bit_sz.  Returns 0 on success,
 * -EINVAL if the underlying storage unit is wider than 8 bytes.
 */
static int btf_dump_get_bitfield_value(struct btf_dump *d,
				       const struct btf_type *t,
				       const void *data,
				       __u8 bits_offset,
				       __u8 bit_sz,
				       __u64 *value)
{
	__u16 left_shift_bits, right_shift_bits;
	const __u8 *bytes = data;
	__u8 nr_copy_bits;
	__u64 num = 0;
	int i;

	/* Maximum supported bitfield size is 64 bits */
	if (t->size > 8) {
		pr_warn("unexpected bitfield size %d\n", t->size);
		return -EINVAL;
	}

	/* Bitfield value retrieval is done in two steps; first relevant bytes are
	 * stored in num, then we left/right shift num to eliminate irrelevant bits.
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* assemble bytes most-significant-first so byte 0 of storage ends up
	 * in the low bits of num
	 */
	for (i = t->size - 1; i >= 0; i--)
		num = num * 256 + bytes[i];
	nr_copy_bits = bit_sz + bits_offset;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	for (i = 0; i < t->size; i++)
		num = num * 256 + bytes[i];
	nr_copy_bits = t->size * 8 - bits_offset;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	/* NOTE(review): when called with bit_sz == 0 (see
	 * btf_dump_get_enum_value()), right_shift_bits is 64; shifting a
	 * __u64 by its full width is technically undefined — confirm callers
	 * only rely on this where it behaves as a no-op.
	 */
	left_shift_bits = 64 - nr_copy_bits;
	right_shift_bits = 64 - bit_sz;
	*value = (num << left_shift_bits) >> right_shift_bits;

	return 0;
}
  1580. static int btf_dump_bitfield_check_zero(struct btf_dump *d,
  1581. const struct btf_type *t,
  1582. const void *data,
  1583. __u8 bits_offset,
  1584. __u8 bit_sz)
  1585. {
  1586. __u64 check_num;
  1587. int err;
  1588. err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &check_num);
  1589. if (err)
  1590. return err;
  1591. if (check_num == 0)
  1592. return -ENODATA;
  1593. return 0;
  1594. }
  1595. static int btf_dump_bitfield_data(struct btf_dump *d,
  1596. const struct btf_type *t,
  1597. const void *data,
  1598. __u8 bits_offset,
  1599. __u8 bit_sz)
  1600. {
  1601. __u64 print_num;
  1602. int err;
  1603. err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &print_num);
  1604. if (err)
  1605. return err;
  1606. btf_dump_type_values(d, "0x%llx", (unsigned long long)print_num);
  1607. return 0;
  1608. }
  1609. /* ints, floats and ptrs */
  1610. static int btf_dump_base_type_check_zero(struct btf_dump *d,
  1611. const struct btf_type *t,
  1612. __u32 id,
  1613. const void *data)
  1614. {
  1615. static __u8 bytecmp[16] = {};
  1616. int nr_bytes;
  1617. /* For pointer types, pointer size is not defined on a per-type basis.
  1618. * On dump creation however, we store the pointer size.
  1619. */
  1620. if (btf_kind(t) == BTF_KIND_PTR)
  1621. nr_bytes = d->ptr_sz;
  1622. else
  1623. nr_bytes = t->size;
  1624. if (nr_bytes < 1 || nr_bytes > 16) {
  1625. pr_warn("unexpected size %d for id [%u]\n", nr_bytes, id);
  1626. return -EINVAL;
  1627. }
  1628. if (memcmp(data, bytecmp, nr_bytes) == 0)
  1629. return -ENODATA;
  1630. return 0;
  1631. }
  1632. static bool ptr_is_aligned(const struct btf *btf, __u32 type_id,
  1633. const void *data)
  1634. {
  1635. int alignment = btf__align_of(btf, type_id);
  1636. if (alignment == 0)
  1637. return false;
  1638. return ((uintptr_t)data) % alignment == 0;
  1639. }
/* Dump an integer value of size 1..16 bytes.  Chars inside char arrays are
 * shown as character literals until a '\0' terminator is seen; 16-byte
 * ints are shown in hex.  @bits_offset is unused here (bitfields are
 * handled by btf_dump_bitfield_data()).
 */
static int btf_dump_int_data(struct btf_dump *d,
			     const struct btf_type *t,
			     __u32 type_id,
			     const void *data,
			     __u8 bits_offset)
{
	__u8 encoding = btf_int_encoding(t);
	bool sign = encoding & BTF_INT_SIGNED;
	/* 16-byte-aligned scratch buffer for copying out unaligned data */
	char buf[16] __attribute__((aligned(16)));
	int sz = t->size;

	if (sz == 0 || sz > sizeof(buf)) {
		pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
		return -EINVAL;
	}

	/* handle packed int data - accesses of integers not aligned on
	 * int boundaries can cause problems on some platforms.
	 */
	if (!ptr_is_aligned(d->btf, type_id, data)) {
		memcpy(buf, data, sz);
		data = buf;
	}

	switch (sz) {
	case 16: {
		const __u64 *ints = data;
		__u64 lsi, msi;

		/* avoid use of __int128 as some 32-bit platforms do not
		 * support it.
		 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		lsi = ints[0];
		msi = ints[1];
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		lsi = ints[1];
		msi = ints[0];
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
		/* omit the upper 64 bits when they are all zero */
		if (msi == 0)
			btf_dump_type_values(d, "0x%llx", (unsigned long long)lsi);
		else
			btf_dump_type_values(d, "0x%llx%016llx", (unsigned long long)msi,
					     (unsigned long long)lsi);
		break;
	}
	case 8:
		if (sign)
			btf_dump_type_values(d, "%lld", *(long long *)data);
		else
			btf_dump_type_values(d, "%llu", *(unsigned long long *)data);
		break;
	case 4:
		if (sign)
			btf_dump_type_values(d, "%d", *(__s32 *)data);
		else
			btf_dump_type_values(d, "%u", *(__u32 *)data);
		break;
	case 2:
		if (sign)
			btf_dump_type_values(d, "%d", *(__s16 *)data);
		else
			btf_dump_type_values(d, "%u", *(__u16 *)data);
		break;
	case 1:
		if (d->typed_dump->is_array_char) {
			/* check for null terminator */
			if (d->typed_dump->is_array_terminated)
				break;
			if (*(char *)data == '\0') {
				btf_dump_type_values(d, "'\\0'");
				d->typed_dump->is_array_terminated = true;
				break;
			}
			if (isprint(*(char *)data)) {
				btf_dump_type_values(d, "'%c'", *(char *)data);
				break;
			}
		}
		/* non-printable chars (and non-char bytes) fall through to
		 * plain numeric display
		 */
		if (sign)
			btf_dump_type_values(d, "%d", *(__s8 *)data);
		else
			btf_dump_type_values(d, "%u", *(__u8 *)data);
		break;
	default:
		pr_warn("unexpected sz %d for id [%u]\n", sz, type_id);
		return -EINVAL;
	}
	return 0;
}
/* scratch union giving properly-typed access to floats of every
 * supported width (4/8/16 bytes)
 */
union float_data {
	long double ld;	/* 16-byte */
	double d;	/* 8-byte */
	float f;	/* 4-byte */
};
  1733. static int btf_dump_float_data(struct btf_dump *d,
  1734. const struct btf_type *t,
  1735. __u32 type_id,
  1736. const void *data)
  1737. {
  1738. const union float_data *flp = data;
  1739. union float_data fl;
  1740. int sz = t->size;
  1741. /* handle unaligned data; copy to local union */
  1742. if (!ptr_is_aligned(d->btf, type_id, data)) {
  1743. memcpy(&fl, data, sz);
  1744. flp = &fl;
  1745. }
  1746. switch (sz) {
  1747. case 16:
  1748. btf_dump_type_values(d, "%Lf", flp->ld);
  1749. break;
  1750. case 8:
  1751. btf_dump_type_values(d, "%lf", flp->d);
  1752. break;
  1753. case 4:
  1754. btf_dump_type_values(d, "%f", flp->f);
  1755. break;
  1756. default:
  1757. pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
  1758. return -EINVAL;
  1759. }
  1760. return 0;
  1761. }
/* dump a BTF_KIND_VAR as "[linkage] (type)varname = value" */
static int btf_dump_var_data(struct btf_dump *d,
			     const struct btf_type *v,
			     __u32 id,
			     const void *data)
{
	/* NOTE(review): a VAR's linkage is read through the *func* linkage
	 * enum; this assumes BTF var and func linkage constants share
	 * numeric values for static/global/extern — confirm against the
	 * BTF spec.
	 */
	enum btf_func_linkage linkage = btf_var(v)->linkage;
	const struct btf_type *t;
	const char *l;
	__u32 type_id;

	switch (linkage) {
	case BTF_FUNC_STATIC:
		l = "static ";
		break;
	case BTF_FUNC_EXTERN:
		l = "extern ";
		break;
	case BTF_FUNC_GLOBAL:
	default:
		l = "";
		break;
	}

	/* format of output here is [linkage] [type] [varname] = (type)value,
	 * for example "static int cpu_profile_flip = (int)1"
	 */
	btf_dump_printf(d, "%s", l);
	type_id = v->type;
	t = btf__type_by_id(d->btf, type_id);
	/* top_level=false: cast is emitted without surrounding parentheses */
	btf_dump_emit_type_cast(d, type_id, false);
	btf_dump_printf(d, " %s = ", btf_name_of(d, v->name_off));
	return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0);
}
/* dump array data as [elem,elem,...]; char arrays as 'c','h','a','r' */
static int btf_dump_array_data(struct btf_dump *d,
			       const struct btf_type *t,
			       __u32 id,
			       const void *data)
{
	const struct btf_array *array = btf_array(t);
	const struct btf_type *elem_type;
	__u32 i, elem_type_id;
	__s64 elem_size;
	bool is_array_member;
	bool is_array_terminated;

	elem_type_id = array->type;
	elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
	elem_size = btf__resolve_size(d->btf, elem_type_id);
	if (elem_size <= 0) {
		pr_warn("unexpected elem size %zd for array type [%u]\n",
			(ssize_t)elem_size, id);
		return -EINVAL;
	}

	if (btf_is_int(elem_type)) {
		/*
		 * BTF_INT_CHAR encoding never seems to be set for
		 * char arrays, so if size is 1 and element is
		 * printable as a char, we'll do that.
		 */
		if (elem_size == 1)
			d->typed_dump->is_array_char = true;
	}

	/* note that we increment depth before calling btf_dump_print() below;
	 * this is intentional.  btf_dump_data_newline() will not print a
	 * newline for depth 0 (since this leaves us with trailing newlines
	 * at the end of typed display), so depth is incremented first.
	 * For similar reasons, we decrement depth before showing the closing
	 * parenthesis.
	 */
	d->typed_dump->depth++;
	btf_dump_printf(d, "[%s", btf_dump_data_newline(d));

	/* may be a multidimensional array, so store current "is array member"
	 * status so we can restore it correctly later.
	 */
	is_array_member = d->typed_dump->is_array_member;
	d->typed_dump->is_array_member = true;

	/* save/reset terminator state too, so a '\0' seen in a nested char
	 * array does not suppress remaining elements of the outer array
	 */
	is_array_terminated = d->typed_dump->is_array_terminated;
	d->typed_dump->is_array_terminated = false;

	for (i = 0; i < array->nelems; i++, data += elem_size) {
		/* char-array display stops at the first '\0' terminator */
		if (d->typed_dump->is_array_terminated)
			break;
		btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0);
	}

	/* restore state of the enclosing array, if any */
	d->typed_dump->is_array_member = is_array_member;
	d->typed_dump->is_array_terminated = is_array_terminated;
	d->typed_dump->depth--;
	btf_dump_data_pfx(d);
	btf_dump_type_values(d, "]");

	return 0;
}
/* dump struct/union data as { member-values } */
static int btf_dump_struct_data(struct btf_dump *d,
				const struct btf_type *t,
				__u32 id,
				const void *data)
{
	const struct btf_member *m = btf_members(t);
	__u16 n = btf_vlen(t);
	int i, err = 0;

	/* note that we increment depth before calling btf_dump_print() below;
	 * this is intentional.  btf_dump_data_newline() will not print a
	 * newline for depth 0 (since this leaves us with trailing newlines
	 * at the end of typed display), so depth is incremented first.
	 * For similar reasons, we decrement depth before showing the closing
	 * parenthesis.
	 */
	d->typed_dump->depth++;
	btf_dump_printf(d, "{%s", btf_dump_data_newline(d));

	for (i = 0; i < n; i++, m++) {
		const struct btf_type *mtype;
		const char *mname;
		__u32 moffset;
		__u8 bit_sz;

		mtype = btf__type_by_id(d->btf, m->type);
		mname = btf_name_of(d, m->name_off);
		/* member offset is in bits; byte/bit parts are split below */
		moffset = btf_member_bit_offset(t, i);
		/* non-zero only for bitfield members */
		bit_sz = btf_member_bitfield_size(t, i);
		err = btf_dump_dump_type_data(d, mname, mtype, m->type, data + moffset / 8,
					      moffset % 8, bit_sz);
		if (err < 0)
			return err;
	}

	d->typed_dump->depth--;
	btf_dump_data_pfx(d);
	btf_dump_type_values(d, "}");
	return err;
}
/* raw pointer representation for 4- and 8-byte pointer sizes */
union ptr_data {
	unsigned int p;		/* 4-byte pointers */
	unsigned long long lp;	/* 8-byte pointers */
};
  1889. static int btf_dump_ptr_data(struct btf_dump *d,
  1890. const struct btf_type *t,
  1891. __u32 id,
  1892. const void *data)
  1893. {
  1894. if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) {
  1895. btf_dump_type_values(d, "%p", *(void **)data);
  1896. } else {
  1897. union ptr_data pt;
  1898. memcpy(&pt, data, d->ptr_sz);
  1899. if (d->ptr_sz == 4)
  1900. btf_dump_type_values(d, "0x%x", pt.p);
  1901. else
  1902. btf_dump_type_values(d, "0x%llx", pt.lp);
  1903. }
  1904. return 0;
  1905. }
/* Read the enum value at @data into *@value, handling unaligned data and
 * 1/2/4/8-byte enums; sign-extends when the enum is marked signed (kflag).
 */
static int btf_dump_get_enum_value(struct btf_dump *d,
				   const struct btf_type *t,
				   const void *data,
				   __u32 id,
				   __s64 *value)
{
	bool is_signed = btf_kflag(t);

	/* unaligned data: reuse the byte-wise bitfield extractor instead of
	 * a typed dereference
	 */
	if (!ptr_is_aligned(d->btf, id, data)) {
		__u64 val;
		int err;

		/* bits_offset/bit_sz of 0 means "whole storage unit";
		 * NOTE(review): this relies on the shift-by-64 in
		 * btf_dump_get_bitfield_value() acting as a no-op — see the
		 * note there
		 */
		err = btf_dump_get_bitfield_value(d, t, data, 0, 0, &val);
		if (err)
			return err;
		*value = (__s64)val;
		return 0;
	}

	switch (t->size) {
	case 8:
		*value = *(__s64 *)data;
		return 0;
	case 4:
		*value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data;
		return 0;
	case 2:
		*value = is_signed ? *(__s16 *)data : *(__u16 *)data;
		return 0;
	case 1:
		*value = is_signed ? *(__s8 *)data : *(__u8 *)data;
		return 0;
	default:
		pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id);
		return -EINVAL;
	}
}
  1940. static int btf_dump_enum_data(struct btf_dump *d,
  1941. const struct btf_type *t,
  1942. __u32 id,
  1943. const void *data)
  1944. {
  1945. bool is_signed;
  1946. __s64 value;
  1947. int i, err;
  1948. err = btf_dump_get_enum_value(d, t, data, id, &value);
  1949. if (err)
  1950. return err;
  1951. is_signed = btf_kflag(t);
  1952. if (btf_is_enum(t)) {
  1953. const struct btf_enum *e;
  1954. for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
  1955. if (value != e->val)
  1956. continue;
  1957. btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
  1958. return 0;
  1959. }
  1960. btf_dump_type_values(d, is_signed ? "%d" : "%u", value);
  1961. } else {
  1962. const struct btf_enum64 *e;
  1963. for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) {
  1964. if (value != btf_enum64_value(e))
  1965. continue;
  1966. btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
  1967. return 0;
  1968. }
  1969. btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL",
  1970. (unsigned long long)value);
  1971. }
  1972. return 0;
  1973. }
  1974. static int btf_dump_datasec_data(struct btf_dump *d,
  1975. const struct btf_type *t,
  1976. __u32 id,
  1977. const void *data)
  1978. {
  1979. const struct btf_var_secinfo *vsi;
  1980. const struct btf_type *var;
  1981. __u32 i;
  1982. int err;
  1983. btf_dump_type_values(d, "SEC(\"%s\") ", btf_name_of(d, t->name_off));
  1984. for (i = 0, vsi = btf_var_secinfos(t); i < btf_vlen(t); i++, vsi++) {
  1985. var = btf__type_by_id(d->btf, vsi->type);
  1986. err = btf_dump_dump_type_data(d, NULL, var, vsi->type, data + vsi->offset, 0, 0);
  1987. if (err < 0)
  1988. return err;
  1989. btf_dump_printf(d, ";");
  1990. }
  1991. return 0;
  1992. }
/* return size of type, or if base type overflows, return -E2BIG. */
static int btf_dump_type_data_check_overflow(struct btf_dump *d,
					     const struct btf_type *t,
					     __u32 id,
					     const void *data,
					     __u8 bits_offset,
					     __u8 bit_sz)
{
	__s64 size;

	if (bit_sz) {
		/* bits_offset is at most 7. bit_sz is at most 128. */
		__u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;

		/* When bit_sz is non zero, it is called from
		 * btf_dump_struct_data() where it only cares about
		 * negative error value.
		 * Return nr_bytes in success case to make it
		 * consistent as the regular integer case below.
		 */
		return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
	}

	size = btf__resolve_size(d->btf, id);
	if (size < 0 || size >= INT_MAX) {
		pr_warn("unexpected size [%zu] for id [%u]\n",
			(size_t)size, id);
		return -EINVAL;
	}

	/* Only do overflow checking for base types; we do not want to
	 * avoid showing part of a struct, union or array, even if we
	 * do not have enough data to show the full object.  By
	 * restricting overflow checking to base types we can ensure
	 * that partial display succeeds, while avoiding overflowing
	 * and using bogus data for display.
	 */
	t = skip_mods_and_typedefs(d->btf, id, NULL);
	if (!t) {
		pr_warn("unexpected error skipping mods/typedefs for id [%u]\n",
			id);
		return -EINVAL;
	}

	switch (btf_kind(t)) {
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_PTR:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		/* base types must fit entirely within the provided buffer */
		if (data + bits_offset / 8 + size > d->typed_dump->data_end)
			return -E2BIG;
		break;
	default:
		break;
	}
	return (int)size;
}
  2046. static int btf_dump_type_data_check_zero(struct btf_dump *d,
  2047. const struct btf_type *t,
  2048. __u32 id,
  2049. const void *data,
  2050. __u8 bits_offset,
  2051. __u8 bit_sz)
  2052. {
  2053. __s64 value;
  2054. int i, err;
  2055. /* toplevel exceptions; we show zero values if
  2056. * - we ask for them (emit_zeros)
  2057. * - if we are at top-level so we see "struct empty { }"
  2058. * - or if we are an array member and the array is non-empty and
  2059. * not a char array; we don't want to be in a situation where we
  2060. * have an integer array 0, 1, 0, 1 and only show non-zero values.
  2061. * If the array contains zeroes only, or is a char array starting
  2062. * with a '\0', the array-level check_zero() will prevent showing it;
  2063. * we are concerned with determining zero value at the array member
  2064. * level here.
  2065. */
  2066. if (d->typed_dump->emit_zeroes || d->typed_dump->depth == 0 ||
  2067. (d->typed_dump->is_array_member &&
  2068. !d->typed_dump->is_array_char))
  2069. return 0;
  2070. t = skip_mods_and_typedefs(d->btf, id, NULL);
  2071. switch (btf_kind(t)) {
  2072. case BTF_KIND_INT:
  2073. if (bit_sz)
  2074. return btf_dump_bitfield_check_zero(d, t, data, bits_offset, bit_sz);
  2075. return btf_dump_base_type_check_zero(d, t, id, data);
  2076. case BTF_KIND_FLOAT:
  2077. case BTF_KIND_PTR:
  2078. return btf_dump_base_type_check_zero(d, t, id, data);
  2079. case BTF_KIND_ARRAY: {
  2080. const struct btf_array *array = btf_array(t);
  2081. const struct btf_type *elem_type;
  2082. __u32 elem_type_id, elem_size;
  2083. bool ischar;
  2084. elem_type_id = array->type;
  2085. elem_size = btf__resolve_size(d->btf, elem_type_id);
  2086. elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
  2087. ischar = btf_is_int(elem_type) && elem_size == 1;
  2088. /* check all elements; if _any_ element is nonzero, all
  2089. * of array is displayed. We make an exception however
  2090. * for char arrays where the first element is 0; these
  2091. * are considered zeroed also, even if later elements are
  2092. * non-zero because the string is terminated.
  2093. */
  2094. for (i = 0; i < array->nelems; i++) {
  2095. if (i == 0 && ischar && *(char *)data == 0)
  2096. return -ENODATA;
  2097. err = btf_dump_type_data_check_zero(d, elem_type,
  2098. elem_type_id,
  2099. data +
  2100. (i * elem_size),
  2101. bits_offset, 0);
  2102. if (err != -ENODATA)
  2103. return err;
  2104. }
  2105. return -ENODATA;
  2106. }
  2107. case BTF_KIND_STRUCT:
  2108. case BTF_KIND_UNION: {
  2109. const struct btf_member *m = btf_members(t);
  2110. __u16 n = btf_vlen(t);
  2111. /* if any struct/union member is non-zero, the struct/union
  2112. * is considered non-zero and dumped.
  2113. */
  2114. for (i = 0; i < n; i++, m++) {
  2115. const struct btf_type *mtype;
  2116. __u32 moffset;
  2117. mtype = btf__type_by_id(d->btf, m->type);
  2118. moffset = btf_member_bit_offset(t, i);
  2119. /* btf_int_bits() does not store member bitfield size;
  2120. * bitfield size needs to be stored here so int display
  2121. * of member can retrieve it.
  2122. */
  2123. bit_sz = btf_member_bitfield_size(t, i);
  2124. err = btf_dump_type_data_check_zero(d, mtype, m->type, data + moffset / 8,
  2125. moffset % 8, bit_sz);
  2126. if (err != ENODATA)
  2127. return err;
  2128. }
  2129. return -ENODATA;
  2130. }
  2131. case BTF_KIND_ENUM:
  2132. case BTF_KIND_ENUM64:
  2133. err = btf_dump_get_enum_value(d, t, data, id, &value);
  2134. if (err)
  2135. return err;
  2136. if (value == 0)
  2137. return -ENODATA;
  2138. return 0;
  2139. default:
  2140. return 0;
  2141. }
  2142. }
/* returns size of data dumped, or error. */
static int btf_dump_dump_type_data(struct btf_dump *d,
				   const char *fname,
				   const struct btf_type *t,
				   __u32 id,
				   const void *data,
				   __u8 bits_offset,
				   __u8 bit_sz)
{
	int size, err = 0;

	/* bail before dumping anything if the value overruns the buffer */
	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
	if (size < 0)
		return size;
	err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
	if (err) {
		/* zeroed data is expected and not an error, so simply skip
		 * dumping such data.  Record other errors however.
		 */
		if (err == -ENODATA)
			return size;
		return err;
	}
	btf_dump_data_pfx(d);

	/* optional ".name = " and "(type)" prefixes before the value */
	if (!d->typed_dump->skip_names) {
		if (fname && strlen(fname) > 0)
			btf_dump_printf(d, ".%s = ", fname);
		btf_dump_emit_type_cast(d, id, true);
	}

	/* dispatch to the kind-specific dumper for the resolved type */
	t = skip_mods_and_typedefs(d->btf, id, NULL);

	switch (btf_kind(t)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_FWD:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_DECL_TAG:
		err = btf_dump_unsupported_data(d, t, id);
		break;
	case BTF_KIND_INT:
		if (bit_sz)
			err = btf_dump_bitfield_data(d, t, data, bits_offset, bit_sz);
		else
			err = btf_dump_int_data(d, t, id, data, bits_offset);
		break;
	case BTF_KIND_FLOAT:
		err = btf_dump_float_data(d, t, id, data);
		break;
	case BTF_KIND_PTR:
		err = btf_dump_ptr_data(d, t, id, data);
		break;
	case BTF_KIND_ARRAY:
		err = btf_dump_array_data(d, t, id, data);
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		err = btf_dump_struct_data(d, t, id, data);
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		/* handle bitfield and int enum values */
		if (bit_sz) {
			__u64 print_num;
			__s64 enum_val;

			err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz,
							  &print_num);
			if (err)
				break;
			enum_val = (__s64)print_num;
			/* pass the extracted value by address; enum dumper
			 * reads it as already-aligned data
			 */
			err = btf_dump_enum_data(d, t, id, &enum_val);
		} else
			err = btf_dump_enum_data(d, t, id, data);
		break;
	case BTF_KIND_VAR:
		err = btf_dump_var_data(d, t, id, data);
		break;
	case BTF_KIND_DATASEC:
		err = btf_dump_datasec_data(d, t, id, data);
		break;
	default:
		pr_warn("unexpected kind [%u] for id [%u]\n",
			BTF_INFO_KIND(t->info), id);
		return -EINVAL;
	}
	if (err < 0)
		return err;
	return size;
}
/* Public API: dump the value stored at @data (of @data_sz bytes) as an
 * instance of type @id, using formatting options from @opts.  Returns the
 * number of bytes dumped on success, negative error otherwise (with errno
 * also set via libbpf_err()).
 */
int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
			     const void *data, size_t data_sz,
			     const struct btf_dump_type_data_opts *opts)
{
	struct btf_dump_data typed_dump = {};
	const struct btf_type *t;
	int ret;

	if (!OPTS_VALID(opts, btf_dump_type_data_opts))
		return libbpf_err(-EINVAL);

	t = btf__type_by_id(d->btf, id);
	if (!t)
		return libbpf_err(-ENOENT);

	/* per-call dump state lives on the stack; d->typed_dump is reset
	 * before returning
	 */
	d->typed_dump = &typed_dump;
	/* data_end bounds all overflow checks during the dump */
	d->typed_dump->data_end = data + data_sz;
	d->typed_dump->indent_lvl = OPTS_GET(opts, indent_level, 0);

	/* default indent string is a tab */
	if (!OPTS_GET(opts, indent_str, NULL))
		d->typed_dump->indent_str[0] = '\t';
	else
		libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str,
			       sizeof(d->typed_dump->indent_str));

	d->typed_dump->compact = OPTS_GET(opts, compact, false);
	d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
	d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false);

	ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0);

	d->typed_dump = NULL;

	return libbpf_err(ret);
}