static_call_inline.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/init.h>
  3. #include <linux/static_call.h>
  4. #include <linux/bug.h>
  5. #include <linux/smp.h>
  6. #include <linux/sort.h>
  7. #include <linux/slab.h>
  8. #include <linux/module.h>
  9. #include <linux/cpu.h>
  10. #include <linux/processor.h>
  11. #include <asm/sections.h>
/* Linker-provided bounds of the vmlinux .static_call_sites section. */
extern struct static_call_site __start_static_call_sites[],
			       __stop_static_call_sites[];
/* Linker-provided bounds of the trampoline -> key mapping table. */
extern struct static_call_tramp_key __start_static_call_tramp_key[],
				    __stop_static_call_tramp_key[];

/*
 * 0 = not yet initialized, 1 = initialized.  static_call_force_reinit()
 * bumps it past 1 so that static_call_init() runs again; see the
 * "== 1" check in static_call_init().
 */
int static_call_initialized;
  17. /*
  18. * Must be called before early_initcall() to be effective.
  19. */
  20. void static_call_force_reinit(void)
  21. {
  22. if (WARN_ON_ONCE(!static_call_initialized))
  23. return;
  24. static_call_initialized++;
  25. }
/* mutex to protect key modules/sites */
static DEFINE_MUTEX(static_call_mutex);

/* Serialize all key/site list mutation and patching. */
static void static_call_lock(void)
{
	mutex_lock(&static_call_mutex);
}

static void static_call_unlock(void)
{
	mutex_unlock(&static_call_mutex);
}
/*
 * Site entries store their fields relative to the field's own address;
 * recover the absolute call-site address from the stored offset.
 */
static inline void *static_call_addr(struct static_call_site *site)
{
	return (void *)((long)site->addr + (long)&site->addr);
}
/* Absolute key address for @site, with the low flag bits still included. */
static inline unsigned long __static_call_key(const struct static_call_site *site)
{
	return (long)site->key + (long)&site->key;
}
/* Strip the flag bits to obtain the actual static_call_key pointer. */
static inline struct static_call_key *static_call_key(const struct static_call_site *site)
{
	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
}
/* These assume the key is word-aligned. */

/* True if the site lives in __init text (tagged by static_call_set_init()). */
static inline bool static_call_is_init(struct static_call_site *site)
{
	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
}
/* True if the site is a tail-call; passed through to the arch patcher. */
static inline bool static_call_is_tail(struct static_call_site *site)
{
	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
}
/*
 * Tag @site as __init text; the flagged key is converted back to the
 * self-relative representation before being stored.
 */
static inline void static_call_set_init(struct static_call_site *site)
{
	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
		    (long)&site->key;
}
  62. static int static_call_site_cmp(const void *_a, const void *_b)
  63. {
  64. const struct static_call_site *a = _a;
  65. const struct static_call_site *b = _b;
  66. const struct static_call_key *key_a = static_call_key(a);
  67. const struct static_call_key *key_b = static_call_key(b);
  68. if (key_a < key_b)
  69. return -1;
  70. if (key_a > key_b)
  71. return 1;
  72. return 0;
  73. }
/*
 * Swap two site entries for sort().  Because ->addr and ->key are stored
 * relative to their own location, moving an entry by @delta bytes also
 * requires adjusting the stored offsets by @delta.
 */
static void static_call_site_swap(void *_a, void *_b, int size)
{
	long delta = (unsigned long)_a - (unsigned long)_b;
	struct static_call_site *a = _a;
	struct static_call_site *b = _b;
	struct static_call_site tmp = *a;	/* save before overwriting a */

	a->addr = b->addr - delta;
	a->key = b->key - delta;

	b->addr = tmp.addr + delta;
	b->key = tmp.key + delta;
}
/* Sort [start, stop) by key so that per-key site runs are contiguous. */
static inline void static_call_sort_entries(struct static_call_site *start,
					    struct static_call_site *stop)
{
	sort(start, stop - start, sizeof(struct static_call_site),
	     static_call_site_cmp, static_call_site_swap);
}
/*
 * The low bit of key->type discriminates the union: set means the key
 * holds a direct ->sites pointer (vmlinux-only fast path, see
 * __static_call_init()), clear means it holds a ->mods list.
 */
static inline bool static_call_key_has_mods(struct static_call_key *key)
{
	return !(key->type & 1);
}
  95. static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
  96. {
  97. if (!static_call_key_has_mods(key))
  98. return NULL;
  99. return key->mods;
  100. }
  101. static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
  102. {
  103. if (static_call_key_has_mods(key))
  104. return NULL;
  105. return (struct static_call_site *)(key->type & ~1);
  106. }
/*
 * Retarget @key to @func: patch the trampoline @tramp first, then every
 * inline call site attached to @key (vmlinux direct sites and all module
 * lists).  Serialized by static_call_mutex under cpus_read_lock().
 */
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	struct static_call_site *site, *stop;
	struct static_call_mod *site_mod, first;

	cpus_read_lock();
	static_call_lock();

	/* Nothing to do if the target is unchanged. */
	if (key->func == func)
		goto done;

	key->func = func;

	/* Patch the trampoline before the call sites that may still use it. */
	arch_static_call_transform(NULL, tramp, func, false);

	/*
	 * If uninitialized, we'll not update the callsites, but they still
	 * point to the trampoline and we just patched that.
	 */
	if (WARN_ON_ONCE(!static_call_initialized))
		goto done;

	/*
	 * Synthesize a head entry so the loop below handles the key's own
	 * sites (or mods list) and the module entries uniformly.
	 */
	first = (struct static_call_mod){
		.next = static_call_key_next(key),
		.mod = NULL,
		.sites = static_call_key_sites(key),
	};

	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
		bool init = system_state < SYSTEM_RUNNING;
		struct module *mod = site_mod->mod;

		if (!site_mod->sites) {
			/*
			 * This can happen if the static call key is defined in
			 * a module which doesn't use it.
			 *
			 * It also happens in the has_mods case, where the
			 * 'first' entry has no sites associated with it.
			 */
			continue;
		}

		stop = __stop_static_call_sites;

		if (mod) {
#ifdef CONFIG_MODULES
			stop = mod->static_call_sites +
			       mod->num_static_call_sites;
			init = mod->state == MODULE_STATE_COMING;
#endif
		}

		/* Sites are sorted; the run for this key ends at a new key. */
		for (site = site_mod->sites;
		     site < stop && static_call_key(site) == key; site++) {
			void *site_addr = static_call_addr(site);

			/* Don't patch freed __init text after boot. */
			if (!init && static_call_is_init(site))
				continue;

			if (!kernel_text_address((unsigned long)site_addr)) {
				/*
				 * This skips patching built-in __exit, which
				 * is part of init_section_contains() but is
				 * not part of kernel_text_address().
				 *
				 * Skipping built-in __exit is fine since it
				 * will never be executed.
				 */
				WARN_ONCE(!static_call_is_init(site),
					  "can't patch static call site at %pS",
					  site_addr);
				continue;
			}

			arch_static_call_transform(site_addr, NULL, func,
						   static_call_is_tail(site));
		}
	}

done:
	static_call_unlock();
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(__static_call_update);
/*
 * Initialize all sites in [start, stop): tag __init-section sites, attach
 * each per-key run of sites to its key (directly for vmlinux, via an
 * allocated static_call_mod for modules), and patch every site to the
 * key's current target.
 *
 * @mod: owning module, or NULL for vmlinux sites.
 * Returns 0 on success, -ENOMEM on allocation failure (callers undo via
 * static_call_del_module()).
 */
static int __static_call_init(struct module *mod,
			      struct static_call_site *start,
			      struct static_call_site *stop)
{
	struct static_call_site *site;
	struct static_call_key *key, *prev_key = NULL;
	struct static_call_mod *site_mod;

	if (start == stop)
		return 0;

	/* Group sites by key so each key is set up exactly once. */
	static_call_sort_entries(start, stop);

	for (site = start; site < stop; site++) {
		void *site_addr = static_call_addr(site);

		if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
		    (!mod && init_section_contains(site_addr, 1)))
			static_call_set_init(site);

		key = static_call_key(site);
		if (key != prev_key) {
			prev_key = key;

			/*
			 * For vmlinux (!mod) avoid the allocation by storing
			 * the sites pointer in the key itself. Also see
			 * __static_call_update()'s @first.
			 *
			 * This allows architectures (eg. x86) to call
			 * static_call_init() before memory allocation works.
			 */
			if (!mod) {
				key->sites = site;
				key->type |= 1;
				goto do_transform;
			}

			site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
			if (!site_mod)
				return -ENOMEM;

			/*
			 * When the key has a direct sites pointer, extract
			 * that into an explicit struct static_call_mod, so we
			 * can have a list of modules.
			 */
			if (static_call_key_sites(key)) {
				site_mod->mod = NULL;
				site_mod->next = NULL;
				site_mod->sites = static_call_key_sites(key);

				key->mods = site_mod;

				site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
				if (!site_mod)
					return -ENOMEM;
			}

			/* Prepend this module's run of sites to the key. */
			site_mod->mod = mod;
			site_mod->sites = site;
			site_mod->next = static_call_key_next(key);
			key->mods = site_mod;
		}

do_transform:
		arch_static_call_transform(site_addr, NULL, key->func,
					   static_call_is_tail(site));
	}

	return 0;
}
  236. static int addr_conflict(struct static_call_site *site, void *start, void *end)
  237. {
  238. unsigned long addr = (unsigned long)static_call_addr(site);
  239. if (addr <= (unsigned long)end &&
  240. addr + CALL_INSN_SIZE > (unsigned long)start)
  241. return 1;
  242. return 0;
  243. }
  244. static int __static_call_text_reserved(struct static_call_site *iter_start,
  245. struct static_call_site *iter_stop,
  246. void *start, void *end, bool init)
  247. {
  248. struct static_call_site *iter = iter_start;
  249. while (iter < iter_stop) {
  250. if (init || !static_call_is_init(iter)) {
  251. if (addr_conflict(iter, start, end))
  252. return 1;
  253. }
  254. iter++;
  255. }
  256. return 0;
  257. }
  258. #ifdef CONFIG_MODULES
/*
 * Check whether [start, end] overlaps any static call site of the module
 * containing @start.  A module reference is taken so the module's site
 * table cannot vanish while it is being scanned.
 */
static int __static_call_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	/* The range is expected to stay within a single module. */
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __static_call_text_reserved(mod->static_call_sites,
			mod->static_call_sites + mod->num_static_call_sites,
			start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}
  277. static unsigned long tramp_key_lookup(unsigned long addr)
  278. {
  279. struct static_call_tramp_key *start = __start_static_call_tramp_key;
  280. struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
  281. struct static_call_tramp_key *tramp_key;
  282. for (tramp_key = start; tramp_key != stop; tramp_key++) {
  283. unsigned long tramp;
  284. tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
  285. if (tramp == addr)
  286. return (long)tramp_key->key + (long)&tramp_key->key;
  287. }
  288. return 0;
  289. }
/*
 * Fix up a freshly loaded module's static call sites: resolve each site's
 * key reference (directly, or via tramp_key_lookup() for non-exported
 * keys), then run __static_call_init() over the module's site table.
 */
static int static_call_add_module(struct module *mod)
{
	struct static_call_site *start = mod->static_call_sites;
	struct static_call_site *stop = start + mod->num_static_call_sites;
	struct static_call_site *site;

	for (site = start; site != stop; site++) {
		unsigned long s_key = __static_call_key(site);
		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
		unsigned long key;

		/*
		 * If the key is exported, 'addr' points to the key, which
		 * means modules are allowed to call static_call_update() on
		 * it.
		 *
		 * Otherwise, the key isn't exported, and 'addr' points to the
		 * trampoline so we need to lookup the key.
		 *
		 * We go through this dance to prevent crazy modules from
		 * abusing sensitive static calls.
		 */
		if (!kernel_text_address(addr))
			continue;

		key = tramp_key_lookup(addr);
		if (!key) {
			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
				static_call_addr(site));
			return -EINVAL;
		}

		/* Preserve the INIT/TAIL flags and re-relativize the key. */
		key |= s_key & STATIC_CALL_SITE_FLAGS;
		site->key = key - (long)&site->key;
	}

	return __static_call_init(mod, start, stop);
}
/* Unlink and free @mod's static_call_mod entry from every key it used. */
static void static_call_del_module(struct module *mod)
{
	struct static_call_site *start = mod->static_call_sites;
	struct static_call_site *stop = mod->static_call_sites +
					mod->num_static_call_sites;
	struct static_call_key *key, *prev_key = NULL;
	struct static_call_mod *site_mod, **prev;
	struct static_call_site *site;

	for (site = start; site < stop; site++) {
		key = static_call_key(site);

		/*
		 * If the key was not updated due to a memory allocation
		 * failure in __static_call_init() then treating key::sites
		 * as key::mods in the code below would cause random memory
		 * access and #GP. In that case all subsequent sites have
		 * not been touched either, so stop iterating.
		 */
		if (!static_call_key_has_mods(key))
			break;

		/* Sites are sorted; each key needs unlinking only once. */
		if (key == prev_key)
			continue;

		prev_key = key;

		/* Find this module's entry, keeping a link to its ->next slot. */
		for (prev = &key->mods, site_mod = key->mods;
		     site_mod && site_mod->mod != mod;
		     prev = &site_mod->next, site_mod = site_mod->next)
			;

		if (!site_mod)
			continue;

		*prev = site_mod->next;
		kfree(site_mod);
	}
}
/*
 * Module notifier: wire up a module's static call sites on COMING and
 * tear them down on GOING (or when COMING setup fails part-way).
 */
static int static_call_module_notify(struct notifier_block *nb,
				     unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	static_call_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = static_call_add_module(mod);
		if (ret) {
			pr_warn("Failed to allocate memory for static calls\n");
			/* Undo any partially-initialized sites. */
			static_call_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		static_call_del_module(mod);
		break;
	}

	static_call_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block static_call_module_nb = {
	.notifier_call = static_call_module_notify,
};
  381. #else
/* !CONFIG_MODULES: only vmlinux sites exist, so nothing extra to check. */
static inline int __static_call_mod_text_reserved(void *start, void *end)
{
	return 0;
}
  386. #endif /* CONFIG_MODULES */
  387. int static_call_text_reserved(void *start, void *end)
  388. {
  389. bool init = system_state < SYSTEM_RUNNING;
  390. int ret = __static_call_text_reserved(__start_static_call_sites,
  391. __stop_static_call_sites, start, end, init);
  392. if (ret)
  393. return ret;
  394. return __static_call_mod_text_reserved(start, end);
  395. }
/*
 * Early boot: patch all vmlinux static call sites and (once) register the
 * module notifier.  Re-runs only if static_call_force_reinit() bumped
 * static_call_initialized past 1.
 */
int __init static_call_init(void)
{
	int ret;

	/* See static_call_force_reinit(). */
	if (static_call_initialized == 1)
		return 0;

	cpus_read_lock();
	static_call_lock();
	ret = __static_call_init(NULL, __start_static_call_sites,
				 __stop_static_call_sites);
	static_call_unlock();
	cpus_read_unlock();

	if (ret) {
		/* vmlinux sites must initialize; there is no recovery path. */
		pr_err("Failed to allocate memory for static_call!\n");
		BUG();
	}

#ifdef CONFIG_MODULES
	/* Register only on the first pass, not on a forced re-init. */
	if (!static_call_initialized)
		register_module_notifier(&static_call_module_nb);
#endif

	static_call_initialized = 1;
	return 0;
}
early_initcall(static_call_init);
  420. #ifdef CONFIG_STATIC_CALL_SELFTEST
  421. static int func_a(int x)
  422. {
  423. return x+1;
  424. }
  425. static int func_b(int x)
  426. {
  427. return x+2;
  428. }
/* The static call under test, initially targeting func_a. */
DEFINE_STATIC_CALL(sc_selftest, func_a);

/* One selftest step: optional retarget, input value, expected result. */
static struct static_call_data {
	int (*func)(int);	/* new target, or NULL to keep the current one */
	int val;		/* argument passed through the static call */
	int expect;		/* expected return value */
} static_call_data [] __initdata = {
	{ NULL, 2, 3 },		/* default target func_a: 2 + 1 */
	{ func_b, 2, 4 },	/* retarget to func_b: 2 + 2 */
	{ func_a, 2, 3 }	/* and back to func_a: 2 + 1 */
};
  439. static int __init test_static_call_init(void)
  440. {
  441. int i;
  442. for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
  443. struct static_call_data *scd = &static_call_data[i];
  444. if (scd->func)
  445. static_call_update(sc_selftest, scd->func);
  446. WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
  447. }
  448. return 0;
  449. }
  450. early_initcall(test_static_call_init);
  451. #endif /* CONFIG_STATIC_CALL_SELFTEST */