kasan_test_c.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  5. * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
  6. */
  7. #define pr_fmt(fmt) "kasan: test: " fmt
  8. #include <kunit/test.h>
  9. #include <linux/bitops.h>
  10. #include <linux/delay.h>
  11. #include <linux/io.h>
  12. #include <linux/kasan.h>
  13. #include <linux/kernel.h>
  14. #include <linux/mempool.h>
  15. #include <linux/mm.h>
  16. #include <linux/mman.h>
  17. #include <linux/module.h>
  18. #include <linux/printk.h>
  19. #include <linux/random.h>
  20. #include <linux/set_memory.h>
  21. #include <linux/slab.h>
  22. #include <linux/string.h>
  23. #include <linux/tracepoint.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/vmalloc.h>
  26. #include <trace/events/printk.h>
  27. #include <asm/page.h>
  28. #include "kasan.h"
  29. #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/* Saved multi-shot state, restored in kasan_suite_exit(). */
static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;	/* A "BUG: KASAN: " report line was printed. */
	bool async_fault;	/* An "Asynchronous fault: " line was printed. */
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;
  42. /* Probe for console output: obtains test_status lines of interest. */
/*
 * Probe for console output: obtains test_status lines of interest.
 *
 * Registered on the console tracepoint; scans each printed line for the
 * KASAN report markers. WRITE_ONCE() pairs with the READ_ONCE() accesses
 * in KUNIT_EXPECT_KASAN_FAIL().
 */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	if (strnstr(buf, "BUG: KASAN: ", len))
		WRITE_ONCE(test_status.report_found, true);
	else if (strnstr(buf, "Asynchronous fault: ", len))
		WRITE_ONCE(test_status.async_fault, true);
}
/*
 * Suite-wide setup: bail out if KASAN is off, suppress KUnit failures on
 * expected KASAN reports, enable multi-shot reporting, and hook the console
 * so probe_console() can observe report lines.
 */
static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	register_trace_console(probe_console, NULL);
	return 0;
}
/*
 * Suite-wide teardown: undo everything kasan_suite_init() did.
 * tracepoint_synchronize_unregister() ensures no CPU is still running
 * probe_console() before the module may go away.
 */
static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}
/*
 * Per-test teardown: catch KASAN reports that happened outside of a
 * KUNIT_EXPECT_KASAN_FAIL() check (report_found is reset to false after
 * every such check).
 */
static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}
/**
 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
 * KASAN report; causes a KUnit test failure otherwise.
 *
 * @test: Currently executing KUnit test.
 * @expression: Expression that must produce a KASAN report.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
	    kasan_sync_fault_possible()) \
		migrate_disable(); \
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
	barrier(); \
	expression; \
	barrier(); \
	if (kasan_async_fault_possible()) \
		kasan_force_async_fault(); \
	if (!READ_ONCE(test_status.report_found)) { \
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
				"expected in \"" #expression \
				"\", but none occurred"); \
	} \
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
	    kasan_sync_fault_possible()) { \
		if (READ_ONCE(test_status.report_found) && \
		    !READ_ONCE(test_status.async_fault)) \
			kasan_enable_hw_tags(); \
		migrate_enable(); \
	} \
	WRITE_ONCE(test_status.report_found, false); \
	WRITE_ONCE(test_status.async_fault, false); \
} while (0)
/* Skip the current test unless the given Kconfig option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
	if (!IS_ENABLED(config)) \
		kunit_skip((test), "Test requires " #config "=y"); \
} while (0)
/* Skip the current test unless the given Kconfig option is disabled. */
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
	if (IS_ENABLED(config)) \
		kunit_skip((test), "Test requires " #config "=n"); \
} while (0)
/*
 * Skip the current test if mem*() functions are not instrumented by the
 * compiler in this configuration; the break statements fall out of the
 * enclosing do/while when instrumentation is known to be present.
 */
#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
		break; /* No compiler instrumentation. */ \
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \
		break; /* Should always be instrumented! */ \
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \
		kunit_skip((test), "Test requires checked mem*()"); \
} while (0)
/*
 * Check that KASAN detects out-of-bounds accesses to the right of a small
 * kmalloc()ed object, both inside and past the first redzone granule.
 */
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Hide ptr from the compiler so the bad accesses are not elided. */
	OPTIMIZER_HIDE_VAR(ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
		ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}
/* Check that KASAN detects an out-of-bounds read one byte before the object. */
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}
/*
 * Check out-of-bounds detection for a NUMA-node-aware allocation
 * (kmalloc_node() on node 0).
 */
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}
/*
 * Check that KASAN detects an out-of-bounds access for a big object allocated
 * via kmalloc(). But not as big as to trigger the page_alloc fallback.
 */
static void kmalloc_big_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
/*
 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
 * that does not fit into the largest slab cache and therefore is allocated via
 * the page_alloc fallback.
 */

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/* OOB_TAG_OFF skips the in-granule bytes for the tag-based modes. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}
/* Check use-after-free detection for a page_alloc-backed kmalloc() chunk. */
static void kmalloc_large_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	/* volatile keeps the dead load from being optimized away. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
/*
 * Check that freeing an unaligned pointer into a page_alloc-backed kmalloc()
 * chunk is reported as an invalid free.
 */
static void kmalloc_large_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
/*
 * Check out-of-bounds detection for a raw multi-page alloc_pages()
 * allocation (order 4, i.e. 16 pages).
 */
static void page_alloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}
/* Check use-after-free detection for a raw multi-page allocation. */
static void page_alloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
/*
 * Grow an allocation from size1 to size2 via krealloc() and verify that all
 * offsets up to size2 are accessible while offsets past size2 are not.
 */
static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}
/*
 * Shrink an allocation from size1 to size2 via krealloc() and verify that the
 * now-trimmed tail [size2, size1] is repoisoned and inaccessible.
 */
static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}
  319. static void krealloc_more_oob(struct kunit *test)
  320. {
  321. krealloc_more_oob_helper(test, 201, 235);
  322. }
  323. static void krealloc_less_oob(struct kunit *test)
  324. {
  325. krealloc_less_oob_helper(test, 235, 201);
  326. }
  327. static void krealloc_large_more_oob(struct kunit *test)
  328. {
  329. krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
  330. KMALLOC_MAX_CACHE_SIZE + 235);
  331. }
  332. static void krealloc_large_less_oob(struct kunit *test)
  333. {
  334. krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
  335. KMALLOC_MAX_CACHE_SIZE + 201);
  336. }
/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	/* krealloc() of a freed pointer must itself trigger a report... */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	/* ...and fail, returning NULL. */
	KUNIT_ASSERT_NULL(test, ptr2);
	/* The original object must remain poisoned. */
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}
/*
 * Check out-of-bounds detection for a 16-byte struct copy where the
 * destination allocation is 3 bytes short.
 */
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/* RELOC_HIDE to prevent gcc from warning about short alloc */
	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}
/*
 * Check use-after-free detection for a 16-byte struct copy whose source
 * object has been freed.
 */
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}
/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

/* memset() of 2 bytes straddling the end of the object. */
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Hide the values so the memset() call cannot be folded away. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
	kfree(ptr);
}
/* memset() of 4 bytes straddling the end of the object. */
static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 4;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
	kfree(ptr);
}
/* memset() of 8 bytes straddling the end of the object. */
static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 8;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
	kfree(ptr);
}
/* memset() of 16 bytes straddling the end of the object. */
static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 16;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
	kfree(ptr);
}
/* memset() starting at the object base but overrunning it by one granule. */
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}
/*
 * Check that KASAN reports a memmove() whose size is a negative value
 * converted to a huge size_t.
 */
static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;	/* Wraps to SIZE_MAX - 1. */

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
/*
 * Check that KASAN reports a memmove() that reads past the end of the object
 * (source offset 4, length equal to the full object size).
 */
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}
/* Basic use-after-free detection for a small kmalloc() object. */
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}
/* Check that a memset() of a freed object is reported as a use-after-free. */
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
/*
 * Check use-after-free detection when the freed slot has been reallocated:
 * accessing the old pointer must still be reported even though the memory
 * now belongs to a live object.
 */
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	/* Both objects are now freed; the stale ptr1 must still be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
/*
 * Exercise every atomic and atomic_long operation on a bad ("unsafe")
 * address and expect a KASAN report from each. @safe points at valid
 * memory and is used as the other operand where one is needed.
 */
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
	int *i_unsafe = unsafe;

	/* Plain and acquire/release accesses. */
	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));

	/* atomic_t operations. */
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
	/* try_cmpxchg must be checked with the bad pointer in either slot. */
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));

	/* atomic_long_t operations, mirroring the atomic_t set above. */
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
}
/*
 * Check that KASAN reports out-of-bounds accesses performed through the
 * atomic ops exercised by kasan_atomics_helper().
 */
static void kasan_atomics(struct kunit *test)
{
	void *a1, *a2;

	/*
	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
	 * that the following 16 bytes will make up the redzone.
	 */
	a1 = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
	/* a2 is a valid atomic_long_t used as the "safe" operand in the helper. */
	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);

	/* Use atomics to access the redzone. */
	kasan_atomics_helper(test, a1 + 48, a2);

	kfree(a1);
	kfree(a2);
}
/* Check that a double free via kfree_sensitive() is detected. */
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* First free is legitimate; the second must trigger a KASAN report. */
	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}
/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* ksize() reports the usable size, which exceeds the requested size. */
	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	/* Keep the compiler from reasoning about (and eliding) the accesses below. */
	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/*
	 * These must trigger a KASAN report. The unaligned access just past
	 * `size` is only detected precisely by the Generic mode.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}
/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	/* Hide ptr from the optimizer so the freed-pointer accesses survive. */
	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}
  677. /*
  678. * The two tests below check that Generic KASAN prints auxiliary stack traces
  679. * for RCU callbacks and workqueues. The reports need to be inspected manually.
  680. *
  681. * These tests are still enabled for other KASAN modes to make sure that all
  682. * modes report bad accesses in tested scenarios.
  683. */
/* Shared state for the RCU use-after-free test (rcu_uaf) below. */
static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;	/* embedded head passed to call_rcu() */
} *global_rcu_ptr;
/*
 * RCU callback: frees the containing object and then deliberately reads a
 * field of it, producing the use-after-free that rcu_uaf() expects KASAN
 * to report (with an auxiliary RCU stack trace in Generic mode).
 */
static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	/* Intentional bad access: read from the just-freed object. */
	((volatile struct kasan_rcu_info *)fp)->i;
}
/* Trigger a use-after-free from within an RCU callback. */
static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Stash the pointer globally so the callback can reach it. */
	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	/* The bad access happens inside rcu_uaf_reclaim(); wait for it to run. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}
/* Work handler: frees the work item itself, setting up the UAF below. */
static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}
/* Trigger a use-after-free on a work item freed by its own handler. */
static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	/*
	 * destroy_workqueue() drains pending work, so by this point the
	 * handler has run and freed the item.
	 */
	destroy_workqueue(workqueue);

	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}
/*
 * Check that kfree() works (does not report) when the pointer is
 * reconstructed from the object's struct page plus its in-page offset.
 */
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Round-trip: virt -> page -> virt must yield a freeable pointer. */
	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
/*
 * Check that kfree() works (does not report) when the pointer is
 * round-tripped through its physical address.
 */
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Round-trip: virt -> phys -> virt must yield a freeable pointer. */
	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}
/* Check an out-of-bounds read from a kmem_cache-backed object. */
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* OOB_TAG_OFF shifts the offset as needed for tag-based KASAN modes. */
	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
/* Check that a double kmem_cache_free() is detected. */
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* First free is legitimate; the second must trigger a KASAN report. */
	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));

	kmem_cache_destroy(cache);
}
/* Check that freeing a pointer not returned by the allocator is detected. */
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
/*
 * Check that KASAN detects a use-after-free on a SLAB_TYPESAFE_BY_RCU cache
 * object only after the RCU grace period, while permitting reads from within
 * an RCU read-side critical section that began before the free.
 */
static void kmem_cache_rcu_uaf(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}
	*p = 1;

	rcu_read_lock();

	/* Free the object - this will internally schedule an RCU callback. */
	kmem_cache_free(cache, p);

	/*
	 * We should still be allowed to access the object at this point because
	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
	 * critical section since before the kmem_cache_free().
	 */
	READ_ONCE(*p);

	rcu_read_unlock();

	/*
	 * Wait for the RCU callback to execute; after this, the object should
	 * have actually been freed from KASAN's perspective.
	 */
	rcu_barrier();

	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));

	kmem_cache_destroy(cache);
}
  837. static void empty_cache_ctor(void *object) { }
/* Check that destroying a kmem_cache twice is detected. */
static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/* First destroy is legitimate; the second must trigger a report. */
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}
/*
 * Exercise alloc/free cycles on a SLAB_ACCOUNT cache. No KASAN report is
 * expected; this checks that accounted caches work under KASAN.
 */
static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
/*
 * Check that bulk-allocated objects are fully accessible (first and last
 * byte) without KASAN reports.
 */
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Touch both ends of every object; none of these should report. */
	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}
/*
 * Initialize *pool as a kmalloc-backed mempool and return one preallocated
 * element. The caller must mempool_free() the element and mempool_exit()
 * the pool when done.
 */
static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Allocate one element to prevent mempool from freeing elements to the
	 * underlying allocator and instead make it add them to the element
	 * list when the tests trigger double-free and invalid-free bugs.
	 * This allows testing KASAN annotations in add_element().
	 */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}
/*
 * Initialize *pool as a slab-backed mempool and return the backing cache.
 * The caller must mempool_exit() the pool and destroy the cache when done.
 */
static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
{
	struct kmem_cache *cache;
	int pool_size = 4;
	int ret;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_slab_pool(pool, pool_size, cache);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Do not allocate one preallocated element, as we skip the double-free
	 * and invalid-free tests for slab mempool for simplicity.
	 */
	return cache;
}
/*
 * Initialize *pool as a page-backed mempool of the given order and return
 * one preallocated element (a struct page pointer). The caller must
 * mempool_free() the element and mempool_exit() the pool when done.
 */
static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_page_pool(pool, pool_size, order);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* See the comment in mempool_prepare_kmalloc(). */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}
/* Access one byte past a mempool element and expect a KASAN report. */
static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	OPTIMIZER_HIDE_VAR(elem);

	/*
	 * Generic KASAN redzones the object precisely at `size`; tag-based
	 * modes only detect accesses past the granule-rounded size.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[size])[0]);
	else
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);

	mempool_free(elem, pool);
}
/* Out-of-bounds to the right on a kmalloc-backed mempool element. */
static void mempool_kmalloc_oob_right(struct kunit *test)
{
	mempool_t pool;
	/* Keep the intended OOB offset clear of the redzone granule. */
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/*
 * Out-of-bounds to the right on a kmalloc-backed mempool element large
 * enough to bypass the slab caches (page_alloc fallback).
 */
static void mempool_kmalloc_large_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Out-of-bounds to the right on a slab-backed mempool element. */
static void mempool_slab_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}
  979. /*
  980. * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
  981. * allocations have no redzones, and thus the out-of-bounds detection is not
  982. * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
  983. * the tag-based KASAN modes, the neighboring allocation might have the same
  984. * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
  985. */
/*
 * Free a mempool element and access it afterwards, expecting a KASAN
 * report. For page pools (@page true) the element is a struct page
 * pointer, so convert it to a virtual address first.
 */
static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
{
	char *elem, *ptr;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	ptr = page ? page_address((struct page *)elem) : elem;
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
/* Use-after-free on a kmalloc-backed mempool element. */
static void mempool_kmalloc_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Use-after-free on a large (page_alloc-backed) kmalloc mempool element. */
static void mempool_kmalloc_large_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Use-after-free on a slab-backed mempool element. */
static void mempool_slab_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}
/* Use-after-free on a page-backed mempool element. */
static void mempool_page_alloc_uaf(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	void *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_uaf_helper(test, &pool, true);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Free a mempool element twice; the second free must be reported. */
static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
}
/* Double-free on a kmalloc-backed mempool element. */
static void mempool_kmalloc_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Double-free on a large (page_alloc-backed) kmalloc mempool element. */
static void mempool_kmalloc_large_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Double-free on a page-backed mempool element. */
static void mempool_page_alloc_double_free(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	char *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Free an offset (invalid) pointer into a mempool element; expect a report. */
static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));

	/* Properly free the element; the invalid free above did not free it. */
	mempool_free(elem, pool);
}
/* Invalid free on a kmalloc-backed mempool element. */
static void mempool_kmalloc_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
/* Invalid free on a large (page_alloc-backed) kmalloc mempool element. */
static void mempool_kmalloc_large_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}
  1101. /*
  1102. * Skip the invalid-free test for page mempool. The invalid-free detection only
  1103. * works for compound pages and mempool preallocates all page elements without
  1104. * the __GFP_COMP flag.
  1105. */
  1106. static char global_array[10];
/* Out-of-bounds read past the end of a global array. */
static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
/* Out-of-bounds read before the start of a global array. */
static void kasan_global_oob_left(struct kunit *test)
{
	/* See comment in kasan_global_oob_right. */
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
/* Out-of-bounds read past the end of a stack array. */
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	/* Stack instrumentation requires CONFIG_KASAN_STACK. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
/* Out-of-bounds read before the start of an alloca/VLA buffer. */
static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;	/* volatile forces a true dynamic (VLA) allocation */
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
/* Out-of-bounds read past the end of an alloca/VLA buffer. */
static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;	/* volatile forces a true dynamic (VLA) allocation */
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
  1172. static void kasan_memchr(struct kunit *test)
  1173. {
  1174. char *ptr;
  1175. size_t size = 24;
  1176. /*
  1177. * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
  1178. * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
  1179. */
  1180. KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
  1181. if (OOB_TAG_OFF)
  1182. size = round_up(size, OOB_TAG_OFF);
  1183. ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
  1184. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
  1185. OPTIMIZER_HIDE_VAR(ptr);
  1186. OPTIMIZER_HIDE_VAR(ptr);
  1187. OPTIMIZER_HIDE_VAR(size);
  1188. KUNIT_EXPECT_KASAN_FAIL(test,
  1189. kasan_ptr_result = memchr(ptr, '1', size + 1));
  1190. kfree(ptr);
  1191. }
/* Check that KASAN detects an out-of-bounds read performed via memcmp(). */
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	/* Hide ptr and size so the compiler cannot fold away the OOB read. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);

	/* Compare one byte past the end of the allocation. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
/* Check that KASAN detects use-after-free through the str* helpers. */
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Keep the freed pointer opaque to the optimizer. */
	OPTIMIZER_HIDE_VAR(ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
/*
 * Expect a KASAN report from each modifying bitop applied to bit @nr of
 * @addr, where (nr, addr) points outside the valid allocation.
 */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
/*
 * Expect a KASAN report from each test-and-modify bitop applied to bit @nr
 * of @addr, where (nr, addr) points outside the valid allocation.
 */
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

	/* (1 << nr) must fit in a byte for the byte-wide op below. */
	if (nr < 7)
		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				xor_unlock_is_negative_byte(1 << nr, addr));
}
/* Check that Generic KASAN detects out-of-bounds accesses via bitops. */
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
/* Check that tag-based KASAN modes detect out-of-bounds accesses via bitops. */
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
/*
 * Check that vmalloc() returns tagged pointers under tag-based modes and
 * that the exported vmalloc helpers accept such tagged pointers.
 */
static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

/* set_memory_*() are not exported to modules, so only test them built-in. */
#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}
/* Check out-of-bounds detection for vmalloc allocations. */
static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}
/* Check that vmap() mappings get tagged under software tag-based KASAN. */
static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	/* The vmap()-returned pointer must carry a non-match-all tag. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}
/* Check that vm_map_ram() mappings get tagged under software tag-based KASAN. */
static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/* The returned pointer must carry a non-match-all tag. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}
  1426. /*
  1427. * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
  1428. * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
  1429. * modes.
  1430. */
  1431. static void match_all_not_assigned(struct kunit *test)
  1432. {
  1433. char *ptr;
  1434. struct page *pages;
  1435. int i, size, order;
  1436. KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
  1437. for (i = 0; i < 256; i++) {
  1438. size = get_random_u32_inclusive(1, 1024);
  1439. ptr = kmalloc(size, GFP_KERNEL);
  1440. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
  1441. KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
  1442. KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
  1443. kfree(ptr);
  1444. }
  1445. for (i = 0; i < 256; i++) {
  1446. order = get_random_u32_inclusive(1, 4);
  1447. pages = alloc_pages(GFP_KERNEL, order);
  1448. ptr = page_address(pages);
  1449. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
  1450. KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
  1451. KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
  1452. free_pages((unsigned long)ptr, order);
  1453. }
  1454. if (!kasan_vmalloc_enabled())
  1455. return;
  1456. for (i = 0; i < 256; i++) {
  1457. size = get_random_u32_inclusive(1, 1024);
  1458. ptr = vmalloc(size);
  1459. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
  1460. KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
  1461. KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
  1462. vfree(ptr);
  1463. }
  1464. }
  1465. /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
  1466. static void match_all_ptr_tag(struct kunit *test)
  1467. {
  1468. char *ptr;
  1469. u8 tag;
  1470. KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
  1471. ptr = kmalloc(128, GFP_KERNEL);
  1472. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
  1473. /* Backup the assigned tag. */
  1474. tag = get_tag(ptr);
  1475. KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
  1476. /* Reset the tag to 0xff.*/
  1477. ptr = set_tag(ptr, KASAN_TAG_KERNEL);
  1478. /* This access shouldn't trigger a KASAN report. */
  1479. *ptr = 0;
  1480. /* Recover the pointer tag and free. */
  1481. ptr = set_tag(ptr, tag);
  1482. kfree(ptr);
  1483. }
  1484. /* Check that there are no match-all memory tags for tag-based modes. */
  1485. static void match_all_mem_tag(struct kunit *test)
  1486. {
  1487. char *ptr;
  1488. int tag;
  1489. KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
  1490. ptr = kmalloc(128, GFP_KERNEL);
  1491. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
  1492. KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
  1493. /* For each possible tag value not matching the pointer tag. */
  1494. for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
  1495. /*
  1496. * For Software Tag-Based KASAN, skip the majority of tag
  1497. * values to avoid the test printing too many reports.
  1498. */
  1499. if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
  1500. tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
  1501. continue;
  1502. if (tag == get_tag(ptr))
  1503. continue;
  1504. /* Mark the first memory granule with the chosen memory tag. */
  1505. kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
  1506. /* This access must cause a KASAN report. */
  1507. KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
  1508. }
  1509. /* Recover the memory tag and free. */
  1510. kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
  1511. kfree(ptr);
  1512. }
  1513. /*
  1514. * Check that Rust performing a use-after-free using `unsafe` is detected.
  1515. * This is a smoke test to make sure that Rust is being sanitized properly.
  1516. */
  1517. static void rust_uaf(struct kunit *test)
  1518. {
  1519. KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
  1520. KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
  1521. }
/*
 * All KASAN KUnit test cases, run in declaration order.
 * Individual tests skip themselves when the required KASAN mode or kernel
 * config is not enabled.
 */
static struct kunit_case kasan_kunit_test_cases[] = {
	/* Slab (kmalloc) out-of-bounds, use-after-free, and invalid frees. */
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	/* Page allocator bugs. */
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	/* krealloc() resizing and use-after-free. */
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	/* Wide (16-byte) accesses and mem*()-based accesses. */
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	/* ksize() interaction with poisoning. */
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	/* Deferred-free contexts. */
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	/* kmem_cache API bugs. */
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_rcu_uaf),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	/* mempool-backed allocations. */
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	/* Globals, stack, and alloca. */
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	/* String/memory helpers, bitops, and atomics instrumentation. */
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kasan_atomics),
	/* vmalloc-space coverage. */
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	/* Match-all tag semantics for the tag-based modes. */
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	/* Rust sanitization smoke test. */
	KUNIT_CASE(rust_uaf),
	{}
};
/* KUnit suite definition: runs all KASAN test cases under the "kasan" name. */
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	/* Per-test teardown. */
	.exit = kasan_test_exit,
	/* Suite-wide setup/teardown hooks. */
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

/* Register the suite with the KUnit framework. */
kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");