// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to the previous buffer by address.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	/**
	 * @LOOP_END: The number of enum values in &buf_end_align_type.
	 * It is used for controlling loop termination.
	 */
	LOOP_END,
};
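
/*
 * Scale of the test, derived from the constants above: each of the
 * BUFFER_NUM buffers can end in LOOP_END (5) alignments, giving
 * 5^5 = 3125 end-offset combinations. Each combination is exercised
 * with two size sets (front- and back-weighted) and all
 * BUFFER_NUM! = 120 free orders, so one run performs
 * 3125 * 2 * 120 = 750,000 alloc/free rounds.
 */
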
static void pr_err_size_seq(size_t *sizes, int *seq)
{
	int i;

	pr_err("alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	pr_err("free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}
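
/*
 * Verify that every page spanned by @buffer is resident: each page must
 * have a page_ptr and must not sit on the freelist lru. Logs the first
 * offending page index and returns false on a mismatch.
 */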
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}
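
/*
 * Allocate all BUFFER_NUM buffers with the requested sizes and check
 * that their backing pages were actually allocated. Failures are
 * counted in binder_selftest_failures rather than aborting the run.
 */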
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}
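
/*
 * Free all BUFFER_NUM buffers in the order given by @seq, then check
 * that every page up to @end was moved onto the freelist lru rather
 * than being unmapped outright.
 */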
static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i < end / PAGE_SIZE; i++) {
		/*
		 * An error message on a free page can be a false
		 * positive if the binder shrinker ran during the
		 * binder_alloc_free_buf() calls above.
		 */
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}
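
/*
 * Drain the freelist by reclaiming pages the same way the shrinker
 * would, then verify that no page in the buffer space remains
 * allocated.
 */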
static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_freelist))) {
		list_lru_walk(&binder_freelist, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}
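
/*
 * One full lifecycle for a given size set and free order: allocate,
 * free (pages land on the lru), allocate again (pages must be reused
 * from the lru, leaving it empty), free once more, then reclaim and
 * check that every page is released.
 */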
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_freelist))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}
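
/* Return true if @val already appears in the first @index entries of @seq. */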
static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}
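
/*
 * Convert the cumulative end offsets into two size sets: front_sizes
 * holds the sizes in offset order; back_sizes is the same set mirrored,
 * with the space between the last end offset and the end of the buffer
 * area folded into its first buffer so the tail pages get exercised
 * too. Each set is run through every possible free order.
 */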
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}
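
/*
 * Recursively choose an end alignment for each of the BUFFER_NUM
 * buffers, covering every buf_end_align_type case; at full depth, hand
 * the resulting end offsets to binder_selftest_alloc_size().
 */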
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;
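
	/*
	 * The unaligned cases each grow the end offset by
	 * BUFFER_MIN_SIZE, so all BUFFER_NUM minimum-size steps must
	 * fit strictly within one page for the same-page layouts drawn
	 * in buf_end_align_type to be constructible.
	 */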
	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}
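
/*
 * Entry point. This is expected to be triggered once per boot from the
 * binder core (the call site is believed to be binder_ioctl() in
 * binder.c, though it may vary by kernel version); when
 * CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set, binder_alloc.h should
 * provide an empty inline stub so callers need no #ifdef.
 */
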
/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");
done:
	mutex_unlock(&binder_selftest_lock);
}
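
/*
 * Illustrative dmesg output for a clean run (the exact prefix comes
 * from pr_fmt()/KBUILD_MODNAME and may differ by build):
 *
 *   binder_alloc_selftest: STARTED
 *   binder_alloc_selftest: PASSED
 */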