/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
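
/*
 * Buffers are kept on alloc->buffers in address order, so a buffer's list
 * neighbours are also its neighbours in the mapped region. The helpers
 * below walk that list.
 */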
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
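
/*
 * Free buffers are kept in an rbtree ordered by size so that
 * binder_alloc_new_buf_locked() can do a best-fit lookup.
 */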
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
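
/*
 * In-use buffers are tracked in a second rbtree, keyed by the buffer's
 * kernel data address; two live allocations can never share an address,
 * so a duplicate key here is a bug.
 */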
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate the userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Search the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
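
/*
 * binder_update_page_range() backs (allocate != 0) or releases
 * (allocate == 0) the physical pages for [start, end). Released pages are
 * not freed immediately; they are parked on binder_alloc_lru so the
 * shrinker can reclaim them under memory pressure.
 */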
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;
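
	/*
	 * The free path below walks the range backwards and only parks each
	 * page on the LRU; the mappings stay in place until the shrinker
	 * actually reclaims the page. The error labels further down sit
	 * inside this loop on purpose: a failed allocation above jumps to
	 * the matching label, cleans up the page that failed, and then lets
	 * the loop hand the pages it had already set up back to the LRU.
	 */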
free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures are
	 * set up completely. Look at the smp_rmb() side in
	 * binder_alloc_get_vma().
	 * We also want to guarantee the new alloc->vma_vm_mm is always
	 * visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at the description in binder_alloc_set_vma() */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}
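
/*
 * Best-fit allocator: walk the size-ordered free tree for the smallest
 * free buffer that can hold the request, carve the request out of it,
 * and return any remainder to the free tree as a new free buffer.
 */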
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
			(void *)PAGE_ALIGN((uintptr_t)buffer->data),
			end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user-specified size of the buffer offsets array
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
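
/*
 * Helpers for the free path: buffer_start_page() is the page containing the
 * first byte of a buffer, prev_buffer_end_page() the page containing the
 * last byte of the buffer that precedes it in the mapping.
 */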
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
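
/*
 * Return a buffer to the free tree. Adjacent free buffers on either side
 * are coalesced into this one, so the free tree never holds two
 * neighbouring free entries.
 */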
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
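
/*
 * Tear down a proc's allocator state once its mapping is gone (note the
 * BUG_ON(alloc->vma) below): free every remaining allocated buffer, every
 * buffer descriptor, all still-resident pages (pulling them off the
 * shrinker LRU first) and the page array, then drop the mm reference
 * taken at mmap time.
 */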
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru holding the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!down_read_trylock(&mm->mmap_sem))
		goto err_down_read_mmap_sem_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_read(&mm->mmap_sem);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_read_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
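
/*
 * Shrinker hooks: "count" reports how many pages sit on the global binder
 * LRU, "scan" walks that list and hands each entry to
 * binder_alloc_free_page() above.
 */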
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}