vmw_balloon.c

// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher-level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP          (__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and the allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP         (GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED         16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT             0x5670
#define VMW_BALLOON_HV_MAGIC            0x456c6d6f
#define VMW_BALLOON_GUEST_ID            1       /* Linux */

enum vmwballoon_capabilities {
        /*
         * Bit 0 is reserved and not associated with any capability.
         */
        VMW_BALLOON_BASIC_CMDS                  = (1 << 1),
        VMW_BALLOON_BATCHED_CMDS                = (1 << 2),
        VMW_BALLOON_BATCHED_2M_CMDS             = (1 << 3),
        VMW_BALLOON_SIGNALLED_WAKEUP_CMD        = (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES        (VMW_BALLOON_BASIC_CMDS \
                                        | VMW_BALLOON_BATCHED_CMDS \
                                        | VMW_BALLOON_BATCHED_2M_CMDS \
                                        | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_SHIFT            (9)
#define VMW_BALLOON_NUM_PAGE_SIZES      (2)

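/*
 * Per-size arrays throughout the driver are indexed by the boolean
 * is_2m_pages: index 0 tracks 4 KB pages and index 1 tracks 2 MB pages.
 * VMW_BALLOON_2M_SHIFT is the allocation order of a 2 MB page in 4 KB
 * units (2^9 * 4 KB = 2 MB).
 */
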
/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available.
 *
 * VMW_BALLOON_BASIC_CMDS:
 *      LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *      BATCHED_LOCK and BATCHED_UNLOCK commands,
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *      BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *      VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */

#define VMW_BALLOON_CMD_START                   0
#define VMW_BALLOON_CMD_GET_TARGET              1
#define VMW_BALLOON_CMD_LOCK                    2
#define VMW_BALLOON_CMD_UNLOCK                  3
#define VMW_BALLOON_CMD_GUEST_ID                4
#define VMW_BALLOON_CMD_BATCHED_LOCK            6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK          7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK         8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK       9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET       10

/* error codes */
#define VMW_BALLOON_SUCCESS                     0
#define VMW_BALLOON_FAILURE                     -1
#define VMW_BALLOON_ERROR_CMD_INVALID           1
#define VMW_BALLOON_ERROR_PPN_INVALID           2
#define VMW_BALLOON_ERROR_PPN_LOCKED            3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED          4
#define VMW_BALLOON_ERROR_PPN_PINNED            5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED         6
#define VMW_BALLOON_ERROR_RESET                 7
#define VMW_BALLOON_ERROR_BUSY                  8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES   (0x03000000)

/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64            PAGE_SHIFT        6 0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES     (PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK   ((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK     (~((1UL << PAGE_SHIFT) - 1))

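/*
 * Worked example (illustrative): with PAGE_SHIFT == 12, a 4 KB page
 * with PFN 0x1234 is stored as the 64-bit entry 0x1234000 (its physical
 * address, low bits clear). After a batched command completes, the
 * hypervisor reports the per-page result in the low status bits, which
 * vmballoon_batch_get_status() extracts.
 */
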
struct vmballoon_batch_page {
        u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
        return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
                                int idx)
{
        return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
                                u64 pa)
{
        batch->pages[idx] = pa;
}

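/*
 * Execute a backdoor command. As the register constraints below show,
 * the magic goes in %eax, the command number in %ecx, the port in %edx,
 * arg1 in %ebx and arg2 in %esi; "inl %%dx" traps to the hypervisor and
 * the status comes back in %eax. The START command returns its result
 * (the capability set) in %ecx rather than %ebx, hence the fix-up of
 * "result" after the asm block. The macro evaluates to the status.
 */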
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)             \
({                                                              \
        unsigned long __status, __dummy1, __dummy2, __dummy3;   \
        __asm__ __volatile__ ("inl %%dx" :                      \
                "=a"(__status),                                 \
                "=c"(__dummy1),                                 \
                "=d"(__dummy2),                                 \
                "=b"(result),                                   \
                "=S" (__dummy3) :                               \
                "0"(VMW_BALLOON_HV_MAGIC),                      \
                "1"(VMW_BALLOON_CMD_##cmd),                     \
                "2"(VMW_BALLOON_HV_PORT),                       \
                "3"(arg1),                                      \
                "4" (arg2) :                                    \
                "memory");                                      \
        if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)     \
                result = __dummy1;                              \
        result &= -1UL;                                         \
        __status & -1UL;                                        \
})

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
        unsigned int timer;
        unsigned int doorbell;

        /* allocation statistics */
        unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int sleep_alloc;
        unsigned int sleep_alloc_fail;
        unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

        /* monitor operations */
        unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int target;
        unsigned int target_fail;
        unsigned int start;
        unsigned int start_fail;
        unsigned int guest_type;
        unsigned int guest_type_fail;
        unsigned int doorbell_set;
        unsigned int doorbell_unset;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

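/*
 * Without CONFIG_DEBUG_FS, STATS_INC() expands to nothing, so the
 * statistics bookkeeping above costs nothing in production builds.
 */
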
struct vmballoon;

struct vmballoon_ops {
        void (*add_page)(struct vmballoon *b, int idx, struct page *p);
        int (*lock)(struct vmballoon *b, unsigned int num_pages,
                        bool is_2m_pages, unsigned int *target);
        int (*unlock)(struct vmballoon *b, unsigned int num_pages,
                        bool is_2m_pages, unsigned int *target);
};

struct vmballoon_page_size {
        /* list of reserved physical pages */
        struct list_head pages;

        /* transient list of non-balloonable pages */
        struct list_head refused_pages;
        unsigned int n_refused_pages;
};

struct vmballoon {
        struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

        /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
        unsigned supported_page_sizes;

        /* balloon size in pages */
        unsigned int size;
        unsigned int target;

        /* reset flag */
        bool reset_required;

        unsigned long capabilities;

        struct vmballoon_batch_page *batch_page;
        unsigned int batch_max_pages;
        struct page *page;

        const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
        /* statistics */
        struct vmballoon_stats stats;

        /* debugfs file exporting statistics */
        struct dentry *dbg_entry;
#endif

        struct sysinfo sysinfo;

        struct delayed_work dwork;

        struct vmci_handle vmci_doorbell;
};

static struct vmballoon balloon;

/*
 * Send the "start" command to the host, communicating the supported
 * protocol version.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
        unsigned long status, capabilities, dummy = 0;
        bool success;

        STATS_INC(b->stats.start);

        status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

        switch (status) {
        case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
                b->capabilities = capabilities;
                success = true;
                break;
        case VMW_BALLOON_SUCCESS:
                b->capabilities = VMW_BALLOON_BASIC_CMDS;
                success = true;
                break;
        default:
                success = false;
        }

        /*
         * 2MB pages are only supported with batching. If batching is for some
         * reason disabled, do not use 2MB pages, since otherwise the legacy
         * mechanism is used with 2MB pages, causing a failure.
         */
        if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
            (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
                b->supported_page_sizes = 2;
        else
                b->supported_page_sizes = 1;

        if (!success) {
                pr_debug("%s - failed, hv returns %ld\n", __func__, status);
                STATS_INC(b->stats.start_fail);
        }
        return success;
}

static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
        switch (status) {
        case VMW_BALLOON_SUCCESS:
                return true;
        case VMW_BALLOON_ERROR_RESET:
                b->reset_required = true;
                /* fall through */
        default:
                return false;
        }
}

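/*
 * Any command that comes back with VMW_BALLOON_ERROR_RESET latches
 * reset_required; the periodic worker (vmballoon_work) notices the flag
 * on its next run and performs the full reset sequence.
 */
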
/*
 * Communicate the guest type to the host so that it can adjust the
 * ballooning algorithm to the one most appropriate for the guest. This
 * command is normally issued after sending the "start" command and is
 * part of the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
        unsigned long status, dummy = 0;

        status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
                                dummy);

        STATS_INC(b->stats.guest_type);

        if (vmballoon_check_status(b, status))
                return true;

        pr_debug("%s - failed, hv returns %ld\n", __func__, status);
        STATS_INC(b->stats.guest_type_fail);
        return false;
}

static u16 vmballoon_page_size(bool is_2m_page)
{
        if (is_2m_page)
                return 1 << VMW_BALLOON_2M_SHIFT;

        return 1;
}

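/*
 * Worked example: vmballoon_page_size(true) == 1 << 9 == 512, i.e. one
 * 2 MB page accounts for 512 basic 4 KB pages in b->size.
 */
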
/*
 * Retrieve the desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
        unsigned long status;
        unsigned long target;
        unsigned long limit;
        unsigned long dummy = 0;
        u32 limit32;

        /*
         * si_meminfo() is cheap. Moreover, we want to provide dynamic
         * max balloon size later. So let us call si_meminfo() every
         * iteration.
         */
        si_meminfo(&b->sysinfo);
        limit = b->sysinfo.totalram;

        /* Ensure limit fits in 32-bits */
        limit32 = (u32)limit;
        if (limit != limit32)
                return false;

        /* update stats */
        STATS_INC(b->stats.target);

        status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
        if (vmballoon_check_status(b, status)) {
                *new_target = target;
                return true;
        }

        pr_debug("%s - failed, hv returns %ld\n", __func__, status);
        STATS_INC(b->stats.target_fail);
        return false;
}

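/*
 * Both the limit sent and the target received are in units of basic
 * pages, matching b->size accounting. The 32-bit check above is
 * presumably needed because the protocol carries the memory limit in a
 * 32-bit field.
 */
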
/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and maybe submit a
 * different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
                                unsigned int *hv_status, unsigned int *target)
{
        unsigned long status, dummy = 0;
        u32 pfn32;

        pfn32 = (u32)pfn;
        if (pfn32 != pfn)
                return -EINVAL;

        STATS_INC(b->stats.lock[false]);

        *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
        if (vmballoon_check_status(b, status))
                return 0;

        pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.lock_fail[false]);
        return -EIO;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
        unsigned long status;
        unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));

        STATS_INC(b->stats.lock[is_2m_pages]);

        if (is_2m_pages)
                status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
                                *target);
        else
                status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
                                *target);

        if (vmballoon_check_status(b, status))
                return 0;

        pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.lock_fail[is_2m_pages]);
        return 1;
}

/*
 * Notify the host that the guest intends to release the given page back
 * into the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
                                                unsigned int *target)
{
        unsigned long status, dummy = 0;
        u32 pfn32;

        pfn32 = (u32)pfn;
        if (pfn32 != pfn)
                return false;

        STATS_INC(b->stats.unlock[false]);

        status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
        if (vmballoon_check_status(b, status))
                return true;

        pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.unlock_fail[false]);
        return false;
}

static bool vmballoon_send_batched_unlock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
        unsigned long status;
        unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));

        STATS_INC(b->stats.unlock[is_2m_pages]);

        if (is_2m_pages)
                status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
                                *target);
        else
                status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
                                *target);

        if (vmballoon_check_status(b, status))
                return true;

        pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.unlock_fail[is_2m_pages]);
        return false;
}

static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
        if (is_2m_page)
                return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

        return alloc_page(flags);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
        if (is_2m_page)
                __free_pages(page, VMW_BALLOON_2M_SHIFT);
        else
                __free_page(page);
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike a normal "deflate" we do not (shall not) notify the
 * host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
        struct page *page, *next;
        unsigned is_2m_pages;

        for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
                        is_2m_pages++) {
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];
                u16 size_per_page = vmballoon_page_size(is_2m_pages);

                list_for_each_entry_safe(page, next, &page_size->pages, lru) {
                        list_del(&page->lru);
                        vmballoon_free_page(page, is_2m_pages);
                        STATS_INC(b->stats.free[is_2m_pages]);
                        b->size -= size_per_page;
                        cond_resched();
                }
        }

        /* Clearing the batch_page unconditionally has no adverse effect */
        free_page((unsigned long)b->batch_page);
        b->batch_page = NULL;
}

/*
 * Notify the host of a ballooned page. If the host rejects the page,
 * put it on the refused list; refused pages are then released at the
 * end of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
                                bool is_2m_pages, unsigned int *target)
{
        int locked, hv_status;
        struct page *page = b->page;
        struct vmballoon_page_size *page_size = &b->page_sizes[false];

        /* is_2m_pages can never happen as 2m pages support implies batching */

        locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                                                target);
        if (locked) {
                STATS_INC(b->stats.refused_alloc[false]);

                if (locked == -EIO &&
                    (hv_status == VMW_BALLOON_ERROR_RESET ||
                     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
                        vmballoon_free_page(page, false);
                        return -EIO;
                }

                /*
                 * Place page on the list of non-balloonable pages
                 * and retry allocation, unless we already accumulated
                 * too many of them, in which case take a breather.
                 */
                if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
                        page_size->n_refused_pages++;
                        list_add(&page->lru, &page_size->refused_pages);
                } else {
                        vmballoon_free_page(page, false);
                }
                return locked;
        }

        /* track allocated page */
        list_add(&page->lru, &page_size->pages);

        /* update balloon size */
        b->size++;

        return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
        int locked, i;
        u16 size_per_page = vmballoon_page_size(is_2m_pages);

        locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
                        target);
        if (locked > 0) {
                for (i = 0; i < num_pages; i++) {
                        u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
                        struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

                        vmballoon_free_page(p, is_2m_pages);
                }

                return -EIO;
        }

        for (i = 0; i < num_pages; i++) {
                u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
                struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];

                locked = vmballoon_batch_get_status(b->batch_page, i);

                switch (locked) {
                case VMW_BALLOON_SUCCESS:
                        list_add(&p->lru, &page_size->pages);
                        b->size += size_per_page;
                        break;
                case VMW_BALLOON_ERROR_PPN_PINNED:
                case VMW_BALLOON_ERROR_PPN_INVALID:
                        if (page_size->n_refused_pages
                                        < VMW_BALLOON_MAX_REFUSED) {
                                list_add(&p->lru, &page_size->refused_pages);
                                page_size->n_refused_pages++;
                                break;
                        }
                        /* Fallthrough */
                case VMW_BALLOON_ERROR_RESET:
                case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
                        vmballoon_free_page(p, is_2m_pages);
                        break;
                default:
                        /* This should never happen */
                        WARN_ON_ONCE(true);
                }
        }

        return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
                bool is_2m_pages, unsigned int *target)
{
        struct page *page = b->page;
        struct vmballoon_page_size *page_size = &b->page_sizes[false];

        /* is_2m_pages can never happen as 2m pages support implies batching */

        if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
                list_add(&page->lru, &page_size->pages);
                return -EIO;
        }

        /* deallocate page */
        vmballoon_free_page(page, false);
        STATS_INC(b->stats.free[false]);

        /* update balloon size */
        b->size--;

        return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
                                unsigned int num_pages, bool is_2m_pages,
                                unsigned int *target)
{
        int locked, i, ret = 0;
        bool hv_success;
        u16 size_per_page = vmballoon_page_size(is_2m_pages);

        hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
                        target);
        if (!hv_success)
                ret = -EIO;

        for (i = 0; i < num_pages; i++) {
                u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
                struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];

                locked = vmballoon_batch_get_status(b->batch_page, i);
                if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
                        /*
                         * That page wasn't successfully unlocked by the
                         * hypervisor, re-add it to the list of pages owned by
                         * the balloon driver.
                         */
                        list_add(&p->lru, &page_size->pages);
                } else {
                        /* deallocate page */
                        vmballoon_free_page(p, is_2m_pages);
                        STATS_INC(b->stats.free[is_2m_pages]);

                        /* update balloon size */
                        b->size -= size_per_page;
                }
        }

        return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
                bool is_2m_pages)
{
        struct page *page, *next;
        struct vmballoon_page_size *page_size =
                        &b->page_sizes[is_2m_pages];

        list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
                list_del(&page->lru);
                vmballoon_free_page(page, is_2m_pages);
                STATS_INC(b->stats.refused_free[is_2m_pages]);
        }

        page_size->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
        b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
                                struct page *p)
{
        vmballoon_batch_set_pa(b->batch_page, idx,
                        (u64)page_to_pfn(p) << PAGE_SHIFT);
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
        unsigned int num_pages = 0;
        int error = 0;
        gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
        bool is_2m_pages;

        pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

        /*
         * First try NOSLEEP page allocations to inflate balloon.
         *
         * If we do not throttle nosleep allocations, we can drain all
         * free pages in the guest quickly (if the balloon target is high).
         * As a side-effect, draining free pages helps to inform (force)
         * the guest to start swapping if balloon target is not met yet,
         * which is a desired behavior. However, balloon driver can consume
         * all available CPU cycles if too many pages are allocated in a
         * second. Therefore, we throttle nosleep allocations even when
         * the guest is not under memory pressure. OTOH, if we have already
         * predicted that the guest is under memory pressure, then we
         * slow down page allocations considerably.
         */

        /*
         * Start with NOSLEEP allocations, whose rate may be higher than
         * that of sleeping allocations.
         */
        is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;

        pr_debug("%s - goal: %d", __func__, b->target - b->size);

        while (!b->reset_required &&
                b->size + num_pages * vmballoon_page_size(is_2m_pages)
                < b->target) {
                struct page *page;

                if (flags == VMW_PAGE_ALLOC_NOSLEEP)
                        STATS_INC(b->stats.alloc[is_2m_pages]);
                else
                        STATS_INC(b->stats.sleep_alloc);

                page = vmballoon_alloc_page(flags, is_2m_pages);
                if (!page) {
                        STATS_INC(b->stats.alloc_fail[is_2m_pages]);

                        if (is_2m_pages) {
                                b->ops->lock(b, num_pages, true, &b->target);

                                /*
                                 * ignore errors from locking as we now switch
                                 * to 4k pages and we might get different
                                 * errors.
                                 */

                                num_pages = 0;
                                is_2m_pages = false;
                                continue;
                        }

                        if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
                                /*
                                 * CANSLEEP page allocation failed, so guest
                                 * is under severe memory pressure. We just log
                                 * the event, but do not stop the inflation
                                 * due to its negative impact on performance.
                                 */
                                STATS_INC(b->stats.sleep_alloc_fail);
                                break;
                        }

                        /*
                         * NOSLEEP page allocation failed, so the guest is
                         * under memory pressure. Slowing down page allocations
                         * seems to be reasonable, but doing so might actually
                         * cause the hypervisor to throttle us down, resulting
                         * in degraded performance. We will count on the
                         * scheduler and standard memory management mechanisms
                         * for now.
                         */
                        flags = VMW_PAGE_ALLOC_CANSLEEP;
                        continue;
                }

                b->ops->add_page(b, num_pages++, page);
                if (num_pages == b->batch_max_pages) {
                        error = b->ops->lock(b, num_pages, is_2m_pages,
                                        &b->target);
                        num_pages = 0;
                        if (error)
                                break;
                }

                cond_resched();
        }

        if (num_pages > 0)
                b->ops->lock(b, num_pages, is_2m_pages, &b->target);

        vmballoon_release_refused_pages(b, true);
        vmballoon_release_refused_pages(b, false);
}

/*
 * Decrease the size of the balloon, allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
        unsigned is_2m_pages;

        pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

        /* free pages to reach target */
        for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
                        is_2m_pages++) {
                struct page *page, *next;
                unsigned int num_pages = 0;
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];

                list_for_each_entry_safe(page, next, &page_size->pages, lru) {
                        if (b->reset_required ||
                                (b->target > 0 &&
                                        b->size - num_pages
                                        * vmballoon_page_size(is_2m_pages)
                                < b->target + vmballoon_page_size(true)))
                                break;

                        list_del(&page->lru);
                        b->ops->add_page(b, num_pages++, page);

                        if (num_pages == b->batch_max_pages) {
                                int error;

                                error = b->ops->unlock(b, num_pages,
                                                is_2m_pages, &b->target);
                                num_pages = 0;
                                if (error)
                                        return;
                        }

                        cond_resched();
                }

                if (num_pages > 0)
                        b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
        }
}

static const struct vmballoon_ops vmballoon_basic_ops = {
        .add_page = vmballoon_add_page,
        .lock = vmballoon_lock_page,
        .unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
        .add_page = vmballoon_add_batched_page,
        .lock = vmballoon_lock_batched_page,
        .unlock = vmballoon_unlock_batched_page
};

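/*
 * One of the two ops tables above is selected in vmballoon_reset(): the
 * batched ops (batch_max_pages == VMW_BALLOON_BATCH_MAX_PAGES) when the
 * host advertises VMW_BALLOON_BATCHED_CMDS, and the basic ops
 * (batch_max_pages == 1, one hypercall per page) otherwise.
 */
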
static bool vmballoon_init_batching(struct vmballoon *b)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return false;

        b->batch_page = page_address(page);
        return true;
}

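/*
 * The batch page is a single zeroed page, which with 4 KB pages holds
 * PAGE_SIZE / sizeof(u64) == 512 entries (VMW_BALLOON_BATCH_MAX_PAGES).
 */
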
/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
        struct vmballoon *b = client_data;

        STATS_INC(b->stats.doorbell);

        mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
        int error;

        VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
                        VMCI_INVALID_ID, error);
        STATS_INC(b->stats.doorbell_unset);

        if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
                vmci_doorbell_destroy(b->vmci_doorbell);
                b->vmci_doorbell = VMCI_INVALID_HANDLE;
        }
}

/*
 * Initialize the vmci doorbell, to get notified as soon as the balloon
 * target changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
        unsigned long error, dummy;

        if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
                return 0;

        error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
                                     VMCI_PRIVILEGE_FLAG_RESTRICTED,
                                     vmballoon_doorbell, b);

        if (error != VMCI_SUCCESS)
                goto fail;

        error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
                                   b->vmci_doorbell.resource, dummy);

        STATS_INC(b->stats.doorbell_set);

        if (error != VMW_BALLOON_SUCCESS)
                goto fail;

        return 0;
fail:
        vmballoon_vmci_cleanup(b);
        return -EIO;
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
        int error;

        vmballoon_vmci_cleanup(b);

        /* free all pages, skipping monitor unlock */
        vmballoon_pop(b);

        if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
                return;

        if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
                b->ops = &vmballoon_batched_ops;
                b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
                if (!vmballoon_init_batching(b)) {
                        /*
                         * We failed to initialize batching, inform the monitor
                         * about it by sending a null capability.
                         *
                         * The guest will retry in one second.
                         */
                        vmballoon_send_start(b, 0);
                        return;
                }
        } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
                b->ops = &vmballoon_basic_ops;
                b->batch_max_pages = 1;
        }

        b->reset_required = false;

        error = vmballoon_vmci_init(b);
        if (error)
                pr_err("failed to initialize vmci doorbell\n");

        if (!vmballoon_send_guest_id(b))
                pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
        unsigned int target;

        STATS_INC(b->stats.timer);

        if (b->reset_required)
                vmballoon_reset(b);

        if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
                /* update target, adjust size */
                b->target = target;

                if (b->size < target)
                        vmballoon_inflate(b);
                else if (target == 0 ||
                                b->size > target + vmballoon_page_size(true))
                        vmballoon_deflate(b);
        }

        /*
         * We are using a freezable workqueue so that balloon operations are
         * stopped while the system transitions to/from sleep/hibernation.
         */
        queue_delayed_work(system_freezable_wq,
                           dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
        struct vmballoon *b = f->private;
        struct vmballoon_stats *stats = &b->stats;

        /* format capabilities info */
        seq_printf(f,
                   "balloon capabilities:   %#4x\n"
                   "used capabilities:      %#4lx\n"
                   "is resetting:           %c\n",
                   VMW_BALLOON_CAPABILITIES, b->capabilities,
                   b->reset_required ? 'y' : 'n');

        /* format size info */
        seq_printf(f,
                   "target:             %8d pages\n"
                   "current:            %8d pages\n",
                   b->target, b->size);

        seq_printf(f,
                   "\n"
                   "timer:              %8u\n"
                   "doorbell:           %8u\n"
                   "start:              %8u (%4u failed)\n"
                   "guestType:          %8u (%4u failed)\n"
                   "2m-lock:            %8u (%4u failed)\n"
                   "lock:               %8u (%4u failed)\n"
                   "2m-unlock:          %8u (%4u failed)\n"
                   "unlock:             %8u (%4u failed)\n"
                   "target:             %8u (%4u failed)\n"
                   "prim2mAlloc:        %8u (%4u failed)\n"
                   "primNoSleepAlloc:   %8u (%4u failed)\n"
                   "primCanSleepAlloc:  %8u (%4u failed)\n"
                   "prim2mFree:         %8u\n"
                   "primFree:           %8u\n"
                   "err2mAlloc:         %8u\n"
                   "errAlloc:           %8u\n"
                   "err2mFree:          %8u\n"
                   "errFree:            %8u\n"
                   "doorbellSet:        %8u\n"
                   "doorbellUnset:      %8u\n",
                   stats->timer,
                   stats->doorbell,
                   stats->start, stats->start_fail,
                   stats->guest_type, stats->guest_type_fail,
                   stats->lock[true], stats->lock_fail[true],
                   stats->lock[false], stats->lock_fail[false],
                   stats->unlock[true], stats->unlock_fail[true],
                   stats->unlock[false], stats->unlock_fail[false],
                   stats->target, stats->target_fail,
                   stats->alloc[true], stats->alloc_fail[true],
                   stats->alloc[false], stats->alloc_fail[false],
                   stats->sleep_alloc, stats->sleep_alloc_fail,
                   stats->free[true],
                   stats->free[false],
                   stats->refused_alloc[true], stats->refused_alloc[false],
                   stats->refused_free[true], stats->refused_free[false],
                   stats->doorbell_set, stats->doorbell_unset);

        return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
        .owner          = THIS_MODULE,
        .open           = vmballoon_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
        int error;

        b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
                                           &vmballoon_debug_fops);
        if (IS_ERR(b->dbg_entry)) {
                error = PTR_ERR(b->dbg_entry);
                pr_err("failed to create debugfs entry, error: %d\n", error);
                return error;
        }

        return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
        debugfs_remove(b->dbg_entry);
}

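/*
 * With debugfs mounted in the usual place, the statistics above can be
 * read with:  cat /sys/kernel/debug/vmmemctl
 */
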
#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
        return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif  /* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
        int error;
        unsigned is_2m_pages;

        /*
         * Check if we are running on VMware's hypervisor and bail out
         * if we are not.
         */
        if (x86_hyper_type != X86_HYPER_VMWARE)
                return -ENODEV;

        for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
                        is_2m_pages++) {
                INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
                INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
        }

        INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

        error = vmballoon_debugfs_init(&balloon);
        if (error)
                return error;

        balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
        balloon.batch_page = NULL;
        balloon.page = NULL;
        balloon.reset_required = true;

        queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

        return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
        vmballoon_vmci_cleanup(&balloon);
        cancel_delayed_work_sync(&balloon.dwork);

        vmballoon_debugfs_exit(&balloon);

        /*
         * Deallocate all reserved memory, and reset connection with monitor.
         * Reset connection before deallocating memory to avoid potential for
         * additional spurious resets from guest touching deallocated pages.
         */
        vmballoon_send_start(&balloon, 0);
        vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);