page_io.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"
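
/*
 * Completion handling for swap-out bios. __end_swap_bio_write() does the
 * folio bookkeeping; end_swap_bio_write() is the bi_end_io callback used by
 * the asynchronous write path and additionally drops the bio reference.
 */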
static void __end_swap_bio_write(struct bio *bio)
{
        struct folio *folio = bio_first_folio_all(bio);

        if (bio->bi_status) {
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
                 */
                folio_mark_dirty(folio);
                pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                folio_clear_reclaim(folio);
        }
        folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
        __end_swap_bio_write(bio);
        bio_put(bio);
}
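
/*
 * Completion handling for swap-in bios: mark the folio uptodate only if the
 * read succeeded, then unlock it. end_swap_bio_read() is the bi_end_io
 * callback used by the asynchronous read path and also drops the bio
 * reference.
 */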
static void __end_swap_bio_read(struct bio *bio)
{
        struct folio *folio = bio_first_folio_all(bio);

        if (bio->bi_status) {
                pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
        } else {
                folio_mark_uptodate(folio);
        }
        folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
        __end_swap_bio_read(bio);
        bio_put(bio);
}
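
/*
 * generic_swapfile_activate - build the swap extent tree for a swapfile.
 *
 * Walks the file block by block via bmap(), requiring each PAGE_SIZE chunk
 * to be contiguous and PAGE_SIZE-aligned on disk, and records every such
 * run as a swap extent. Returns the number of extents added or a negative
 * error; a swapfile with holes gets -EINVAL.
 */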
int generic_swapfile_activate(struct swap_info_struct *sis,
                              struct file *swap_file,
                              sector_t *span)
{
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned blocks_per_page;
        unsigned long page_no;
        unsigned blkbits;
        sector_t probe_block;
        sector_t last_block;
        sector_t lowest_block = -1;
        sector_t highest_block = 0;
        int nr_extents = 0;
        int ret;

        blkbits = inode->i_blkbits;
        blocks_per_page = PAGE_SIZE >> blkbits;

        /*
         * Map all the blocks into the extent tree. This code doesn't try
         * to be very smart.
         */
        probe_block = 0;
        page_no = 0;
        last_block = i_size_read(inode) >> blkbits;
        while ((probe_block + blocks_per_page) <= last_block &&
               page_no < sis->max) {
                unsigned block_in_page;
                sector_t first_block;

                cond_resched();

                first_block = probe_block;
                ret = bmap(inode, &first_block);
                if (ret || !first_block)
                        goto bad_bmap;

                /*
                 * It must be PAGE_SIZE aligned on-disk
                 */
                if (first_block & (blocks_per_page - 1)) {
                        probe_block++;
                        goto reprobe;
                }

                for (block_in_page = 1; block_in_page < blocks_per_page;
                     block_in_page++) {
                        sector_t block;

                        block = probe_block + block_in_page;
                        ret = bmap(inode, &block);
                        if (ret || !block)
                                goto bad_bmap;

                        if (block != first_block + block_in_page) {
                                /* Discontiguity */
                                probe_block++;
                                goto reprobe;
                        }
                }

                first_block >>= (PAGE_SHIFT - blkbits);
                if (page_no) {  /* exclude the header page */
                        if (first_block < lowest_block)
                                lowest_block = first_block;
                        if (first_block > highest_block)
                                highest_block = first_block;
                }

                /*
                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
                 */
                ret = add_swap_extent(sis, page_no, 1, first_block);
                if (ret < 0)
                        goto out;
                nr_extents += ret;
                page_no++;
                probe_block += blocks_per_page;
reprobe:
                continue;
        }
        ret = nr_extents;
        *span = 1 + highest_block - lowest_block;
        if (page_no == 0)
                page_no = 1;    /* force Empty message */
        sis->max = page_no;
        sis->pages = page_no - 1;
        sis->highest_bit = page_no - 1;
out:
        return ret;
bad_bmap:
        pr_err("swapon: swapfile has holes\n");
        ret = -EINVAL;
        goto out;
}
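
/*
 * Returns true only if every word of every page in the folio is zero. The
 * last word of each page is tested first: pages that are zero at the start
 * but not at the end are common, so this lets us bail out early.
 */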
static bool is_folio_zero_filled(struct folio *folio)
{
        unsigned int pos, last_pos;
        unsigned long *data;
        unsigned int i;

        last_pos = PAGE_SIZE / sizeof(*data) - 1;
        for (i = 0; i < folio_nr_pages(folio); i++) {
                data = kmap_local_folio(folio, i * PAGE_SIZE);
                /*
                 * Check the last word first, in case the page is zero-filled
                 * at the start and has non-zero data at the end, which is
                 * common in real-world workloads.
                 */
                if (data[last_pos]) {
                        kunmap_local(data);
                        return false;
                }
                for (pos = 0; pos < last_pos; pos++) {
                        if (data[pos]) {
                                kunmap_local(data);
                                return false;
                        }
                }
                kunmap_local(data);
        }

        return true;
}
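
/*
 * Record in the swap device's zeromap that every swap entry backing this
 * folio holds zero-filled data, and account the zero swap-outs against the
 * VM counters and the owning obj_cgroup. swap_zeromap_folio_clear() below
 * undoes the bitmap bits when non-zero data is later written to the same
 * entries.
 */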
static void swap_zeromap_folio_set(struct folio *folio)
{
        struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        int nr_pages = folio_nr_pages(folio);
        swp_entry_t entry;
        unsigned int i;

        for (i = 0; i < folio_nr_pages(folio); i++) {
                entry = page_swap_entry(folio_page(folio, i));
                set_bit(swp_offset(entry), sis->zeromap);
        }

        count_vm_events(SWPOUT_ZERO, nr_pages);
        if (objcg) {
                count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
                obj_cgroup_put(objcg);
        }
}

static void swap_zeromap_folio_clear(struct folio *folio)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        swp_entry_t entry;
        unsigned int i;

        for (i = 0; i < folio_nr_pages(folio); i++) {
                entry = page_swap_entry(folio_page(folio, i));
                clear_bit(swp_offset(entry), sis->zeromap);
        }
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct folio *folio = page_folio(page);
        int ret;

        if (folio_free_swap(folio)) {
                folio_unlock(folio);
                return 0;
        }
        /*
         * Arch code may have to preserve more data than just the page
         * contents, e.g. memory tags.
         */
        ret = arch_prepare_to_swap(folio);
        if (ret) {
                folio_mark_dirty(folio);
                folio_unlock(folio);
                return ret;
        }

        /*
         * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
         * The bits in zeromap are protected by the locked swapcache folio
         * and atomic updates are used to protect against read-modify-write
         * corruption due to other zero swap entries seeing concurrent updates.
         */
        if (is_folio_zero_filled(folio)) {
                swap_zeromap_folio_set(folio);
                folio_unlock(folio);
                return 0;
        } else {
                /*
                 * Clear bits this folio occupies in the zeromap to prevent
                 * zero data being read in from any previous zero writes that
                 * occupied the same swap entries.
                 */
                swap_zeromap_folio_clear(folio);
        }
        if (zswap_store(folio)) {
                folio_unlock(folio);
                return 0;
        }
        if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
                folio_mark_dirty(folio);
                return AOP_WRITEPAGE_ACTIVATE;
        }
        __swap_writepage(folio, wbc);
        return 0;
}

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (unlikely(folio_test_pmd_mappable(folio))) {
                count_memcg_folio_events(folio, THP_SWPOUT, 1);
                count_vm_event(THP_SWPOUT);
        }
        count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
#endif
        count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
        struct cgroup_subsys_state *css;
        struct mem_cgroup *memcg;

        memcg = folio_memcg(folio);
        if (!memcg)
                return;

        rcu_read_lock();
        css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
        bio_associate_blkg_from_css(bio, css);
        rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)        do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
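
/*
 * A swap_iocb batches up to SWAP_CLUSTER_MAX folios into a single
 * ->swap_rw() request for swap files that use SWP_FS_OPS (e.g.
 * swap-over-nfs). The plug pointers (wbc->swap_plug for writes, the *plug
 * argument for reads) keep a partially filled swap_iocb open so that
 * folios at consecutive file offsets can be merged into one request.
 */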
struct swap_iocb {
        struct kiocb    iocb;
        struct bio_vec  bvec[SWAP_CLUSTER_MAX];
        int             pages;
        int             len;
};
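
/*
 * sio_pool backs all swap_iocb allocations. It is created lazily on first
 * use; the cmpxchg() in sio_pool_init() makes initialisation safe against
 * concurrent callers, with the loser destroying its redundant pool.
 */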
static mempool_t *sio_pool;

int sio_pool_init(void)
{
        if (!sio_pool) {
                mempool_t *pool = mempool_create_kmalloc_pool(
                        SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
                if (cmpxchg(&sio_pool, NULL, pool))
                        mempool_destroy(pool);
        }
        if (!sio_pool)
                return -ENOMEM;
        return 0;
}

static void sio_write_complete(struct kiocb *iocb, long ret)
{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        struct page *page = sio->bvec[0].bv_page;
        int p;

        if (ret != sio->len) {
                /*
                 * In the case of swap-over-nfs, this can be a
                 * temporary failure if the system has limited
                 * memory for allocating transmit buffers.
                 * Mark the page dirty and avoid
                 * folio_rotate_reclaimable but rate-limit the
                 * messages.
                 */
                pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
                                   ret, swap_dev_pos(page_swap_entry(page)));
                for (p = 0; p < sio->pages; p++) {
                        page = sio->bvec[p].bv_page;
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                }
        }

        for (p = 0; p < sio->pages; p++)
                end_page_writeback(sio->bvec[p].bv_page);

        mempool_free(sio, sio_pool);
}
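
/*
 * Write-out path for SWP_FS_OPS swap files: queue the folio on a swap_iocb
 * and submit it through the filesystem's ->swap_rw(). If the caller
 * supplied a plug, the request is held open so that writes to consecutive
 * offsets can be merged; otherwise, or once the bvec array is full, it is
 * flushed immediately via swap_write_unplug().
 */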
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
        struct swap_iocb *sio = NULL;
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        struct file *swap_file = sis->swap_file;
        loff_t pos = swap_dev_pos(folio->swap);

        count_swpout_vm_event(folio);
        folio_start_writeback(folio);
        folio_unlock(folio);
        if (wbc->swap_plug)
                sio = *wbc->swap_plug;
        if (sio) {
                if (sio->iocb.ki_filp != swap_file ||
                    sio->iocb.ki_pos + sio->len != pos) {
                        swap_write_unplug(sio);
                        sio = NULL;
                }
        }
        if (!sio) {
                sio = mempool_alloc(sio_pool, GFP_NOIO);
                init_sync_kiocb(&sio->iocb, swap_file);
                sio->iocb.ki_complete = sio_write_complete;
                sio->iocb.ki_pos = pos;
                sio->pages = 0;
                sio->len = 0;
        }
        bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
        sio->len += folio_size(folio);
        sio->pages += 1;
        if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
                swap_write_unplug(sio);
                sio = NULL;
        }
        if (wbc->swap_plug)
                *wbc->swap_plug = sio;
}
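
/*
 * Synchronous block-device write-out: build a bio on the stack, submit it
 * with submit_bio_wait() and complete the folio inline. Used when the swap
 * device is flagged SWP_SYNCHRONOUS_IO (see __swap_writepage() below).
 */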
static void swap_writepage_bdev_sync(struct folio *folio,
                struct writeback_control *wbc, struct swap_info_struct *sis)
{
        struct bio_vec bv;
        struct bio bio;

        bio_init(&bio, sis->bdev, &bv, 1,
                 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
        bio.bi_iter.bi_sector = swap_folio_sector(folio);
        bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

        bio_associate_blkg_from_page(&bio, folio);
        count_swpout_vm_event(folio);

        folio_start_writeback(folio);
        folio_unlock(folio);

        submit_bio_wait(&bio);
        __end_swap_bio_write(&bio);
}
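
/*
 * Asynchronous block-device write-out: allocate a bio, point its completion
 * at end_swap_bio_write() and submit without waiting; the folio is finished
 * from the bio completion handler.
 */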
static void swap_writepage_bdev_async(struct folio *folio,
                struct writeback_control *wbc, struct swap_info_struct *sis)
{
        struct bio *bio;

        bio = bio_alloc(sis->bdev, 1,
                        REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
                        GFP_NOIO);
        bio->bi_iter.bi_sector = swap_folio_sector(folio);
        bio->bi_end_io = end_swap_bio_write;
        bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

        bio_associate_blkg_from_page(bio, folio);
        count_swpout_vm_event(folio);
        folio_start_writeback(folio);
        folio_unlock(folio);
        submit_bio(bio);
}
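
/*
 * Dispatch a swap-cache folio to the appropriate write-out path: the
 * filesystem path for SWP_FS_OPS swap files, the synchronous bdev path for
 * SWP_SYNCHRONOUS_IO devices, and the asynchronous bdev path otherwise.
 */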
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);

        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        /*
         * ->flags can be updated non-atomically (scan_swap_map_slots),
         * but that will never affect SWP_FS_OPS, so the data_race
         * is safe.
         */
        if (data_race(sis->flags & SWP_FS_OPS))
                swap_writepage_fs(folio, wbc);
        /*
         * ->flags can be updated non-atomically (scan_swap_map_slots),
         * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
         * is safe.
         */
        else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
                swap_writepage_bdev_sync(folio, wbc, sis);
        else
                swap_writepage_bdev_async(folio, wbc, sis);
}
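
/*
 * Flush a plugged swap_iocb: hand the accumulated bvecs to ->swap_rw() and,
 * unless the request was queued asynchronously (-EIOCBQUEUED), run the
 * write completion directly.
 */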
void swap_write_unplug(struct swap_iocb *sio)
{
        struct iov_iter from;
        struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
        int ret;

        iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
        ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
        if (ret != -EIOCBQUEUED)
                sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        int p;

        if (ret == sio->len) {
                for (p = 0; p < sio->pages; p++) {
                        struct folio *folio = page_folio(sio->bvec[p].bv_page);

                        folio_mark_uptodate(folio);
                        folio_unlock(folio);
                }
                count_vm_events(PSWPIN, sio->pages);
        } else {
                for (p = 0; p < sio->pages; p++) {
                        struct folio *folio = page_folio(sio->bvec[p].bv_page);

                        folio_unlock(folio);
                }
                pr_alert_ratelimited("Read-error on swap-device\n");
        }
        mempool_free(sio, sio_pool);
}
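
/*
 * If every swap entry backing the folio is marked in the zeromap, satisfy
 * the read by zero-filling the folio in memory instead of touching the
 * device. Returns true when the read has been handled here (including the
 * partially-mapped error case described below), false when real I/O is
 * still needed.
 */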
static bool swap_read_folio_zeromap(struct folio *folio)
{
        int nr_pages = folio_nr_pages(folio);
        struct obj_cgroup *objcg;
        bool is_zeromap;

        /*
         * Swapping in a large folio that is partially in the zeromap is not
         * currently handled. Return true without marking the folio uptodate so
         * that an IO error is emitted (e.g. do_swap_page() will sigbus).
         */
        if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
                        &is_zeromap) != nr_pages))
                return true;

        if (!is_zeromap)
                return false;

        objcg = get_obj_cgroup_from_folio(folio);
        count_vm_events(SWPIN_ZERO, nr_pages);
        if (objcg) {
                count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
                obj_cgroup_put(objcg);
        }

        folio_zero_range(folio, 0, folio_size(folio));
        folio_mark_uptodate(folio);
        return true;
}

static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        struct swap_iocb *sio = NULL;
        loff_t pos = swap_dev_pos(folio->swap);

        if (plug)
                sio = *plug;
        if (sio) {
                if (sio->iocb.ki_filp != sis->swap_file ||
                    sio->iocb.ki_pos + sio->len != pos) {
                        swap_read_unplug(sio);
                        sio = NULL;
                }
        }
        if (!sio) {
                sio = mempool_alloc(sio_pool, GFP_KERNEL);
                init_sync_kiocb(&sio->iocb, sis->swap_file);
                sio->iocb.ki_pos = pos;
                sio->iocb.ki_complete = sio_read_complete;
                sio->pages = 0;
                sio->len = 0;
        }
        bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
        sio->len += folio_size(folio);
        sio->pages += 1;
        if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
                swap_read_unplug(sio);
                sio = NULL;
        }
        if (plug)
                *plug = sio;
}

static void swap_read_folio_bdev_sync(struct folio *folio,
                struct swap_info_struct *sis)
{
        struct bio_vec bv;
        struct bio bio;

        bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = swap_folio_sector(folio);
        bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
        /*
         * Keep this task valid during swap readpage because the oom killer may
         * attempt to access it in the page fault retry time check.
         */
        get_task_struct(current);
        count_vm_events(PSWPIN, folio_nr_pages(folio));
        submit_bio_wait(&bio);
        __end_swap_bio_read(&bio);
        put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
                struct swap_info_struct *sis)
{
        struct bio *bio;

        bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = swap_folio_sector(folio);
        bio->bi_end_io = end_swap_bio_read;
        bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
        count_vm_events(PSWPIN, folio_nr_pages(folio));
        submit_bio(bio);
}
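
/*
 * Read a folio back in from swap. Zero-filled entries are satisfied from
 * the zeromap and compressed entries from zswap without touching the
 * device; otherwise the read is routed to the filesystem, synchronous bdev
 * or asynchronous bdev path, mirroring __swap_writepage(). When the folio
 * belongs to the workingset, submission time is additionally counted as a
 * memory stall for PSI and thrashing delay accounting.
 */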
void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
        bool workingset = folio_test_workingset(folio);
        unsigned long pflags;
        bool in_thrashing;

        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

        /*
         * Count submission time as memory stall and delay. When the device
         * is congested, or the submitting cgroup IO-throttled, submission
         * can be a significant part of overall IO time.
         */
        if (workingset) {
                delayacct_thrashing_start(&in_thrashing);
                psi_memstall_enter(&pflags);
        }
        delayacct_swapin_start();

        if (swap_read_folio_zeromap(folio)) {
                folio_unlock(folio);
                goto finish;
        } else if (zswap_load(folio)) {
                folio_unlock(folio);
                goto finish;
        }

        /* We have to read from slower devices. Increase zswap protection. */
        zswap_folio_swapin(folio);

        if (data_race(sis->flags & SWP_FS_OPS)) {
                swap_read_folio_fs(folio, plug);
        } else if (synchronous) {
                swap_read_folio_bdev_sync(folio, sis);
        } else {
                swap_read_folio_bdev_async(folio, sis);
        }

finish:
        if (workingset) {
                delayacct_thrashing_end(&in_thrashing);
                psi_memstall_leave(&pflags);
        }
        delayacct_swapin_end();
}
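
/*
 * Read-side counterpart of swap_write_unplug(): submit the plugged
 * swap_iocb through ->swap_rw() and run the read completion inline unless
 * the request was queued asynchronously (-EIOCBQUEUED).
 */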
void __swap_read_unplug(struct swap_iocb *sio)
{
        struct iov_iter from;
        struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
        int ret;

        iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
        ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
        if (ret != -EIOCBQUEUED)
                sio_read_complete(&sio->iocb, ret);
}