/* fs/crypto/bio.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Utility functions for file contents encryption/decryption on
  4. * block device-based filesystems.
  5. *
  6. * Copyright (C) 2015, Google, Inc.
  7. * Copyright (C) 2015, Motorola Mobility
  8. */
  9. #include <linux/pagemap.h>
  10. #include <linux/module.h>
  11. #include <linux/bio.h>
  12. #include <linux/namei.h>
  13. #include "fscrypt_private.h"
  14. /**
  15. * fscrypt_decrypt_bio() - decrypt the contents of a bio
  16. * @bio: the bio to decrypt
  17. *
  18. * Decrypt the contents of a "read" bio following successful completion of the
  19. * underlying disk read. The bio must be reading a whole number of blocks of an
  20. * encrypted file directly into the page cache. If the bio is reading the
  21. * ciphertext into bounce pages instead of the page cache (for example, because
  22. * the file is also compressed, so decompression is required after decryption),
  23. * then this function isn't applicable. This function may sleep, so it must be
  24. * called from a workqueue rather than from the bio's bi_end_io callback.
  25. *
  26. * Return: %true on success; %false on failure. On failure, bio->bi_status is
  27. * also set to an error status.
  28. */
  29. bool fscrypt_decrypt_bio(struct bio *bio)
  30. {
  31. struct folio_iter fi;
  32. bio_for_each_folio_all(fi, bio) {
  33. int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
  34. fi.offset);
  35. if (err) {
  36. bio->bi_status = errno_to_blk_status(err);
  37. return false;
  38. }
  39. }
  40. return true;
  41. }
  42. EXPORT_SYMBOL(fscrypt_decrypt_bio);
/*
 * Zero out a range of blocks when the inode uses inline (blk-crypto) crypto.
 * The block layer does the encryption, so every bio segment can simply point
 * at the shared zero page; no bounce pages are needed. One bio is reused
 * across submissions via bio_reset().
 */
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t pblk,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	struct bio *bio;
	int ret, err = 0;
	int num_pages = 0;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
			GFP_NOFS);

	while (len) {
		/* Don't add more than a page's worth of blocks at a time. */
		unsigned int blocks_this_page = min(len, blocks_per_page);
		unsigned int bytes_this_page = blocks_this_page << blockbits;

		if (num_pages == 0) {
			/*
			 * First segment of a fresh (or just-reset) bio: attach
			 * the crypt context for the current logical block and
			 * set the starting device sector.
			 */
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
			bio->bi_iter.bi_sector =
					pblk << (blockbits - SECTOR_SHIFT);
		}
		ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
		if (WARN_ON_ONCE(ret != bytes_this_page)) {
			/* Shouldn't happen; the bio was sized for this. */
			err = -EIO;
			goto out;
		}
		num_pages++;
		len -= blocks_this_page;
		lblk += blocks_this_page;
		pblk += blocks_this_page;
		/*
		 * Submit when the bio is full, the range is done, or the next
		 * logical block can't share this bio's crypt context.
		 */
		if (num_pages == BIO_MAX_VECS || !len ||
		    !fscrypt_mergeable_bio(bio, inode, lblk)) {
			err = submit_bio_wait(bio);
			if (err)
				goto out;
			bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
			num_pages = 0;
		}
	}
out:
	bio_put(bio);
	return err;
}
/**
 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
 * @inode: the file's inode
 * @lblk: the first file logical block to zero out
 * @pblk: the first filesystem physical block to zero out
 * @len: number of blocks to zero out
 *
 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
 * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
 * both logically and physically contiguous. It's also assumed that the
 * filesystem only uses a single block device, ->s_bdev.
 *
 * Note that since each block uses a different IV, this involves writing a
 * different ciphertext to each block; we can't simply reuse the same one.
 *
 * Return: 0 on success; -errno on failure.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const struct fscrypt_inode_info *ci = inode->i_crypt_info;
	const unsigned int du_bits = ci->ci_data_unit_bits;
	const unsigned int du_size = 1U << du_bits;
	const unsigned int du_per_page_bits = PAGE_SHIFT - du_bits;
	const unsigned int du_per_page = 1U << du_per_page_bits;
	/* Convert the block-based range into crypto data units. */
	u64 du_index = (u64)lblk << (inode->i_blkbits - du_bits);
	u64 du_remaining = (u64)len << (inode->i_blkbits - du_bits);
	sector_t sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);
	struct page *pages[16]; /* write up to 16 pages at a time */
	unsigned int nr_pages;
	unsigned int i;
	unsigned int offset;
	struct bio *bio;
	int ret, err;

	if (len == 0)
		return 0;

	/* With inline crypto, the hardware encrypts; no bounce pages needed. */
	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
							  len);

	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
	/* Don't allocate more pages than the range actually needs. */
	nr_pages = min_t(u64, ARRAY_SIZE(pages),
			 (du_remaining + du_per_page - 1) >> du_per_page_bits);

	/*
	 * We need at least one page for ciphertext. Allocate the first one
	 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
	 *
	 * Any additional page allocations are allowed to fail, as they only
	 * help performance, and waiting on the mempool for them could deadlock.
	 */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
						     GFP_NOWAIT | __GFP_NOWARN);
		if (!pages[i])
			break;
	}
	nr_pages = i;
	if (WARN_ON_ONCE(nr_pages <= 0))
		return -EINVAL;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);

	/* Outer loop: one submitted bio per iteration, reusing the bio. */
	do {
		bio->bi_iter.bi_sector = sector;

		i = 0;
		offset = 0;
		/* Inner loop: encrypt one data unit per iteration. */
		do {
			/* Encrypt the zero page into the current bounce page. */
			err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, du_index,
						      ZERO_PAGE(0), pages[i],
						      du_size, offset,
						      GFP_NOFS);
			if (err)
				goto out;
			du_index++;
			sector += 1U << (du_bits - SECTOR_SHIFT);
			du_remaining--;
			offset += du_size;
			/* Page full (or range done): add it to the bio. */
			if (offset == PAGE_SIZE || du_remaining == 0) {
				ret = bio_add_page(bio, pages[i++], offset, 0);
				if (WARN_ON_ONCE(ret != offset)) {
					err = -EIO;
					goto out;
				}
				offset = 0;
			}
		} while (i != nr_pages && du_remaining != 0);
		err = submit_bio_wait(bio);
		if (err)
			goto out;
		bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
	} while (du_remaining != 0);
	err = 0;
out:
	bio_put(bio);
	for (i = 0; i < nr_pages; i++)
		fscrypt_free_bounce_page(pages[i]);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);