crypto.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * This contains encryption functions for per-file encryption.
  4. *
  5. * Copyright (C) 2015, Google, Inc.
  6. * Copyright (C) 2015, Motorola Mobility
  7. *
  8. * Written by Michael Halcrow, 2014.
  9. *
  10. * Filename encryption additions
  11. * Uday Savagaonkar, 2014
  12. * Encryption policy handling additions
  13. * Ildar Muslukhov, 2014
  14. * Add fscrypt_pullback_bio_page()
  15. * Jaegeuk Kim, 2015.
  16. *
  17. * This has not yet undergone a rigorous security audit.
  18. *
  19. * The usage of AES-XTS should conform to recommendations in NIST
  20. * Special Publication 800-38E and IEEE P1619/D16.
  21. */
  22. #include <linux/pagemap.h>
  23. #include <linux/mempool.h>
  24. #include <linux/module.h>
  25. #include <linux/scatterlist.h>
  26. #include <linux/ratelimit.h>
  27. #include <crypto/skcipher.h>
  28. #include "fscrypt_private.h"
/* Number of bounce pages to keep preallocated in the mempool below. */
static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

/*
 * Mempool of ciphertext bounce pages, lazily created by fscrypt_initialize()
 * for filesystems that set needs_bounce_pages.
 */
static mempool_t *fscrypt_bounce_page_pool = NULL;

/* Workqueue on which read-path decryption work items are run; see fscrypt_init() */
static struct workqueue_struct *fscrypt_read_workqueue;

/* Serializes the one-time setup of fscrypt_bounce_page_pool in fscrypt_initialize() */
static DEFINE_MUTEX(fscrypt_init_mutex);

/* Cache for struct fscrypt_inode_info allocations; created in fscrypt_init() */
struct kmem_cache *fscrypt_inode_info_cachep;
/*
 * fscrypt_enqueue_decrypt_work() - queue a decryption work item
 * @work: the work to enqueue
 *
 * Queue @work on the fscrypt read workqueue, which fscrypt_init() allocates as
 * unbound and high-priority so that bio decryption can run in parallel and
 * isn't starved by regular tasks.
 */
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
  42. struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
  43. {
  44. if (WARN_ON_ONCE(!fscrypt_bounce_page_pool)) {
  45. /*
  46. * Oops, the filesystem called a function that uses the bounce
  47. * page pool, but it didn't set needs_bounce_pages.
  48. */
  49. return NULL;
  50. }
  51. return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
  52. }
  53. /**
  54. * fscrypt_free_bounce_page() - free a ciphertext bounce page
  55. * @bounce_page: the bounce page to free, or NULL
  56. *
  57. * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
  58. * or by fscrypt_alloc_bounce_page() directly.
  59. */
  60. void fscrypt_free_bounce_page(struct page *bounce_page)
  61. {
  62. if (!bounce_page)
  63. return;
  64. set_page_private(bounce_page, (unsigned long)NULL);
  65. ClearPagePrivate(bounce_page);
  66. mempool_free(bounce_page, fscrypt_bounce_page_pool);
  67. }
  68. EXPORT_SYMBOL(fscrypt_free_bounce_page);
/*
 * Generate the IV for the given data unit index within the given file.
 * For filenames encryption, index == 0.
 *
 * Keep this in sync with fscrypt_limit_io_blocks().  fscrypt_limit_io_blocks()
 * needs to know about any IV generation methods where the low bits of IV don't
 * simply contain the data unit index (e.g., IV_INO_LBLK_32).
 */
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
			 const struct fscrypt_inode_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	/* Zero the full IV first; only the relevant fields are filled below. */
	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		/*
		 * IV = (inode number << 32) | data unit index.  Both values
		 * must fit in 32 bits for this policy, hence the WARNs.
		 */
		WARN_ON_ONCE(index > U32_MAX);
		WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
		index |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
		/* IV = low 32 bits of (hashed inode number + index). */
		WARN_ON_ONCE(index > U32_MAX);
		index = (u32)(ci->ci_hashed_ino + index);
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		/* DIRECT_KEY: the per-file nonce is included in the IV. */
		memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
	}
	/* The (possibly modified) index always occupies the low 64 IV bits. */
	iv->index = cpu_to_le64(index);
}
/*
 * fscrypt_crypt_data_unit() - encrypt or decrypt a single "data unit" of file
 *			       contents
 * @ci:        the inode's fscrypt info, providing the key and IV policy
 * @rw:        FS_ENCRYPT or FS_DECRYPT
 * @index:     index of the data unit within the file, used to generate the IV
 * @src_page:  page containing the input data
 * @dest_page: page to write the output data to (may equal @src_page for
 *	       in-place operation)
 * @len:       length of the data in bytes; must be nonzero and a multiple of
 *	       FSCRYPT_CONTENTS_ALIGNMENT
 * @offs:      byte offset of the data within both pages
 * @gfp_flags: allocation flags for the skcipher request
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
			    fscrypt_direction_t rw, u64 index,
			    struct page *src_page, struct page *dest_page,
			    unsigned int len, unsigned int offs,
			    gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
	int res = 0;

	/* len is unsigned, so this rejects exactly len == 0. */
	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, index, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	/* Run synchronously: crypto_wait_req() below blocks until done. */
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);
	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(ci->ci_inode,
			    "%scryption failed for data unit %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), index, res);
		return res;
	}
	return 0;
}
/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
 * @page: the locked pagecache page containing the data to encrypt
 * @len: size of the data to encrypt, in bytes
 * @offs: offset within @page of the data to encrypt, in bytes
 * @gfp_flags: memory allocation flags; see details below
 *
 * This allocates a new bounce page and encrypts the given data into it.  The
 * length and offset of the data must be aligned to the file's crypto data unit
 * size.  Alignment to the filesystem block size fulfills this requirement, as
 * the filesystem block size is always a multiple of the data unit size.
 *
 * In the bounce page, the ciphertext data will be located at the same offset at
 * which the plaintext data was located in the source page.  Any other parts of
 * the bounce page will be left uninitialized.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * The bounce page allocation is mempool-backed, so it will always succeed when
 * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS.  However,
 * only the first page of each bio can be allocated this way.  To prevent
 * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const struct fscrypt_inode_info *ci = inode->i_crypt_info;
	const unsigned int du_bits = ci->ci_data_unit_bits;
	const unsigned int du_size = 1U << du_bits;
	struct page *ciphertext_page;
	/* Index of the first data unit covered by [offs, offs + len). */
	u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
		    (offs >> du_bits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	/* len is unsigned, so "len <= 0" rejects exactly len == 0. */
	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	/* Encrypt each data unit to the same offset in the bounce page. */
	for (i = offs; i < offs + len; i += du_size, index++) {
		err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
					      page, ciphertext_page,
					      du_size, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	/*
	 * Stash a pointer back to the pagecache page in page_private, so the
	 * writeback path can find it; fscrypt_free_bounce_page() undoes this.
	 */
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
  196. /**
  197. * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
  198. * @inode: The inode to which this block belongs
  199. * @page: The page containing the block to encrypt
  200. * @len: Size of block to encrypt. This must be a multiple of
  201. * FSCRYPT_CONTENTS_ALIGNMENT.
  202. * @offs: Byte offset within @page at which the block to encrypt begins
  203. * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
  204. * number of the block within the file
  205. * @gfp_flags: Memory allocation flags
  206. *
  207. * Encrypt a possibly-compressed filesystem block that is located in an
  208. * arbitrary page, not necessarily in the original pagecache page. The @inode
  209. * and @lblk_num must be specified, as they can't be determined from @page.
  210. *
  211. * This is not compatible with fscrypt_operations::supports_subblock_data_units.
  212. *
  213. * Return: 0 on success; -errno on failure
  214. */
  215. int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
  216. unsigned int len, unsigned int offs,
  217. u64 lblk_num, gfp_t gfp_flags)
  218. {
  219. if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
  220. return -EOPNOTSUPP;
  221. return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_ENCRYPT,
  222. lblk_num, page, page, len, offs,
  223. gfp_flags);
  224. }
  225. EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt data from a pagecache folio
 * @folio: the pagecache folio containing the data to decrypt
 * @len: size of the data to decrypt, in bytes
 * @offs: offset within @folio of the data to decrypt, in bytes
 *
 * Decrypt data that has just been read from an encrypted file.  The data must
 * be located in a pagecache folio that is still locked and not yet uptodate.
 * The length and offset of the data must be aligned to the file's crypto data
 * unit size.  Alignment to the filesystem block size fulfills this requirement,
 * as the filesystem block size is always a multiple of the data unit size.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
				     size_t offs)
{
	const struct inode *inode = folio->mapping->host;
	const struct fscrypt_inode_info *ci = inode->i_crypt_info;
	const unsigned int du_bits = ci->ci_data_unit_bits;
	const unsigned int du_size = 1U << du_bits;
	/* Index of the first data unit covered by [offs, offs + len). */
	u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
		    (offs >> du_bits);
	size_t i;
	int err;

	if (WARN_ON_ONCE(!folio_test_locked(folio)))
		return -EINVAL;

	/* len is unsigned, so "len <= 0" rejects exactly len == 0. */
	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
		return -EINVAL;

	/*
	 * Decrypt one data unit at a time, in place.  The folio may span
	 * multiple pages, so look up the page and the offset within it for
	 * each unit rather than assuming a single page.
	 */
	for (i = offs; i < offs + len; i += du_size, index++) {
		struct page *page = folio_page(folio, i >> PAGE_SHIFT);

		err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
					      page, du_size, i & ~PAGE_MASK,
					      GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
  266. /**
  267. * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
  268. * @inode: The inode to which this block belongs
  269. * @page: The page containing the block to decrypt
  270. * @len: Size of block to decrypt. This must be a multiple of
  271. * FSCRYPT_CONTENTS_ALIGNMENT.
  272. * @offs: Byte offset within @page at which the block to decrypt begins
  273. * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
  274. * number of the block within the file
  275. *
  276. * Decrypt a possibly-compressed filesystem block that is located in an
  277. * arbitrary page, not necessarily in the original pagecache page. The @inode
  278. * and @lblk_num must be specified, as they can't be determined from @page.
  279. *
  280. * This is not compatible with fscrypt_operations::supports_subblock_data_units.
  281. *
  282. * Return: 0 on success; -errno on failure
  283. */
  284. int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
  285. unsigned int len, unsigned int offs,
  286. u64 lblk_num)
  287. {
  288. if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
  289. return -EOPNOTSUPP;
  290. return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_DECRYPT,
  291. lblk_num, page, page, len, offs,
  292. GFP_NOFS);
  293. }
  294. EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @sb: the filesystem superblock
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * The bounce page pool is created at most once, using double-checked locking:
 * a lockless smp_load_acquire() fast path, then a recheck under
 * fscrypt_init_mutex before the pool is created and published with
 * smp_store_release().
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_initialize(struct super_block *sb)
{
	int err = 0;
	mempool_t *pool;

	/* pairs with smp_store_release() below */
	if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
		return 0;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (!sb->s_cop->needs_bounce_pages)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	/* Recheck under the mutex: another task may have created the pool. */
	if (fscrypt_bounce_page_pool)
		goto out_unlock;

	err = -ENOMEM;
	pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!pool)
		goto out_unlock;
	/* pairs with smp_load_acquire() above */
	smp_store_release(&fscrypt_bounce_page_pool, pool);
	err = 0;
out_unlock:
	mutex_unlock(&fscrypt_init_mutex);
	return err;
}
/*
 * fscrypt_msg() - print a rate-limited fscrypt message
 * @inode: the inode the message concerns, or NULL if none
 * @level: printk level prefix (e.g. KERN_WARNING)
 * @fmt:   printf-style format string
 *
 * Messages are rate-limited globally (one ratelimit state shared by all
 * callers).  The inode number is included when available; an inode with
 * i_ino == 0 is treated like a filesystem-level message.
 */
void fscrypt_msg(const struct inode *inode, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode && inode->i_ino)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else if (inode)
		printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
  349. /**
  350. * fscrypt_init() - Set up for fs encryption.
  351. *
  352. * Return: 0 on success; -errno on failure
  353. */
  354. static int __init fscrypt_init(void)
  355. {
  356. int err = -ENOMEM;
  357. /*
  358. * Use an unbound workqueue to allow bios to be decrypted in parallel
  359. * even when they happen to complete on the same CPU. This sacrifices
  360. * locality, but it's worthwhile since decryption is CPU-intensive.
  361. *
  362. * Also use a high-priority workqueue to prioritize decryption work,
  363. * which blocks reads from completing, over regular application tasks.
  364. */
  365. fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
  366. WQ_UNBOUND | WQ_HIGHPRI,
  367. num_online_cpus());
  368. if (!fscrypt_read_workqueue)
  369. goto fail;
  370. fscrypt_inode_info_cachep = KMEM_CACHE(fscrypt_inode_info,
  371. SLAB_RECLAIM_ACCOUNT);
  372. if (!fscrypt_inode_info_cachep)
  373. goto fail_free_queue;
  374. err = fscrypt_init_keyring();
  375. if (err)
  376. goto fail_free_inode_info;
  377. return 0;
  378. fail_free_inode_info:
  379. kmem_cache_destroy(fscrypt_inode_info_cachep);
  380. fail_free_queue:
  381. destroy_workqueue(fscrypt_read_workqueue);
  382. fail:
  383. return err;
  384. }
  385. late_initcall(fscrypt_init)