/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
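
/*
 * Usage sketch for fscrypt_enqueue_decrypt_work() (a hypothetical caller,
 * not part of this file): a filesystem's bio completion handler runs in
 * atomic context, so it hands the CPU-heavy decryption off to the fscrypt
 * workqueue instead of doing it inline.  The names below are illustrative.
 */
#if 0	/* illustrative only; never compiled */
struct example_decrypt_work {
	struct work_struct work;
	struct bio *bio;
};

static void example_decrypt_work_fn(struct work_struct *work)
{
	struct example_decrypt_work *dw =
		container_of(work, struct example_decrypt_work, work);

	/* ... decrypt each page of dw->bio, then complete the read ... */
}

static void example_read_endio(struct bio *bio)
{
	struct example_decrypt_work *dw = bio->bi_private;

	INIT_WORK(&dw->work, example_decrypt_work_fn);
	fscrypt_enqueue_decrypt_work(&dw->work);
}
#endif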
/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);
/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() on failure (this function never returns NULL).
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
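
/*
 * Usage sketch for the ctx get/release pair (a hypothetical caller, not
 * part of this file).  GFP_NOFS is assumed since this typically runs under
 * filesystem locks or in I/O context.
 */
#if 0	/* illustrative only; never compiled */
static int example_with_ctx(struct inode *inode)
{
	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -ENOKEY if i_crypt_info is unset */

	/* ... use ctx to track a pending encryption or decryption ... */

	fscrypt_release_ctx(ctx);	/* also frees any bounce page */
	return 0;
}
#endif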
/*
 * Encrypt or decrypt (per @rw) one block of file contents: the @len bytes at
 * offset @offs in @src_page are transformed into @dest_page, using the
 * logical block number @lblk_num to derive the IV.
 */
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);

	/* The IV is the zero-padded little-endian logical block number... */
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	/* ...additionally encrypted with the ESSIV tfm, for modes that use one */
	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);
	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}
/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page containing the encrypted content on success; an ERR_PTR()
 * on failure (this function never returns NULL).
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				  struct page *page,
				  unsigned int len,
				  unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
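
/*
 * Write-path usage sketch (hypothetical filesystem code, not part of this
 * file): encrypt a locked pagecache page into a bounce page, write the
 * bounce page out, then release it with fscrypt_restore_control_page().
 */
#if 0	/* illustrative only; never compiled */
static int example_writepage_block(struct inode *inode, struct page *page,
				   u64 lblk_num)
{
	struct page *ciphertext_page;

	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					       lblk_num, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	/* ... submit ciphertext_page for write I/O and wait for it ... */

	/* Unlocks the bounce page, frees it, and releases its ctx. */
	fscrypt_restore_control_page(ciphertext_page);
	return 0;
}
#endif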
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:    The corresponding inode for the page to decrypt.
 * @page:     The page to decrypt. Must be locked in case
 *            it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:      Number of bytes in @page to be decrypted.
 * @offs:     Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (WARN_ON_ONCE(!PageLocked(page) &&
			 !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
		return -EINVAL;

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
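
/*
 * Read-path usage sketch (hypothetical filesystem code, not part of this
 * file): once read I/O has completed, decrypt the block in place before
 * marking the page uptodate.
 */
#if 0	/* illustrative only; never compiled */
static void example_read_block_done(struct inode *inode, struct page *page,
				    u64 lblk_num)
{
	int err = fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0, lblk_num);

	if (err)
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);
}
#endif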
/*
 * Validate dentries in encrypted directories to make sure we aren't
 * potentially caching stale dentries after a key has been added.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int err;
	int valid;

	/*
	 * Plaintext names are always valid, since fscrypt doesn't support
	 * reverting to ciphertext names without evicting the directory's inode
	 * -- which implies eviction of the dentries in the directory.
	 */
	if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
		return 1;

	/*
	 * Ciphertext name; valid if the directory's key is still unavailable.
	 *
	 * Although fscrypt forbids rename() on ciphertext names, we still must
	 * use dget_parent() here rather than use ->d_parent directly.  That's
	 * because a corrupted fs image may contain directory hard links, which
	 * the VFS handles by moving the directory's dentry tree in the dcache
	 * each time ->lookup() finds the directory and it already has a dentry
	 * elsewhere.  Thus ->d_parent can be changing, and we must safely grab
	 * a reference to some ->d_parent to prevent it from being freed.
	 */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	err = fscrypt_get_encryption_info(d_inode(dir));
	valid = !fscrypt_has_encryption_key(d_inode(dir));
	dput(dir);

	if (err < 0)
		return err;

	return valid;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
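
/*
 * Lookup-path sketch (hypothetical filesystem code, not part of this file):
 * a filesystem installs fscrypt_d_ops on dentries it creates under an
 * encrypted directory whose key is unavailable, so that the ciphertext name
 * is revalidated, and dropped, once the key becomes available.
 */
#if 0	/* illustrative only; never compiled */
static void example_prepare_encrypted_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_ENCRYPTED_NAME;
	spin_unlock(&dentry->d_lock);
	d_set_d_op(dentry, &fscrypt_d_ops);
}
#endif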
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);
static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}
/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)
/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");