// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2021
 *
 * Author: Mike Rapoport <rppt@linux.ibm.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/mount.h>
#include <linux/memfd.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

#include <uapi/linux/magic.h>

#include <asm/tlbflush.h>

#include "internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "secretmem: " fmt

/*
 * Define mode and flag masks to allow validation of the system call
 * parameters.
 */
#define SECRETMEM_MODE_MASK	(0x0)
#define SECRETMEM_FLAGS_MASK	SECRETMEM_MODE_MASK

static bool secretmem_enable __ro_after_init = 1;
module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
		 "Enable secretmem and memfd_secret(2) system call");

static atomic_t secretmem_users;

bool secretmem_active(void)
{
	return !!atomic_read(&secretmem_users);
}
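
/*
 * Fault handler: find the folio in the page cache or allocate a fresh
 * zeroed one, remove it from the kernel direct map so the kernel can no
 * longer reach the page through the linear mapping, and only then add
 * it to the page cache. Faults beyond i_size fail with EINVAL.
 */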
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	pgoff_t offset = vmf->pgoff;
	gfp_t gfp = vmf->gfp_mask;
	unsigned long addr;
	struct page *page;
	struct folio *folio;
	vm_fault_t ret;
	int err;

	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return vmf_error(-EINVAL);

	filemap_invalidate_lock_shared(mapping);

retry:
	page = find_lock_page(mapping, offset);
	if (!page) {
		folio = folio_alloc(gfp | __GFP_ZERO, 0);
		if (!folio) {
			ret = VM_FAULT_OOM;
			goto out;
		}

		page = &folio->page;
		err = set_direct_map_invalid_noflush(page);
		if (err) {
			folio_put(folio);
			ret = vmf_error(err);
			goto out;
		}

		__folio_mark_uptodate(folio);
		err = filemap_add_folio(mapping, folio, offset, gfp);
		if (unlikely(err)) {
			folio_put(folio);
			/*
			 * If a split of a large page was required, it
			 * already happened when we marked the page invalid,
			 * which guarantees that this call won't fail.
			 */
			set_direct_map_default_noflush(page);
			if (err == -EEXIST)
				goto retry;

			ret = vmf_error(err);
			goto out;
		}

		addr = (unsigned long)page_address(page);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	vmf->page = page;
	ret = VM_FAULT_LOCKED;

out:
	filemap_invalidate_unlock_shared(mapping);
	return ret;
}

static const struct vm_operations_struct secretmem_vm_ops = {
	.fault = secretmem_fault,
};

static int secretmem_release(struct inode *inode, struct file *file)
{
	atomic_dec(&secretmem_users);
	return 0;
}
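
/*
 * Mappings must be shared (the pages can never be COWed) and are
 * implicitly mlocked: check against the mlock rlimit up front, then mark
 * the VMA VM_LOCKED so the pages stay resident and VM_DONTDUMP so they
 * never land in a core dump.
 */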
static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (!mlock_future_ok(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
		return -EAGAIN;

	vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP);
	vma->vm_ops = &secretmem_vm_ops;

	return 0;
}

bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}

static const struct file_operations secretmem_fops = {
	.release	= secretmem_release,
	.mmap		= secretmem_mmap,
};
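
/*
 * Secret memory folios are not mapped in the direct map, so they cannot
 * be copied for migration: refuse any migration attempt with EBUSY.
 */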
static int secretmem_migrate_folio(struct address_space *mapping,
				   struct folio *dst, struct folio *src,
				   enum migrate_mode mode)
{
	return -EBUSY;
}
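
/*
 * Called when the folio is freed: restore the direct map entry first,
 * then zero the contents so no secret data survives into the page
 * allocator.
 */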
static void secretmem_free_folio(struct folio *folio)
{
	set_direct_map_default_noflush(&folio->page);
	folio_zero_segment(folio, 0, folio_size(folio));
}

const struct address_space_operations secretmem_aops = {
	.dirty_folio	= noop_dirty_folio,
	.free_folio	= secretmem_free_folio,
	.migrate_folio	= secretmem_migrate_folio,
};
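
/*
 * The size of a secretmem file can be set only once, while it is still
 * zero: any later ftruncate() fails with EINVAL. The invalidate lock
 * serializes the size change against concurrent faults.
 */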
static int secretmem_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	struct address_space *mapping = inode->i_mapping;
	unsigned int ia_valid = iattr->ia_valid;
	int ret;

	filemap_invalidate_lock(mapping);

	if ((ia_valid & ATTR_SIZE) && inode->i_size)
		ret = -EINVAL;
	else
		ret = simple_setattr(idmap, dentry, iattr);

	filemap_invalidate_unlock(mapping);

	return ret;
}

static const struct inode_operations secretmem_iops = {
	.setattr = secretmem_setattr,
};

static struct vfsmount *secretmem_mnt;
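
/*
 * Allocate an anonymous inode on the internal secretmem mount, run the
 * LSM hook for anonymous inodes, and wire up the secretmem inode and
 * address space operations. The mapping is marked unevictable so its
 * pages are never considered for reclaim.
 */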
static struct file *secretmem_file_create(unsigned long flags)
{
	struct file *file;
	struct inode *inode;
	const char *anon_name = "[secretmem]";
	const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
	int err;

	inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = security_inode_init_security_anon(inode, &qname, NULL);
	if (err) {
		file = ERR_PTR(err);
		goto err_free_inode;
	}

	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
				 O_RDWR, &secretmem_fops);
	if (IS_ERR(file))
		goto err_free_inode;

	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unevictable(inode->i_mapping);

	inode->i_op = &secretmem_iops;
	inode->i_mapping->a_ops = &secretmem_aops;

	/* pretend we are a normal file with zero size */
	inode->i_mode |= S_IFREG;
	inode->i_size = 0;

	return file;

err_free_inode:
	iput(inode);
	return file;
}
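
/*
 * The memfd_secret(2) system call. A minimal userspace sketch of the
 * intended usage (illustrative only; error handling omitted):
 *
 *	int fd = syscall(SYS_memfd_secret, 0);
 *	ftruncate(fd, len);		// size can be set exactly once
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The mapping must be MAP_SHARED (see secretmem_mmap() above) and the
 * pages count against the caller's RLIMIT_MEMLOCK.
 */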
SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
{
	struct file *file;
	int fd, err;

	/* make sure local flags do not conflict with global fcntl.h */
	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

	if (!secretmem_enable || !can_set_direct_map())
		return -ENOSYS;

	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
		return -EINVAL;
	if (atomic_read(&secretmem_users) < 0)
		return -ENFILE;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = secretmem_file_create(flags);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_flags |= O_LARGEFILE;

	atomic_inc(&secretmem_users);
	fd_install(fd, file);
	return fd;

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int secretmem_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type secretmem_fs = {
	.name = "secretmem",
	.init_fs_context = secretmem_init_fs_context,
	.kill_sb = kill_anon_super,
};
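
/*
 * Mount the internal pseudo filesystem at boot. If secretmem is disabled
 * or the architecture cannot modify the direct map, the mount is skipped
 * and memfd_secret(2) will return ENOSYS.
 */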
static int __init secretmem_init(void)
{
	if (!secretmem_enable || !can_set_direct_map())
		return 0;

	secretmem_mnt = kern_mount(&secretmem_fs);
	if (IS_ERR(secretmem_mnt))
		return PTR_ERR(secretmem_mnt);

	/* prevent secretmem mappings from ever getting PROT_EXEC */
	secretmem_mnt->mnt_flags |= MNT_NOEXEC;

	return 0;
}
fs_initcall(secretmem_init);