// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/cache.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/uaccess.h>

#include "internal.h"

static void proc_evict_inode(struct inode *inode)
{
        struct proc_dir_entry *de;
        struct ctl_table_header *head;

        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);

        /* Stop tracking associated processes */
        put_pid(PROC_I(inode)->pid);

        /* Let go of any associated proc directory entry */
        de = PDE(inode);
        if (de)
                pde_put(de);

        head = PROC_I(inode)->sysctl;
        if (head) {
                RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
                proc_sys_evict_inode(inode, head);
        }
}

static struct kmem_cache *proc_inode_cachep __ro_after_init;
static struct kmem_cache *pde_opener_cache __ro_after_init;

static struct inode *proc_alloc_inode(struct super_block *sb)
{
        struct proc_inode *ei;
        struct inode *inode;

        ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        ei->pid = NULL;
        ei->fd = 0;
        ei->op.proc_get_link = NULL;
        ei->pde = NULL;
        ei->sysctl = NULL;
        ei->sysctl_entry = NULL;
        ei->ns_ops = NULL;
        inode = &ei->vfs_inode;
        return inode;
}

static void proc_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void proc_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, proc_i_callback);
}

static void init_once(void *foo)
{
        struct proc_inode *ei = (struct proc_inode *) foo;

        inode_init_once(&ei->vfs_inode);
}

void __init proc_init_kmemcache(void)
{
        proc_inode_cachep = kmem_cache_create("proc_inode_cache",
                                              sizeof(struct proc_inode),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD|SLAB_ACCOUNT|
                                                  SLAB_PANIC),
                                              init_once);
        pde_opener_cache =
                kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
                                  SLAB_ACCOUNT|SLAB_PANIC, NULL);
        proc_dir_entry_cache = kmem_cache_create_usercopy(
                "proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
                offsetof(struct proc_dir_entry, inline_name),
                SIZEOF_PDE_INLINE_NAME, NULL);
        BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}

static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
        struct super_block *sb = root->d_sb;
        struct pid_namespace *pid = sb->s_fs_info;

        if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
                seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
        if (pid->hide_pid != HIDEPID_OFF)
                seq_printf(seq, ",hidepid=%u", pid->hide_pid);

        return 0;
}

static const struct super_operations proc_sops = {
        .alloc_inode    = proc_alloc_inode,
        .destroy_inode  = proc_destroy_inode,
        .drop_inode     = generic_delete_inode,
        .evict_inode    = proc_evict_inode,
        .statfs         = simple_statfs,
        .remount_fs     = proc_remount,
        .show_options   = proc_show_options,
};
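
/*
 * ->in_use is a use count on the proc_dir_entry: use_pde()/unuse_pde()
 * bracket every call into the entry's proc_fops.  proc_entry_rundown()
 * adds BIAS, driving the counter negative so new users fail, and then
 * waits for the last in-flight user to bring the count back to BIAS.
 */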

enum {BIAS = -1U<<31};

static inline int use_pde(struct proc_dir_entry *pde)
{
        return likely(atomic_inc_unless_negative(&pde->in_use));
}

static void unuse_pde(struct proc_dir_entry *pde)
{
        if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
                complete(pde->pde_unload_completion);
}

/* pde is locked on entry, unlocked on exit */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
{
        /*
         * close() (proc_reg_release()) can't delete an entry and proceed:
         * ->release hook needs to be available at the right moment.
         *
         * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
         * "struct file" needs to be available at the right moment.
         *
         * Therefore, first process to enter this function does ->release() and
         * signals its completion to the other process which does nothing.
         */
        if (pdeo->closing) {
                /* somebody else is doing that, just wait */
                DECLARE_COMPLETION_ONSTACK(c);

                pdeo->c = &c;
                spin_unlock(&pde->pde_unload_lock);
                wait_for_completion(&c);
        } else {
                struct file *file;
                struct completion *c;

                pdeo->closing = true;
                spin_unlock(&pde->pde_unload_lock);
                file = pdeo->file;
                pde->proc_fops->release(file_inode(file), file);
                spin_lock(&pde->pde_unload_lock);
                /* After ->release. */
                list_del(&pdeo->lh);
                c = pdeo->c;
                spin_unlock(&pde->pde_unload_lock);
                if (unlikely(c))
                        complete(c);
                kmem_cache_free(pde_opener_cache, pdeo);
        }
}

void proc_entry_rundown(struct proc_dir_entry *de)
{
        DECLARE_COMPLETION_ONSTACK(c);

        /* Wait until all existing callers into module are done. */
        de->pde_unload_completion = &c;
        if (atomic_add_return(BIAS, &de->in_use) != BIAS)
                wait_for_completion(&c);

        /* ->pde_openers list can't grow from now on. */
        spin_lock(&de->pde_unload_lock);
        while (!list_empty(&de->pde_openers)) {
                struct pde_opener *pdeo;

                pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
                close_pdeo(de, pdeo);
                spin_lock(&de->pde_unload_lock);
        }
        spin_unlock(&de->pde_unload_lock);
}
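
/*
 * The proc_reg_* wrappers below pin the proc_dir_entry with use_pde()
 * around every proc_fops call, so remove_proc_entry() can wait for all
 * in-flight callers before the entry (and possibly its module) goes away.
 */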

static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        loff_t rv = -EINVAL;

        if (use_pde(pde)) {
                loff_t (*llseek)(struct file *, loff_t, int);

                llseek = pde->proc_fops->llseek;
                if (!llseek)
                        llseek = default_llseek;
                rv = llseek(file, offset, whence);
                unuse_pde(pde);
        }
        return rv;
}

static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
        struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;

        if (use_pde(pde)) {
                read = pde->proc_fops->read;
                if (read)
                        rv = read(file, buf, count, ppos);
                unuse_pde(pde);
        }
        return rv;
}

static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
        struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;

        if (use_pde(pde)) {
                write = pde->proc_fops->write;
                if (write)
                        rv = write(file, buf, count, ppos);
                unuse_pde(pde);
        }
        return rv;
}

static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        __poll_t rv = DEFAULT_POLLMASK;
        __poll_t (*poll)(struct file *, struct poll_table_struct *);

        if (use_pde(pde)) {
                poll = pde->proc_fops->poll;
                if (poll)
                        rv = poll(file, pts);
                unuse_pde(pde);
        }
        return rv;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        long (*ioctl)(struct file *, unsigned int, unsigned long);

        if (use_pde(pde)) {
                ioctl = pde->proc_fops->unlocked_ioctl;
                if (ioctl)
                        rv = ioctl(file, cmd, arg);
                unuse_pde(pde);
        }
        return rv;
}

#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        long (*compat_ioctl)(struct file *, unsigned int, unsigned long);

        if (use_pde(pde)) {
                compat_ioctl = pde->proc_fops->compat_ioctl;
                if (compat_ioctl)
                        rv = compat_ioctl(file, cmd, arg);
                unuse_pde(pde);
        }
        return rv;
}
#endif

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        int rv = -EIO;
        int (*mmap)(struct file *, struct vm_area_struct *);

        if (use_pde(pde)) {
                mmap = pde->proc_fops->mmap;
                if (mmap)
                        rv = mmap(file, vma);
                unuse_pde(pde);
        }
        return rv;
}

static unsigned long
proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
                           unsigned long len, unsigned long pgoff,
                           unsigned long flags)
{
        struct proc_dir_entry *pde = PDE(file_inode(file));
        unsigned long rv = -EIO;

        if (use_pde(pde)) {
                typeof(proc_reg_get_unmapped_area) *get_area;

                get_area = pde->proc_fops->get_unmapped_area;
#ifdef CONFIG_MMU
                if (!get_area)
                        get_area = current->mm->get_unmapped_area;
#endif
                if (get_area)
                        rv = get_area(file, orig_addr, len, pgoff, flags);
                else
                        rv = orig_addr;
                unuse_pde(pde);
        }
        return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *pde = PDE(inode);
        int rv = 0;
        int (*open)(struct inode *, struct file *);
        int (*release)(struct inode *, struct file *);
        struct pde_opener *pdeo;

        /*
         * Ensure that
         * 1) PDE's ->release hook will be called no matter what
         *    either normally by close()/->release, or forcefully by
         *    rmmod/remove_proc_entry.
         *
         * 2) rmmod isn't blocked by opening file in /proc and sitting on
         *    the descriptor (including "rmmod foo </proc/foo" scenario).
         *
         * Save every "struct file" with custom ->release hook.
         */
        if (!use_pde(pde))
                return -ENOENT;

        release = pde->proc_fops->release;
        if (release) {
                pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
                if (!pdeo) {
                        rv = -ENOMEM;
                        goto out_unuse;
                }
        }

        open = pde->proc_fops->open;
        if (open)
                rv = open(inode, file);

        if (release) {
                if (rv == 0) {
                        /* To know what to release. */
                        pdeo->file = file;
                        pdeo->closing = false;
                        pdeo->c = NULL;
                        spin_lock(&pde->pde_unload_lock);
                        list_add(&pdeo->lh, &pde->pde_openers);
                        spin_unlock(&pde->pde_unload_lock);
                } else
                        kmem_cache_free(pde_opener_cache, pdeo);
        }

out_unuse:
        unuse_pde(pde);
        return rv;
}

static int proc_reg_release(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *pde = PDE(inode);
        struct pde_opener *pdeo;

        spin_lock(&pde->pde_unload_lock);
        list_for_each_entry(pdeo, &pde->pde_openers, lh) {
                if (pdeo->file == file) {
                        close_pdeo(pde, pdeo);
                        return 0;
                }
        }
        spin_unlock(&pde->pde_unload_lock);
        return 0;
}

static const struct file_operations proc_reg_file_ops = {
        .llseek         = proc_reg_llseek,
        .read           = proc_reg_read,
        .write          = proc_reg_write,
        .poll           = proc_reg_poll,
        .unlocked_ioctl = proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = proc_reg_compat_ioctl,
#endif
        .mmap           = proc_reg_mmap,
        .get_unmapped_area = proc_reg_get_unmapped_area,
        .open           = proc_reg_open,
        .release        = proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
        .llseek         = proc_reg_llseek,
        .read           = proc_reg_read,
        .write          = proc_reg_write,
        .poll           = proc_reg_poll,
        .unlocked_ioctl = proc_reg_unlocked_ioctl,
        .mmap           = proc_reg_mmap,
        .get_unmapped_area = proc_reg_get_unmapped_area,
        .open           = proc_reg_open,
        .release        = proc_reg_release,
};
#endif
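
/*
 * Symlink targets live in pde->data; proc_get_link() pins the entry and
 * registers proc_put_link() as a delayed call, so the reference is only
 * dropped once the VFS is done with the link body.
 */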

static void proc_put_link(void *p)
{
        unuse_pde(p);
}

static const char *proc_get_link(struct dentry *dentry,
                                 struct inode *inode,
                                 struct delayed_call *done)
{
        struct proc_dir_entry *pde = PDE(inode);

        if (!use_pde(pde))
                return ERR_PTR(-EINVAL);
        set_delayed_call(done, proc_put_link, pde);
        return pde->data;
}

const struct inode_operations proc_link_inode_operations = {
        .get_link       = proc_get_link,
};
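
/*
 * proc_get_inode() takes over the caller's reference on @de: it is either
 * stashed in PROC_I(inode)->pde (and dropped in proc_evict_inode()) or
 * released via pde_put() right here if inode allocation fails.
 */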

struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
        struct inode *inode = new_inode(sb);

        if (inode) {
                inode->i_ino = de->low_ino;
                inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
                PROC_I(inode)->pde = de;

                if (is_empty_pde(de)) {
                        make_empty_dir_inode(inode);
                        return inode;
                }

                if (de->mode) {
                        inode->i_mode = de->mode;
                        inode->i_uid = de->uid;
                        inode->i_gid = de->gid;
                }
                if (de->size)
                        inode->i_size = de->size;
                if (de->nlink)
                        set_nlink(inode, de->nlink);
                WARN_ON(!de->proc_iops);
                inode->i_op = de->proc_iops;
                if (de->proc_fops) {
                        if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
                                if (!de->proc_fops->compat_ioctl)
                                        inode->i_fop =
                                                &proc_reg_file_ops_no_compat;
                                else
#endif
                                        inode->i_fop = &proc_reg_file_ops;
                        } else {
                                inode->i_fop = de->proc_fops;
                        }
                }
        } else
                pde_put(de);
        return inode;
}
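
/*
 * Mount-time setup: fill the superblock, instantiate the root inode from
 * the static proc_root entry, then wire up /proc/self and /proc/thread-self.
 */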

int proc_fill_super(struct super_block *s, void *data, int silent)
{
        struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
        struct inode *root_inode;
        int ret;

        if (!proc_parse_options(data, ns))
                return -EINVAL;

        /* User space would break if executables or devices appear on proc */
        s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
        s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = PROC_SUPER_MAGIC;
        s->s_op = &proc_sops;
        s->s_time_gran = 1;

        /*
         * procfs isn't actually a stacking filesystem; however, there is
         * too much magic going on inside it to permit stacking things on
         * top of it
         */
        s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;

        pde_get(&proc_root);
        root_inode = proc_get_inode(s, &proc_root);
        if (!root_inode) {
                pr_err("proc_fill_super: get root inode failed\n");
                return -ENOMEM;
        }

        s->s_root = d_make_root(root_inode);
        if (!s->s_root) {
                pr_err("proc_fill_super: allocate dentry failed\n");
                return -ENOMEM;
        }

        ret = proc_setup_self(s);
        if (ret) {
                return ret;
        }
        return proc_setup_thread_self(s);
}