  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/ipc/namespace.c
  4. * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
  5. */
  6. #include <linux/ipc.h>
  7. #include <linux/msg.h>
  8. #include <linux/ipc_namespace.h>
  9. #include <linux/rcupdate.h>
  10. #include <linux/nsproxy.h>
  11. #include <linux/slab.h>
  12. #include <linux/cred.h>
  13. #include <linux/fs.h>
  14. #include <linux/mount.h>
  15. #include <linux/user_namespace.h>
  16. #include <linux/proc_ns.h>
  17. #include <linux/sched/task.h>
  18. #include "util.h"
/*
 * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
 *
 * Namespaces queued on free_ipc_list (see put_ipc_ns) are torn down in
 * batches by free_ipc, so only the worker pays the grace-period latency.
 */
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);
  24. static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
  25. {
  26. return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
  27. }
  28. static void dec_ipc_namespaces(struct ucounts *ucounts)
  29. {
  30. dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
  31. }
/*
 * Allocate and initialize a fresh ipc_namespace owned by @user_ns.
 *
 * Returns the new namespace, or an ERR_PTR on failure:
 *   -ENOSPC  the UCOUNT_IPC_NAMESPACES limit is exhausted
 *   -ENOMEM  allocation or sysctl/mqueue setup failed
 *
 * Note: @old_ns is currently unused here; the new namespace starts empty.
 * The error unwinding below is strictly the reverse of the setup order —
 * do not reorder the fail_* labels.
 */
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
again:
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts) {
		/*
		 * IPC namespaces are freed asynchronously, by free_ipc_work.
		 * If frees were pending, flush_work will wait, and
		 * return true. Fail the allocation if no frees are pending.
		 */
		if (flush_work(&free_ipc_work))
			goto again;
		goto fail;
	}

	err = -ENOMEM;
	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
	if (ns == NULL)
		goto fail_dec;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto fail_free;
	ns->ns.ops = &ipcns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = mq_init_ns(ns);
	if (err)
		goto fail_put;

	err = -ENOMEM;
	if (!setup_mq_sysctls(ns))
		goto fail_put;

	if (!setup_ipc_sysctls(ns))
		goto fail_mq;

	err = msg_init_ns(ns);
	if (err)
		goto fail_ipc;

	/* sem/shm init cannot fail, so no unwinding is needed past here. */
	sem_init_ns(ns);
	shm_init_ns(ns);

	return ns;

fail_ipc:
	retire_ipc_sysctls(ns);
fail_mq:
	retire_mq_sysctls(ns);
fail_put:
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}
  90. struct ipc_namespace *copy_ipcs(unsigned long flags,
  91. struct user_namespace *user_ns, struct ipc_namespace *ns)
  92. {
  93. if (!(flags & CLONE_NEWIPC))
  94. return get_ipc_ns(ns);
  95. return create_ipc_ns(user_ns, ns);
  96. }
  97. /*
  98. * free_ipcs - free all ipcs of one type
  99. * @ns: the namespace to remove the ipcs from
  100. * @ids: the table of ipcs to free
  101. * @free: the function called to free each individual ipc
  102. *
  103. * Called for each kind of ipc when an ipc_namespace exits.
  104. */
  105. void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
  106. void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
  107. {
  108. struct kern_ipc_perm *perm;
  109. int next_id;
  110. int total, in_use;
  111. down_write(&ids->rwsem);
  112. in_use = ids->in_use;
  113. for (total = 0, next_id = 0; total < in_use; next_id++) {
  114. perm = idr_find(&ids->ipcs_idr, next_id);
  115. if (perm == NULL)
  116. continue;
  117. rcu_read_lock();
  118. ipc_lock_object(perm);
  119. free(ns, perm);
  120. total++;
  121. }
  122. up_write(&ids->rwsem);
  123. }
/*
 * Final teardown of one ipc_namespace: drop the mqueue mount, retire each
 * IPC subsystem and sysctl set, then release the accounting references
 * taken in create_ipc_ns. The order mirrors setup in reverse.
 */
static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Caller needs to wait for an RCU grace period to have passed
	 * after making the mount point inaccessible to new accesses.
	 */
	mntput(ns->mq_mnt);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);
	retire_mq_sysctls(ns);
	retire_ipc_sysctls(ns);

	dec_ipc_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
	kfree(ns);
}
/* Lock-less list of namespaces queued for deferred destruction. */
static LLIST_HEAD(free_ipc_list);

/*
 * Worker for free_ipc_work: drain the pending list, detach every mqueue
 * mount, wait for one RCU grace period covering all of them at once, and
 * only then free each namespace. Batching amortizes synchronize_rcu().
 */
static void free_ipc(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&free_ipc_list);
	struct ipc_namespace *n, *t;

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		mnt_make_shortterm(n->mq_mnt);

	/* Wait for any last users to have gone away. */
	synchronize_rcu();

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		free_ipc_ns(n);
}
/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files. That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	/* Count drop and sbinfo clearing must be atomic w.r.t. mq_lock. */
	if (refcount_dec_and_lock(&ns->ns.count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);
		/* Only the first queued entry needs to kick the worker. */
		if (llist_add(&ns->mnt_llist, &free_ipc_list))
			schedule_work(&free_ipc_work);
	}
}
  178. static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
  179. {
  180. return container_of(ns, struct ipc_namespace, ns);
  181. }
  182. static struct ns_common *ipcns_get(struct task_struct *task)
  183. {
  184. struct ipc_namespace *ns = NULL;
  185. struct nsproxy *nsproxy;
  186. task_lock(task);
  187. nsproxy = task->nsproxy;
  188. if (nsproxy)
  189. ns = get_ipc_ns(nsproxy->ipc_ns);
  190. task_unlock(task);
  191. return ns ? &ns->ns : NULL;
  192. }
  193. static void ipcns_put(struct ns_common *ns)
  194. {
  195. return put_ipc_ns(to_ipc_ns(ns));
  196. }
  197. static int ipcns_install(struct nsset *nsset, struct ns_common *new)
  198. {
  199. struct nsproxy *nsproxy = nsset->nsproxy;
  200. struct ipc_namespace *ns = to_ipc_ns(new);
  201. if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
  202. !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
  203. return -EPERM;
  204. put_ipc_ns(nsproxy->ipc_ns);
  205. nsproxy->ipc_ns = get_ipc_ns(ns);
  206. return 0;
  207. }
  208. static struct user_namespace *ipcns_owner(struct ns_common *ns)
  209. {
  210. return to_ipc_ns(ns)->user_ns;
  211. }
/* Hooks wired into the generic /proc/<pid>/ns machinery for "ipc". */
const struct proc_ns_operations ipcns_operations = {
	.name		= "ipc",
	.type		= CLONE_NEWIPC,
	.get		= ipcns_get,
	.put		= ipcns_put,
	.install	= ipcns_install,
	.owner		= ipcns_owner,
};