// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
/* Write once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * create_pid_cachep - create the kmem_cache that pids at the given
 * namespace nesting level are allocated from.
 * @level: pid namespace level
 */
static struct kmem_cache *create_pid_cachep(unsigned int level)
{
        /* Level 0 is init_pid_ns.pid_cachep */
        struct kmem_cache **pkc = &pid_cache[level - 1];
        struct kmem_cache *kc;
        char name[4 + 10 + 1];  /* "pid_" + up to 10 digits + '\0' */
        unsigned int len;

        kc = READ_ONCE(*pkc);
        if (kc)
                return kc;

        snprintf(name, sizeof(name), "pid_%u", level + 1);
        len = sizeof(struct pid) + level * sizeof(struct upid);
        mutex_lock(&pid_caches_mutex);
        /* A duplicate cache name would break kmem_cache_create(), so
         * re-check and allocate under the mutex. */
        if (!*pkc)
                *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
        mutex_unlock(&pid_caches_mutex);
        /* Our attempt may have failed, but another task may have succeeded. */
        return READ_ONCE(*pkc);
}

static void proc_cleanup_work(struct work_struct *work)
{
        struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
        pid_ns_release_proc(ns);
}

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
        return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
        struct pid_namespace *parent_pid_ns)
{
        struct pid_namespace *ns;
        unsigned int level = parent_pid_ns->level + 1;
        struct ucounts *ucounts;
        int err;

        err = -EINVAL;
        if (!in_userns(parent_pid_ns->user_ns, user_ns))
                goto out;

        err = -ENOSPC;
        if (level > MAX_PID_NS_LEVEL)
                goto out;
        ucounts = inc_pid_namespaces(user_ns);
        if (!ucounts)
                goto out;

        err = -ENOMEM;
        ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out_dec;

        idr_init(&ns->idr);

        ns->pid_cachep = create_pid_cachep(level);
        if (ns->pid_cachep == NULL)
                goto out_free_idr;

        err = ns_alloc_inum(&ns->ns);
        if (err)
                goto out_free_idr;
        ns->ns.ops = &pidns_operations;

        kref_init(&ns->kref);
        ns->level = level;
        ns->parent = get_pid_ns(parent_pid_ns);
        ns->user_ns = get_user_ns(user_ns);
        ns->ucounts = ucounts;
        ns->pid_allocated = PIDNS_ADDING;
        INIT_WORK(&ns->proc_work, proc_cleanup_work);

        return ns;

out_free_idr:
        idr_destroy(&ns->idr);
        kmem_cache_free(pid_ns_cachep, ns);
out_dec:
        dec_pid_namespaces(ucounts);
out:
        return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
        struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

        dec_pid_namespaces(ns->ucounts);
        put_user_ns(ns->user_ns);

        kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
        ns_free_inum(&ns->ns);

        idr_destroy(&ns->idr);
        call_rcu(&ns->rcu, delayed_free_pidns);
}

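/*
 * Called from create_new_namespaces() when clone(2) or unshare(2) is
 * passed CLONE_NEWPID. The new namespace becomes a child of the
 * caller's active pid namespace; the calling task itself never changes
 * pid namespace - only tasks created afterwards start in the new one.
 */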
struct pid_namespace *copy_pid_ns(unsigned long flags,
        struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
        if (!(flags & CLONE_NEWPID))
                return get_pid_ns(old_ns);
        if (task_active_pid_ns(current) != old_ns)
                return ERR_PTR(-EINVAL);
        return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
        struct pid_namespace *ns;

        ns = container_of(kref, struct pid_namespace, kref);
        destroy_pid_namespace(ns);
}

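/*
 * Each namespace holds a reference on its parent (taken in
 * create_pid_namespace()), so dropping the last reference at one level
 * can cascade up the chain; init_pid_ns is statically allocated and is
 * never freed.
 */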
void put_pid_ns(struct pid_namespace *ns)
{
        struct pid_namespace *parent;

        while (ns != &init_pid_ns) {
                parent = ns->parent;
                if (!kref_put(&ns->kref, free_pid_ns))
                        break;
                ns = parent;
        }
}
EXPORT_SYMBOL_GPL(put_pid_ns);

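/*
 * Called when the namespace's init (the child reaper) exits: kill every
 * other task in the namespace, reap our children, then wait until no
 * pids beyond our own remain allocated.
 */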
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
        int nr;
        int rc;
        struct task_struct *task, *me = current;
        /* Our own pid, plus the group leader's zombie if we are not it. */
        int init_pids = thread_group_leader(me) ? 1 : 2;
        struct pid *pid;

        /* Don't allow any more processes into the pid namespace */
        disable_pid_allocation(pid_ns);

        /*
         * Ignore SIGCHLD so that any terminated children are autoreaped.
         * This speeds up the namespace shutdown; see also the comment
         * below.
         */
        spin_lock_irq(&me->sighand->siglock);
        me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
        spin_unlock_irq(&me->sighand->siglock);

        /*
         * The last thread in the namespace-init thread group is
         * terminating. Find the remaining pids in the namespace, signal
         * them and wait for them to exit.
         *
         * Note: this signals each thread in the namespace - even those
         * that belong to the same thread group. To avoid that, we would
         * have to walk the entire tasklist looking for processes in this
         * namespace, which could be unnecessarily expensive if the pid
         * namespace has just a few processes, or we would need to
         * maintain a tasklist for each pid namespace.
         */
        rcu_read_lock();
        read_lock(&tasklist_lock);
        nr = 2;         /* start after pid 1, the reaper itself */
        idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
                task = pid_task(pid, PIDTYPE_PID);
                if (task && !__fatal_signal_pending(task))
                        group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
        }
        read_unlock(&tasklist_lock);
        rcu_read_unlock();

        /*
         * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
         * kernel_wait4() will also block until our children traced from the
         * parent namespace are detached and become EXIT_DEAD.
         */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = kernel_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

        /*
         * kernel_wait4() above can't reap the EXIT_DEAD children, but we do
         * not really care: we could reparent them to the global init. We
         * could exit and reap ->child_reaper even if it is not the last
         * thread in this pid_ns, since free_pid(pid_allocated == 0) calls
         * proc_cleanup_work() and the pid_ns cannot go away until
         * proc_kill_sb() drops the reference.
         *
         * But this ns can also have other tasks injected by setns()+fork().
         * Again, ignoring the user visible semantics, we do not really need
         * to wait until they are all reaped, but they can be reparented to
         * us and thus we need to ensure that pid->child_reaper stays valid
         * until they all go away. See free_pid()->wake_up_process().
         *
         * We rely on SIGCHLD being ignored: an injected zombie must be
         * autoreaped if it is reparented to us.
         */
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (pid_ns->pid_allocated == init_pids)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;

        acct_exit_ns(pid_ns);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
        struct ctl_table tmp = *table;
        int ret, next;

        if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * Writing directly to the namespace's last_pid field is OK: it is
         * volatile in a living namespace anyway, and any code writing to
         * it must synchronize its usage by external means.
         */

        /* The IDR cursor is the next pid to try, so the last pid is one less. */
        next = idr_get_cursor(&pid_ns->idr) - 1;

        tmp.data = &next;
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (!ret && write)
                idr_set_cursor(&pid_ns->idr, next + 1);

        return ret;
}
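/*
 * A hedged user-space sketch of what the handler above implements:
 * reading /proc/sys/kernel/ns_last_pid reports the last pid allocated
 * in the caller's pid namespace, and (with CAP_SYS_ADMIN in the owning
 * user namespace) writing it chooses where allocation resumes, e.g.:
 *
 *      echo 9999 > /proc/sys/kernel/ns_last_pid
 *      fork();         // child gets pid 10000, assuming it is free
 *
 * Checkpoint/restore uses this to recreate tasks with their old pids.
 */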

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
        {
                .procname = "ns_last_pid",
                .maxlen = sizeof(int),
                .mode = 0666, /* permissions are checked in the handler */
                .proc_handler = pid_ns_ctl_handler,
                .extra1 = SYSCTL_ZERO,
                .extra2 = &pid_max,
        },
        { }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif  /* CONFIG_CHECKPOINT_RESTORE */

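/*
 * reboot(2) called by a pid namespace's init does not reboot the
 * machine. Instead the namespace is torn down, and the signal stored in
 * pid_ns->reboot becomes the reaper's group exit code (see
 * zap_pid_ns_processes()), so the parent namespace can tell a restart
 * (SIGHUP) from a halt/power-off (SIGINT) via wait(2).
 */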
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
        if (pid_ns == &init_pid_ns)
                return 0;

        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART2:
        case LINUX_REBOOT_CMD_RESTART:
                pid_ns->reboot = SIGHUP;
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
        case LINUX_REBOOT_CMD_HALT:
                pid_ns->reboot = SIGINT;
                break;
        default:
                return -EINVAL;
        }

        read_lock(&tasklist_lock);
        send_sig(SIGKILL, pid_ns->child_reaper, 1);
        read_unlock(&tasklist_lock);

        do_exit(0);

        /* Not reached */
        return 0;
}

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
        return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
        struct pid_namespace *ns;

        rcu_read_lock();
        ns = task_active_pid_ns(task);
        if (ns)
                get_pid_ns(ns);
        rcu_read_unlock();

        return ns ? &ns->ns : NULL;
}

static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
        struct pid_namespace *ns = NULL;

        task_lock(task);
        if (task->nsproxy) {
                ns = task->nsproxy->pid_ns_for_children;
                get_pid_ns(ns);
        }
        task_unlock(task);

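        /*
         * A namespace in which no task has ever run has no child_reaper
         * yet; presumably it is not yet safe to expose, so report it as
         * unavailable.
         */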
        if (ns) {
                read_lock(&tasklist_lock);
                if (!ns->child_reaper) {
                        put_pid_ns(ns);
                        ns = NULL;
                }
                read_unlock(&tasklist_lock);
        }

        return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
        put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct pid_namespace *active = task_active_pid_ns(current);
        struct pid_namespace *ancestor, *new = to_pid_ns(ns);

        if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * Only allow entering the current active pid namespace
         * or a child of the current active pid namespace.
         *
         * This is required for fork to return a usable pid value and
         * this maintains the property that processes and their
         * children cannot escape their current pid namespace.
         */
        if (new->level < active->level)
                return -EINVAL;

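        /* Walk up from the target namespace to the caller's level; the
         * ancestor found there must be the active namespace itself. */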
        ancestor = new;
        while (ancestor->level > active->level)
                ancestor = ancestor->parent;
        if (ancestor != active)
                return -EINVAL;

        put_pid_ns(nsproxy->pid_ns_for_children);
        nsproxy->pid_ns_for_children = get_pid_ns(new);
        return 0;
}

static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
        struct pid_namespace *active = task_active_pid_ns(current);
        struct pid_namespace *pid_ns, *p;

        /* The parent must be the caller's active namespace or one of
         * its descendants. */
        pid_ns = p = to_pid_ns(ns)->parent;
        for (;;) {
                if (!p)
                        return ERR_PTR(-EPERM);
                if (p == active)
                        break;
                p = p->parent;
        }

        return &get_pid_ns(pid_ns)->ns;
}

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
        return to_pid_ns(ns)->user_ns;
}

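/*
 * /proc/<pid>/ns/pid reports the namespace a task currently lives in,
 * while /proc/<pid>/ns/pid_for_children reports the one its future
 * children will be born into; the two diverge after unshare(2) or
 * setns(2) with CLONE_NEWPID.
 */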
const struct proc_ns_operations pidns_operations = {
        .name           = "pid",
        .type           = CLONE_NEWPID,
        .get            = pidns_get,
        .put            = pidns_put,
        .install        = pidns_install,
        .owner          = pidns_owner,
        .get_parent     = pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
        .name           = "pid_for_children",
        .real_ns_name   = "pid",
        .type           = CLONE_NEWPID,
        .get            = pidns_for_children_get,
        .put            = pidns_put,
        .install        = pidns_install,
        .owner          = pidns_owner,
        .get_parent     = pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
        register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
        return 0;
}

__initcall(pid_namespaces_init);