// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* Write-once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * Creates the kmem_cache that pids for the given namespace level are
 * allocated from.
 * @level: pid namespace level
 */

static struct kmem_cache *create_pid_cachep(unsigned int level)
{
	/* Level 0 is init_pid_ns.pid_cachep */
	struct kmem_cache **pkc = &pid_cache[level - 1];
	struct kmem_cache *kc;
	char name[4 + 10 + 1];
	unsigned int len;

	kc = READ_ONCE(*pkc);
	if (kc)
		return kc;

	snprintf(name, sizeof(name), "pid_%u", level + 1);
	len = sizeof(struct pid) + level * sizeof(struct upid);
	mutex_lock(&pid_caches_mutex);
	/* A possible name collision forces us to allocate under the mutex. */
	if (!*pkc)
		*pkc = kmem_cache_create(name, len, 0,
					 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
	mutex_unlock(&pid_caches_mutex);
	/* The current attempt can fail, but a racing one may have succeeded. */
	return READ_ONCE(*pkc);
}

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

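/*
 * Allocates a new pid namespace one level below @parent_pid_ns, owned
 * by @user_ns. Returns the new namespace or an ERR_PTR() on failure.
 */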
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int err;

	err = -EINVAL;
	if (!in_userns(parent_pid_ns->user_ns, user_ns))
		goto out;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	idr_init(&ns->idr);

	ns->pid_cachep = create_pid_cachep(level);
	if (ns->pid_cachep == NULL)
		goto out_free_idr;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_idr;
	ns->ns.ops = &pidns_operations;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->pid_allocated = PIDNS_ADDING;

	return ns;

out_free_idr:
	idr_destroy(&ns->idr);
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

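/*
 * RCU callback: the namespace memory may only be freed once a grace
 * period has passed, since lockless pid lookups can still be walking
 * structures that point into it.
 */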
static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	ns_free_inum(&ns->ns);

	idr_destroy(&ns->idr);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

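/*
 * clone()/unshare() path: without CLONE_NEWPID just take a reference
 * on the current namespace; otherwise create a fresh child namespace.
 */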
struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

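/*
 * Drop a reference on @ns. Every namespace holds a reference on its
 * parent, so destroying one namespace walks up the chain, dropping
 * each parent reference in turn, and stops at the first namespace
 * that is still in use (or at init_pid_ns, which is never freed).
 */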
void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

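/*
 * Called when the init process of @pid_ns exits: kill every remaining
 * task in the namespace, reap our own children, and wait until the
 * caller's pids are the only ones left allocated.
 */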
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;
	struct pid *pid;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD, causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown; see also the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the namespace-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: This signals each thread in the namespace - even those that
	 *       belong to the same thread group. To avoid this, we would have
	 *       to walk the entire tasklist looking for processes in this
	 *       namespace, but that could be unnecessarily expensive if the
	 *       pid namespace has just a few processes. Or we would need to
	 *       maintain a tasklist for each pid namespace.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	nr = 2;
	idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
		task = pid_task(pid, PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * kernel_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = kernel_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

| 227 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 228 | * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE |
| 229 | * process whose parents processes are outside of the pid |
| 230 | * namespace. Such processes are created with setns()+fork(). |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 231 | * |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 232 | * If those EXIT_ZOMBIE processes are not reaped by their |
| 233 | * parents before their parents exit, they will be reparented |
| 234 | * to pid_ns->child_reaper. Thus pidns->child_reaper needs to |
| 235 | * stay valid until they all go away. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 236 | * |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 237 | * The code relies on the pid_ns->child_reaper ignoring |
| 238 | * SIGCHILD to cause those EXIT_ZOMBIE processes to be |
| 239 | * autoreaped if reparented. |
| 240 | * |
| 241 | * Semantically it is also desirable to wait for EXIT_ZOMBIE |
| 242 | * processes before allowing the child_reaper to be reaped, as |
| 243 | * that gives the invariant that when the init process of a |
| 244 | * pid namespace is reaped all of the processes in the pid |
| 245 | * namespace are gone. |
| 246 | * |
| 247 | * Once all of the other tasks are gone from the pid_namespace |
| 248 | * free_pid() will awaken this task. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 249 | */ |
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
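/*
 * The ns_last_pid sysctl lets checkpoint/restore tools (e.g. CRIU)
 * choose the pid that the next fork() in this namespace will receive,
 * by repositioning the pid IDR's allocation cursor.
 */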
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;
	int ret, next;

	if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this
	 * field is volatile in a living namespace anyway, and code writing
	 * to it should synchronize its usage by external means.
	 */

	next = idr_get_cursor(&pid_ns->idr) - 1;

	tmp.data = &next;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (!ret && write)
		idr_set_cursor(&pid_ns->idr, next + 1);

	return ret;
}

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

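/*
 * reboot() from inside a pid namespace does not touch the machine.
 * Instead the namespace's init is killed and the requested command is
 * recorded, so that zap_pid_ns_processes() can report it through the
 * group exit code: SIGHUP for restart, SIGINT for halt/power-off.
 */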
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	send_sig(SIGKILL, pid_ns->child_reaper, 1);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

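/*
 * Return the namespace that @task's future children will be created
 * in. Until the first child has been forked the namespace has no
 * child reaper and is not yet usable, so return NULL in that case.
 */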
static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
	struct pid_namespace *ns = NULL;

	task_lock(task);
	if (task->nsproxy) {
		ns = task->nsproxy->pid_ns_for_children;
		get_pid_ns(ns);
	}
	task_unlock(task);

	if (ns) {
		read_lock(&tasklist_lock);
		if (!ns->child_reaper) {
			put_pid_ns(ns);
			ns = NULL;
		}
		read_unlock(&tasklist_lock);
	}

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

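/*
 * setns(CLONE_NEWPID): a task's own pid cannot change, so entering a
 * namespace only affects children forked afterwards, via
 * nsproxy->pid_ns_for_children.
 */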
static int pidns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}

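/*
 * ioctl(NS_GET_PARENT): only hand out the parent namespace if it lies
 * within the caller's active pid namespace.
 */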
static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name = "pid",
	.type = CLONE_NEWPID,
	.get = pidns_get,
	.put = pidns_put,
	.install = pidns_install,
	.owner = pidns_owner,
	.get_parent = pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
	.name = "pid_for_children",
	.real_ns_name = "pid",
	.type = CLONE_NEWPID,
	.get = pidns_for_children_get,
	.put = pidns_put,
	.install = pidns_install,
	.owner = pidns_owner,
	.get_parent = pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);