// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <kern_util.h>
#include <sysdep/ptrace.h>
#include <sysdep/ptrace_user.h>
#include <sysdep/syscalls.h>
#include <linux/time-internal.h>
#include <asm/unistd.h>
16void handle_syscall(struct uml_pt_regs *r)
17{
18 struct pt_regs *regs = container_of(r, struct pt_regs, regs);
19 int syscall;
20
David Brazdil0f672f62019-12-10 10:32:29 +000021 /*
22 * If we have infinite CPU resources, then make every syscall also a
23 * preemption point, since we don't have any other preemption in this
24 * case, and kernel threads would basically never run until userspace
25 * went to sleep, even if said userspace interacts with the kernel in
26 * various ways.
27 */
Olivier Deprez157378f2022-04-04 15:47:50 +020028 if (time_travel_mode == TT_MODE_INFCPU ||
29 time_travel_mode == TT_MODE_EXTERNAL)
David Brazdil0f672f62019-12-10 10:32:29 +000030 schedule();
31
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000032 /* Initialize the syscall number and default return value. */
33 UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
34 PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
35
36 if (syscall_trace_enter(regs))
37 goto out;
38
39 /* Do the seccomp check after ptrace; failures should be fast. */
Olivier Deprez157378f2022-04-04 15:47:50 +020040 if (secure_computing() == -1)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000041 goto out;
42
43 syscall = UPT_SYSCALL_NR(r);
44 if (syscall >= 0 && syscall <= __NR_syscall_max)
45 PT_REGS_SET_SYSCALL_RETURN(regs,
46 EXECUTE_SYSCALL(syscall, regs));
47
48out:
49 syscall_trace_leave(regs);
50}