/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

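/*
 * Per-CPU scratch slot used by the 64-bit SYSCALL entry path to stash the
 * user stack pointer while switching to the kernel stack.
 */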
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
		       fs, shadowgs);
		return;
	}

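	/*
	 * Only the segment selectors can be read back from the CPU here;
	 * the FS/GS bases live in MSRs and are read separately below.
	 */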
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

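/*
 * Release any architecture-specific state held by an exiting thread.  On
 * x86-64 there is nothing to free; we only warn (and BUG) if the dead task's
 * mm is still hanging on to an LDT.
 */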
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

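/*
 * Snapshot the outgoing task's FS/GS selectors into its thread struct and,
 * via save_base_legacy(), whatever base state can be kept without an RDMSR.
 */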
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()). KVM wants an efficient way to save and restore
 * FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif

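/*
 * Load a user selector into FS or GS.  GS goes through load_gs_index() so
 * that the selector write happens with the kernel's active GS base safely
 * swapped out.
 */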
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

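/*
 * Set up the child's thread state and kernel stack so that the first switch
 * to it returns through ret_from_fork: kernel threads get their function and
 * argument in bx/r12, user children get a copy of the parent's pt_regs with
 * ax cleared so that fork()/clone() returns 0 in the child.
 */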
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

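/*
 * Reset the register state for a freshly execve()d task: clear FS/GS (and,
 * on X86_BUG_NULL_SEG parts, their stale bases), load the new data segments
 * and point the user frame at the new entry point and stack.
 */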
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_compat_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status. The x86 mmap() code relies on
	 * the syscall bitness so set x32 syscall bit right here to make
	 * in_compat_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

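/*
 * 64-bit-only arch_prctl() options: getting/setting the FS and GS bases and,
 * with CHECKPOINT_RESTORE, mapping a vDSO image at a caller-chosen address.
 * Unknown options return -EINVAL so that the syscall wrapper can try
 * do_arch_prctl_common().
 */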
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (option) {
	case ARCH_SET_GS:
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = arg2;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for fs, but do it for symmetry
		 * with gs.
		 */
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = arg2;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

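/*
 * arch_prctl(2): try the 64-bit-only options first, then fall back to the
 * options shared with 32-bit, which are handled by do_arch_prctl_common().
 */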
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

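/*
 * Report the task's saved user stack pointer (e.g. for /proc/<pid>/stat).
 */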
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}