// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

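/*
 * Worked example (commentary only): an opcode declared with
 * DstReg | SrcMem (shifts defined below) packs OpReg into bits 1-5 and
 * OpMem into bits 6-10 of the 56-bit flags word, so the decoder can
 * recover each operand type as
 *
 *	(flags & DstMask) >> DstShift == OpReg
 *	(flags & SrcMask) >> SrcShift == OpMem
 */
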
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

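/*
 * Dispatch example (commentary only): an opcode-table entry flagged
 * GroupDual points u.gdual at a struct group_dual, and the decoder
 * picks gdual->mod3[ModRM.reg] when ModRM.mod == 3, else
 * gdual->mod012[ModRM.reg].  A Prefix entry similarly selects
 * pfx_no/pfx_66/pfx_f2/pfx_f3 from the mandatory-prefix byte.
 */
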
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

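/*
 * Illustrative use of the register cache (hedged sketch, not a quote of
 * the code below): a read-modify-write of a GPR goes through reg_rmw()
 * so the value is faulted into _regs[] and marked dirty, e.g.
 *
 *	*reg_rmw(ctxt, VCPU_REGS_RCX) -= 1;
 *
 * writeback_registers() then flushes only the dirty entries back
 * through ->write_gpr().
 */
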
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

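/*
 * Worked example of that calculation (commentary only): the FASTOP*
 * groups below emit the byte/word/long/quad bodies back to back, each
 * aligned to FASTOP_SIZE, so for an operand of 2^n bytes the caller
 * enters at
 *
 *	em_<op> + n * FASTOP_SIZE
 *
 * e.g. a 4-byte add (n == 2) starts at em_add + 2 * FASTOP_SIZE.
 */
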
#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

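/*
 * For instance (illustrative expansion): FASTOP2(add) emits em_add as
 * four FASTOP_SIZE-aligned stubs,
 *
 *	addb %dl, %al; ret
 *	addw %dx, %ax; ret
 *	addl %edx, %eax; ret
 *	addq %rdx, %rax; ret	(64-bit only)
 *
 * matching the dst=rax/src=rdx calling convention described above.
 */
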
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     "   jmp  2b\n" \
		     ".popsection\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

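/*
 * Usage sketch (hedged: this is the pattern used further down in this
 * file for FXSAVE/FXRSTOR emulation):
 *
 *	rc = asm_safe("fxsave %[fx]", : [fx] "+m"(fx_state));
 *
 * A faulting instruction branches to the fixup, sets _fault, and the
 * macro evaluates to X86EMUL_UNHANDLEABLE instead of X86EMUL_CONTINUE.
 */
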
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

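/*
 * Worked example (commentary only): with a 32-bit SS (ss.d == 1)
 * stack_mask() returns 0xffffffff, so stack_size() yields
 * (__fls(0xffffffff) + 1) >> 3 == 4 bytes; a 16-bit SS gives 0xffff
 * and 2 bytes, and 64-bit mode gives ~0UL and 8 bytes.
 */
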
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

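/*
 * Example (commentary only): a 16-byte MOVDQA operand (Aligned) must
 * sit on a 16-byte boundary, MOVDQU (Unaligned) may sit anywhere, and
 * the 512-byte FXSAVE image (Aligned16) needs only 16-byte alignment.
 * __linearize() below applies this as (la & (alignment - 1)) -> #GP.
 */
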
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

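/*
 * Usage pattern (as the ModRM decoder below does; note both macros
 * assume a local 'rc' and a 'done:' label in the caller):
 *
 *	sib = insn_fetch(u8, ctxt);		// one opcode byte
 *	modrm_ea += insn_fetch(s32, ctxt);	// 32-bit displacement
 */
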
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies whether the high byte registers AH,CH,DH,BH can be
 * decoded (only when there is no REX prefix).
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}

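/*
 * Worked example (commentary only): the SETcc stubs above are emitted
 * 4 bytes apart in condition-code order, so for a condition nibble of
 * 0x4 (ZF set) test_cc() calls em_setcc + 0x10, i.e. the setz stub,
 * and returns its %al result.
 */
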
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void emulator_get_fpu(void)
{
	fpregs_lock();

	fpregs_assert_state_consistent();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_return();
}

static void emulator_put_fpu(void)
{
	fpregs_unlock();
}

static void read_sse_reg(sse128_t *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	emulator_put_fpu();
}

static void write_sse_reg(sse128_t *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	emulator_put_fpu();
}

static void read_mmx_reg(u64 *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	emulator_put_fpu();
}

static void write_mmx_reg(u64 *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	emulator_put_fpu();
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	emulator_get_fpu();
	asm volatile("fninit");
	emulator_put_fpu();
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	emulator_get_fpu();
	asm volatile("fnstcw %0": "+m"(fcw));
	emulator_put_fpu();

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	emulator_get_fpu();
	asm volatile("fnstsw %0": "+m"(fsw));
	emulator_put_fpu();

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(&op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(&op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

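/*
 * Worked example (commentary only): for BT m16, r16 with a source bit
 * offset of 100, mask is ~15, so sv = 96 and the effective address is
 * advanced by 96 >> 3 == 12 bytes; the in-word bit offset left in
 * src.val is 100 & 15 == 4.
 */
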
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

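/*
 * Read-ahead sketch (commentary only): for "rep insw" with a large
 * RCX and DF clear, the first call fills io_read with
 * min3(in_page, sizeof(rc->data) / size, count) elements in a single
 * ->pio_in_emulated() call; later iterations are then satisfied from
 * the cache until rc->pos catches up with rc->end.
 */
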
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment's DPL != CPL
		 */
1734 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1735 goto exception;
1736 break;
1737 case VCPU_SREG_CS:
1738 if (!(seg_desc.type & 8))
1739 goto exception;
1740
1741 if (seg_desc.type & 4) {
1742 /* conforming */
1743 if (dpl > cpl)
1744 goto exception;
1745 } else {
1746 /* nonconforming */
1747 if (rpl > cpl || dpl != cpl)
1748 goto exception;
1749 }
1750 /* in long-mode d/b must be clear if l is set */
1751 if (seg_desc.d && seg_desc.l) {
1752 u64 efer = 0;
1753
1754 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1755 if (efer & EFER_LMA)
1756 goto exception;
1757 }
1758
1759 /* CS(RPL) <- CPL */
1760 selector = (selector & 0xfffc) | cpl;
1761 break;
1762 case VCPU_SREG_TR:
1763 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1764 goto exception;
1765 old_desc = seg_desc;
1766 seg_desc.type |= 2; /* busy */
1767 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1768 sizeof(seg_desc), &ctxt->exception);
1769 if (ret != X86EMUL_CONTINUE)
1770 return ret;
1771 break;
1772 case VCPU_SREG_LDTR:
1773 if (seg_desc.s || seg_desc.type != 2)
1774 goto exception;
1775 break;
1776 default: /* DS, ES, FS, or GS */
1777 /*
1778 * segment is not a data or readable code segment or
1779 * ((segment is a data or nonconforming code segment)
1780 * and (both RPL and CPL > DPL))
1781 */
1782 if ((seg_desc.type & 0xa) == 0x8 ||
1783 (((seg_desc.type & 0xc) != 0xc) &&
1784 (rpl > dpl && cpl > dpl)))
1785 goto exception;
1786 break;
1787 }
1788
1789 if (seg_desc.s) {
1790 /* mark segment as accessed */
1791 if (!(seg_desc.type & 1)) {
1792 seg_desc.type |= 1;
1793 ret = write_segment_descriptor(ctxt, selector,
1794 &seg_desc);
1795 if (ret != X86EMUL_CONTINUE)
1796 return ret;
1797 }
1798 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1799 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1800 if (ret != X86EMUL_CONTINUE)
1801 return ret;
1802 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1803 ((u64)base3 << 32), ctxt))
1804 return emulate_gp(ctxt, 0);
1805 }
1806load:
1807 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1808 if (desc)
1809 *desc = seg_desc;
1810 return X86EMUL_CONTINUE;
1811exception:
1812 return emulate_exception(ctxt, err_vec, err_code, true);
1813}
1814
1815static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1816 u16 selector, int seg)
1817{
1818 u8 cpl = ctxt->ops->cpl(ctxt);
1819
1820 /*
1821 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1822 * they can load it at CPL<3 (Intel's manual says only LSS can,
1823 * but it's wrong).
1824 *
1825 * However, the Intel manual says that putting IST=1/DPL=3 in
1826 * an interrupt gate will result in SS=3 (the AMD manual instead
1827 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1828 * and only forbid it here.
1829 */
1830 if (seg == VCPU_SREG_SS && selector == 3 &&
1831 ctxt->mode == X86EMUL_MODE_PROT64)
1832 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1833
1834 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1835 X86_TRANSFER_NONE, NULL);
1836}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(&op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(&op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
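
/*
 * Note on the OP_MEM path above: when the instruction carried a LOCK
 * prefix, the store is done as a compare-and-exchange against the value
 * originally read (op->orig_val), so the write only lands if guest memory
 * still holds what the emulator used as input. For illustration:
 *
 *	lock incl (%rax)	; read v, compute v+1, cmpxchg(v -> v+1)
 *
 * A plain segmented_write() would lose the atomicity of the RMW cycle.
 */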

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}
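
/*
 * Worked example: a 32-bit "push %eax" with ESP = 0x1000 first decrements
 * RSP by 4 (rsp_increment(ctxt, -4)), then stores the 4 source bytes at
 * SS:0xffc. stack_mask() truncates the effective address to 16 or 32 bits
 * when the SS attributes select a narrower stack width.
 */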

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
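
/*
 * For illustration: in protected mode at CPL=3 with IOPL=0, neither IF
 * nor IOPL is in change_mask, so "popf" with a popped value of 0x246
 * (IF set) silently drops the requested IF change, matching hardware;
 * only the arithmetic/status bits in change_mask are taken from the
 * popped value.
 */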

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
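
/*
 * Real-mode IVT layout, as consumed above: entry irq occupies 4 bytes at
 * IDT base + irq*4, low word = new IP, high word = new CS. E.g. for
 * "int $0x10" with the IDT at 0, the new IP is read from 0x40 and the
 * new CS from 0x42, after FLAGS, the old CS and the old IP have been
 * pushed.
 */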

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* IRET from protected mode is not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
				  (u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}
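
/*
 * CMPXCHG8B semantics, for reference: compare EDX:EAX with the 64-bit
 * memory operand. On a match, ZF is set and ECX:EBX is stored to the
 * operand (via the writeback path); on a mismatch, ZF is cleared and the
 * memory value is loaded into EDX:EAX. The 16-byte CMPXCHG16B form is
 * punted to X86EMUL_UNHANDLEABLE here.
 */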

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return ctxt->ops->guest_has_long_mode(ctxt);
#else
	return false;
#endif
}

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g = (flags >> 23) & 1;
	desc->d = (flags >> 22) & 1;
	desc->l = (flags >> 21) & 1;
	desc->avl = (flags >> 20) & 1;
	desc->p = (flags >> 15) & 1;
	desc->dpl = (flags >> 13) & 3;
	desc->s = (flags >> 12) & 1;
	desc->type = (flags >> 8) & 15;
}
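
/*
 * The flags word decoded above mirrors bits 8..23 of the second dword of
 * a GDT descriptor (the access byte plus the G/D/L/AVL nibble). E.g.
 * flags 0x00c09b00 yields type=0xb (execute/read, accessed), s=1, dpl=0,
 * p=1, g=1, d=1: a flat 32-bit code segment.
 */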

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector = GET_SMSTATE(u16, smstate, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
	base3 = GET_SMSTATE(u32, smstate, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.  However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);

	if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u32, smstate, 0x7fc8);

	if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
		return X86EMUL_UNHANDLEABLE;

	selector = GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector = GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}

#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

	ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u64, smstate, 0x7f68);

	if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u64, smstate, 0x7f60);

	if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
		return X86EMUL_UNHANDLEABLE;

	cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
	cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
	cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
	val = GET_SMSTATE(u64, smstate, 0x7ed0);

	if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;

	selector = GET_SMSTATE(u32, smstate, 0x7e90);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
	base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
	dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector = GET_SMSTATE(u32, smstate, 0x7e70);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
	base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
	dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
#endif

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	char buf[512];
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	smbase = ctxt->ops->get_smbase(ctxt);

	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
	if (ret != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA. */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA. */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode. */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}

	/*
	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. enter guest mode) before loading state from the
	 * SMM state-save area.
	 */
	if (ctxt->ops->pre_leave_smm(ctxt, buf))
		return X86EMUL_UNHANDLEABLE;

#ifdef CONFIG_X86_64
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, buf);
	else
#endif
		ret = rsm_load_state_32(ctxt, buf);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	ctxt->ops->post_leave_smm(ctxt);

	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
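
/*
 * With g=1 the 20-bit limit is in 4 KiB units, so limit 0xfffff covers
 * 0xfffff * 4 KiB + 0xfff = 4 GiB - 1: together with base 0 these are the
 * flat segments SYSCALL/SYSENTER architecturally assume; only the
 * selectors, DPL and (for 64-bit) CS.L are patched up by the callers.
 */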

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	return is_guest_vendor_intel(ebx, ecx, edx);
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so only apply the
	 * vendor-specific (CPUID) check if another mode is active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	/*
	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode,
	 * so a 64-bit guest running a 32-bit compat app will #UD! While
	 * this behaviour could be fixed (by emulating) to match the AMD
	 * response, AMD CPUs can't be made to behave like Intel.
	 */
	if (is_guest_vendor_intel(ebx, ecx, edx))
		return false;

	if (is_guest_vendor_amd(ebx, ecx, edx) ||
	    is_guest_vendor_hygon(ebx, ecx, edx))
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
	return false;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}
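
/*
 * MSR_STAR layout, as consumed above: bits 47:32 hold the SYSCALL CS/SS
 * base selector and bits 31:0 the legacy-mode SYSCALL EIP. E.g. with
 * STAR[47:32] = 0x0010, SYSCALL loads CS = 0x0010 and SS = 0x0018; in
 * long mode RIP then comes from MSR_LSTAR (or MSR_CSTAR for compat
 * mode) and the RFLAGS bits listed in MSR_SYSCALL_MASK are cleared.
 */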

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);
	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
					  (u32)msr_data;
	if (efer & EFER_LMA)
		ctxt->mode = X86EMUL_MODE_PROT64;

	return X86EMUL_CONTINUE;
}
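
/*
 * SYSENTER derives everything from three MSRs: CS comes from
 * MSR_IA32_SYSENTER_CS (with RPL forced to 0), SS is CS + 8, and RIP/RSP
 * come from MSR_IA32_SYSENTER_EIP/_ESP. E.g. SYSENTER_CS = 0x10 yields
 * CS = 0x10 and SS = 0x18, both flat, DPL 0.
 */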

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}
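
/*
 * SYSEXIT selector math above follows the SDM: with SYSENTER_CS = 0x10,
 * a 32-bit exit loads CS = 0x20 and SS = 0x28, while a 64-bit exit
 * (REX.W, i.e. rex_prefix bit 3) loads CS = 0x30 and SS = 0x38, all ORed
 * with RPL 3. The return RIP comes from RDX and the stack from RCX.
 */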

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap. Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
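
/*
 * Worked example: "inb" from port 0x3fa with the I/O bitmap at
 * io_bitmap_ptr. port/8 = 0x7f selects the bitmap byte, bit_idx =
 * 0x3fa & 7 = 2 the bit within it, and mask = 1 for len 1; the access is
 * allowed only if that permission bit is clear. Reading two bytes lets a
 * multi-byte access straddle a byte boundary in the bitmap.
 */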

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite a strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		fallthrough;
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in eflags; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
			    struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
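
/*
 * DAS worked example: after a BCD subtraction leaving AL = 0x2e (low
 * nibble 0xe > 9) with CF and AF clear, DAS subtracts 6 to give AL =
 * 0x28 and sets AF, turning the binary result back into packed BCD. The
 * OR-with-zero trick above exists only to make fastop() recompute
 * PF/ZF/SF for the new AL.
 */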

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
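
/*
 * AAM example: "aam" (imm8 = 10, the default) with AL = 123 stores
 * AH = 12 and AL = 3, converting binary to two unpacked BCD digits; an
 * immediate of 0 divides by zero, hence the #DE above.
 */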

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
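
/*
 * AAD is the inverse of AAM: with AH = 12, AL = 3 and the default
 * immediate 10, AL becomes (3 + 12 * 10) & 0xff = 123 and AH is cleared
 * by the writeback of the 16-bit result above.
 */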
3506
3507static int em_call(struct x86_emulate_ctxt *ctxt)
3508{
3509 int rc;
3510 long rel = ctxt->src.val;
3511
3512 ctxt->src.val = (unsigned long)ctxt->_eip;
3513 rc = jmp_rel(ctxt, rel);
3514 if (rc != X86EMUL_CONTINUE)
3515 return rc;
3516 return em_push(ctxt);
3517}
3518
3519static int em_call_far(struct x86_emulate_ctxt *ctxt)
3520{
3521 u16 sel, old_cs;
3522 ulong old_eip;
3523 int rc;
3524 struct desc_struct old_desc, new_desc;
3525 const struct x86_emulate_ops *ops = ctxt->ops;
3526 int cpl = ctxt->ops->cpl(ctxt);
3527 enum x86emul_mode prev_mode = ctxt->mode;
3528
3529 old_eip = ctxt->_eip;
3530 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3531
3532 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3533 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3534 X86_TRANSFER_CALL_JMP, &new_desc);
3535 if (rc != X86EMUL_CONTINUE)
3536 return rc;
3537
3538 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3539 if (rc != X86EMUL_CONTINUE)
3540 goto fail;
3541
3542 ctxt->src.val = old_cs;
3543 rc = em_push(ctxt);
3544 if (rc != X86EMUL_CONTINUE)
3545 goto fail;
3546
3547 ctxt->src.val = old_eip;
3548 rc = em_push(ctxt);
3549 /* If we failed, we tainted the memory, but the very least we should
3550 restore cs */
3551 if (rc != X86EMUL_CONTINUE) {
3552 pr_warn_once("faulting far call emulation tainted memory\n");
3553 goto fail;
3554 }
3555 return rc;
3556fail:
3557 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3558 ctxt->mode = prev_mode;
3559 return rc;
3560
3561}
3562
3563static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3564{
3565 int rc;
3566 unsigned long eip;
3567
3568 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3569 if (rc != X86EMUL_CONTINUE)
3570 return rc;
3571 rc = assign_eip_near(ctxt, eip);
3572 if (rc != X86EMUL_CONTINUE)
3573 return rc;
3574 rsp_increment(ctxt, ctxt->src.val);
3575 return X86EMUL_CONTINUE;
3576}
3577
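/*
 * XCHG with a memory operand asserts LOCK even without an explicit
 * prefix, hence the forced lock_prefix below so that writeback goes
 * through the atomic path.
 */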
3578static int em_xchg(struct x86_emulate_ctxt *ctxt)
3579{
3580 /* Write back the register source. */
3581 ctxt->src.val = ctxt->dst.val;
3582 write_register_operand(&ctxt->src);
3583
3584 /* Write back the memory destination with implicit LOCK prefix. */
3585 ctxt->dst.val = ctxt->src.orig_val;
3586 ctxt->lock_prefix = 1;
3587 return X86EMUL_CONTINUE;
3588}
3589
3590static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3591{
3592 ctxt->dst.val = ctxt->src2.val;
3593 return fastop(ctxt, em_imul);
3594}
3595
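/*
 * CWD/CDQ/CQO sign-extend the accumulator into (R|E)DX. The expression
 * below broadcasts the sign bit: shifting it down to bit 0 yields 1 or
 * 0, subtracting 1 then gives 0 or ~0UL, and the final ~ turns that
 * into all-ones or zero, so e.g. CWD with AX = 0x8000 produces
 * DX = 0xffff.
 */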
3596static int em_cwd(struct x86_emulate_ctxt *ctxt)
3597{
3598 ctxt->dst.type = OP_REG;
3599 ctxt->dst.bytes = ctxt->src.bytes;
3600 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3601 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3602
3603 return X86EMUL_CONTINUE;
3604}
3605
3606static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3607{
3608 u64 tsc_aux = 0;
3609
3610 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3611 return emulate_ud(ctxt);
3612 ctxt->dst.val = tsc_aux;
3613 return X86EMUL_CONTINUE;
3614}
3615
3616static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3617{
3618 u64 tsc = 0;
3619
3620 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3621 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3622 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3623 return X86EMUL_CONTINUE;
3624}
3625
3626static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3627{
3628 u64 pmc;
3629
3630 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3631 return emulate_gp(ctxt, 0);
3632 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3633 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3634 return X86EMUL_CONTINUE;
3635}
3636
3637static int em_mov(struct x86_emulate_ctxt *ctxt)
3638{
3639 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3640 return X86EMUL_CONTINUE;
3641}
3642
3643static int em_movbe(struct x86_emulate_ctxt *ctxt)
3644{
3645 u16 tmp;
3646
3647 if (!ctxt->ops->guest_has_movbe(ctxt))
3648 return emulate_ud(ctxt);
3649
3650 switch (ctxt->op_bytes) {
3651 case 2:
3652 /*
3653 * From MOVBE definition: "...When the operand size is 16 bits,
3654 * the upper word of the destination register remains unchanged
3655 * ..."
3656 *
3657 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3658 * rules, so we have to do the operation almost by hand.
3659 */
3660 tmp = (u16)ctxt->src.val;
3661 ctxt->dst.val &= ~0xffffUL;
3662 ctxt->dst.val |= (unsigned long)swab16(tmp);
3663 break;
3664 case 4:
3665 ctxt->dst.val = swab32((u32)ctxt->src.val);
3666 break;
3667 case 8:
3668 ctxt->dst.val = swab64(ctxt->src.val);
3669 break;
3670 default:
3671 BUG();
3672 }
3673 return X86EMUL_CONTINUE;
3674}
3675
3676static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3677{
3678 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3679 return emulate_gp(ctxt, 0);
3680
3681 /* Disable writeback. */
3682 ctxt->dst.type = OP_NONE;
3683 return X86EMUL_CONTINUE;
3684}
3685
3686static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3687{
3688 unsigned long val;
3689
3690 if (ctxt->mode == X86EMUL_MODE_PROT64)
3691 val = ctxt->src.val & ~0ULL;
3692 else
3693 val = ctxt->src.val & ~0U;
3694
3695 /* #UD condition is already handled. */
3696 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3697 return emulate_gp(ctxt, 0);
3698
3699 /* Disable writeback. */
3700 ctxt->dst.type = OP_NONE;
3701 return X86EMUL_CONTINUE;
3702}
3703
3704static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3705{
3706 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3707 u64 msr_data;
3708 int r;
3709
3710 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3711 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3712 r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3713
3714 if (r == X86EMUL_IO_NEEDED)
3715 return r;
3716
3717 if (r > 0)
3718 return emulate_gp(ctxt, 0);
3719
3720 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3721}
3722
3723static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3724{
3725 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3726 u64 msr_data;
3727 int r;
3728
3729 r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3730
3731 if (r == X86EMUL_IO_NEEDED)
3732 return r;
3733
3734 if (r)
3735 return emulate_gp(ctxt, 0);
3736
3737 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3738 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3739 return X86EMUL_CONTINUE;
3740}
3741
3742static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3743{
3744 if (segment > VCPU_SREG_GS &&
3745 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3746 ctxt->ops->cpl(ctxt) > 0)
3747 return emulate_gp(ctxt, 0);
3748
3749 ctxt->dst.val = get_segment_selector(ctxt, segment);
3750 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3751 ctxt->dst.bytes = 2;
3752 return X86EMUL_CONTINUE;
3753}
3754
3755static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3756{
3757 if (ctxt->modrm_reg > VCPU_SREG_GS)
3758 return emulate_ud(ctxt);
3759
3760 return em_store_sreg(ctxt, ctxt->modrm_reg);
3761}
3762
3763static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3764{
3765 u16 sel = ctxt->src.val;
3766
3767 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3768 return emulate_ud(ctxt);
3769
3770 if (ctxt->modrm_reg == VCPU_SREG_SS)
3771 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3772
3773 /* Disable writeback. */
3774 ctxt->dst.type = OP_NONE;
3775 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3776}
3777
3778static int em_sldt(struct x86_emulate_ctxt *ctxt)
3779{
3780 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3781}
3782
3783static int em_lldt(struct x86_emulate_ctxt *ctxt)
3784{
3785 u16 sel = ctxt->src.val;
3786
3787 /* Disable writeback. */
3788 ctxt->dst.type = OP_NONE;
3789 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3790}
3791
3792static int em_str(struct x86_emulate_ctxt *ctxt)
3793{
3794 return em_store_sreg(ctxt, VCPU_SREG_TR);
3795}
3796
3797static int em_ltr(struct x86_emulate_ctxt *ctxt)
3798{
3799 u16 sel = ctxt->src.val;
3800
3801 /* Disable writeback. */
3802 ctxt->dst.type = OP_NONE;
3803 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3804}
3805
3806static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3807{
3808 int rc;
3809 ulong linear;
3810
3811 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3812 if (rc == X86EMUL_CONTINUE)
3813 ctxt->ops->invlpg(ctxt, linear);
3814 /* Disable writeback. */
3815 ctxt->dst.type = OP_NONE;
3816 return X86EMUL_CONTINUE;
3817}
3818
3819static int em_clts(struct x86_emulate_ctxt *ctxt)
3820{
3821 ulong cr0;
3822
3823 cr0 = ctxt->ops->get_cr(ctxt, 0);
3824 cr0 &= ~X86_CR0_TS;
3825 ctxt->ops->set_cr(ctxt, 0, cr0);
3826 return X86EMUL_CONTINUE;
3827}
3828
3829static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3830{
3831 int rc = ctxt->ops->fix_hypercall(ctxt);
3832
3833 if (rc != X86EMUL_CONTINUE)
3834 return rc;
3835
3836 /* Let the processor re-execute the fixed hypercall */
3837 ctxt->_eip = ctxt->eip;
3838 /* Disable writeback. */
3839 ctxt->dst.type = OP_NONE;
3840 return X86EMUL_CONTINUE;
3841}
3842
3843static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3844 void (*get)(struct x86_emulate_ctxt *ctxt,
3845 struct desc_ptr *ptr))
3846{
3847 struct desc_ptr desc_ptr;
3848
3849 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3850 ctxt->ops->cpl(ctxt) > 0)
3851 return emulate_gp(ctxt, 0);
3852
3853 if (ctxt->mode == X86EMUL_MODE_PROT64)
3854 ctxt->op_bytes = 8;
3855 get(ctxt, &desc_ptr);
3856 if (ctxt->op_bytes == 2) {
3857 ctxt->op_bytes = 4;
3858 desc_ptr.address &= 0x00ffffff;
3859 }
3860 /* Disable writeback. */
3861 ctxt->dst.type = OP_NONE;
3862 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3863 &desc_ptr, 2 + ctxt->op_bytes);
3864}
3865
3866static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3867{
3868 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3869}
3870
3871static int em_sidt(struct x86_emulate_ctxt *ctxt)
3872{
3873 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3874}
3875
3876static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3877{
3878 struct desc_ptr desc_ptr;
3879 int rc;
3880
3881 if (ctxt->mode == X86EMUL_MODE_PROT64)
3882 ctxt->op_bytes = 8;
3883 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3884 &desc_ptr.size, &desc_ptr.address,
3885 ctxt->op_bytes);
3886 if (rc != X86EMUL_CONTINUE)
3887 return rc;
3888 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3889 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3890 return emulate_gp(ctxt, 0);
3891 if (lgdt)
3892 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3893 else
3894 ctxt->ops->set_idt(ctxt, &desc_ptr);
3895 /* Disable writeback. */
3896 ctxt->dst.type = OP_NONE;
3897 return X86EMUL_CONTINUE;
3898}
3899
3900static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3901{
3902 return em_lgdt_lidt(ctxt, true);
3903}
3904
3905static int em_lidt(struct x86_emulate_ctxt *ctxt)
3906{
3907 return em_lgdt_lidt(ctxt, false);
3908}
3909
3910static int em_smsw(struct x86_emulate_ctxt *ctxt)
3911{
3912 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3913 ctxt->ops->cpl(ctxt) > 0)
3914 return emulate_gp(ctxt, 0);
3915
3916 if (ctxt->dst.type == OP_MEM)
3917 ctxt->dst.bytes = 2;
3918 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3919 return X86EMUL_CONTINUE;
3920}
3921
3922static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3923{
3924 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3925 | (ctxt->src.val & 0x0f));
3926 ctxt->dst.type = OP_NONE;
3927 return X86EMUL_CONTINUE;
3928}
3929
3930static int em_loop(struct x86_emulate_ctxt *ctxt)
3931{
3932 int rc = X86EMUL_CONTINUE;
3933
3934 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3935 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3936 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3937 rc = jmp_rel(ctxt, ctxt->src.val);
3938
3939 return rc;
3940}
3941
3942static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3943{
3944 int rc = X86EMUL_CONTINUE;
3945
3946 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3947 rc = jmp_rel(ctxt, ctxt->src.val);
3948
3949 return rc;
3950}
3951
3952static int em_in(struct x86_emulate_ctxt *ctxt)
3953{
3954 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3955 &ctxt->dst.val))
3956 return X86EMUL_IO_NEEDED;
3957
3958 return X86EMUL_CONTINUE;
3959}
3960
3961static int em_out(struct x86_emulate_ctxt *ctxt)
3962{
3963 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3964 &ctxt->src.val, 1);
3965 /* Disable writeback. */
3966 ctxt->dst.type = OP_NONE;
3967 return X86EMUL_CONTINUE;
3968}
3969
3970static int em_cli(struct x86_emulate_ctxt *ctxt)
3971{
3972 if (emulator_bad_iopl(ctxt))
3973 return emulate_gp(ctxt, 0);
3974
3975 ctxt->eflags &= ~X86_EFLAGS_IF;
3976 return X86EMUL_CONTINUE;
3977}
3978
3979static int em_sti(struct x86_emulate_ctxt *ctxt)
3980{
3981 if (emulator_bad_iopl(ctxt))
3982 return emulate_gp(ctxt, 0);
3983
3984 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3985 ctxt->eflags |= X86_EFLAGS_IF;
3986 return X86EMUL_CONTINUE;
3987}
3988
3989static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3990{
3991 u32 eax, ebx, ecx, edx;
3992 u64 msr = 0;
3993
3994 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3995 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3996 ctxt->ops->cpl(ctxt)) {
3997 return emulate_gp(ctxt, 0);
3998 }
3999
4000 eax = reg_read(ctxt, VCPU_REGS_RAX);
4001 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4002 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4003 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
4004 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
4005 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
4006 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
4007 return X86EMUL_CONTINUE;
4008}
4009
4010static int em_sahf(struct x86_emulate_ctxt *ctxt)
4011{
4012 u32 flags;
4013
4014 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
4015 X86_EFLAGS_SF;
4016 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
4017
4018 ctxt->eflags &= ~0xffUL;
4019 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
4020 return X86EMUL_CONTINUE;
4021}
4022
4023static int em_lahf(struct x86_emulate_ctxt *ctxt)
4024{
4025 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
4026 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4027 return X86EMUL_CONTINUE;
4028}
4029
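/*
 * BSWAP on a 16-bit operand is architecturally undefined, so anything
 * other than the REX.W (8-byte) case simply takes the 32-bit swap
 * below.
 */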
4030static int em_bswap(struct x86_emulate_ctxt *ctxt)
4031{
4032 switch (ctxt->op_bytes) {
4033#ifdef CONFIG_X86_64
4034 case 8:
4035 asm("bswap %0" : "+r"(ctxt->dst.val));
4036 break;
4037#endif
4038 default:
4039 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4040 break;
4041 }
4042 return X86EMUL_CONTINUE;
4043}
4044
4045static int em_clflush(struct x86_emulate_ctxt *ctxt)
4046{
4047 /* emulating clflush regardless of cpuid */
4048 return X86EMUL_CONTINUE;
4049}
4050
4051static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
4052{
4053 /* emulating clflushopt regardless of cpuid */
4054 return X86EMUL_CONTINUE;
4055}
4056
4057static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4058{
4059 ctxt->dst.val = (s32) ctxt->src.val;
4060 return X86EMUL_CONTINUE;
4061}
4062
4063static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4064{
4065 if (!ctxt->ops->guest_has_fxsr(ctxt))
4066 return emulate_ud(ctxt);
4067
4068 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4069 return emulate_nm(ctxt);
4070
4071 /*
4072 * Don't emulate a case that should never be hit, instead of working
4073 * around a lack of fxsave64/fxrstor64 on old compilers.
4074 */
4075 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4076 return X86EMUL_UNHANDLEABLE;
4077
4078 return X86EMUL_CONTINUE;
4079}
4080
4081/*
4082 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4083 * and restore MXCSR.
4084 */
4085static size_t __fxstate_size(int nregs)
4086{
4087 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4088}
4089
4090static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4091{
4092 bool cr4_osfxsr;
4093 if (ctxt->mode == X86EMUL_MODE_PROT64)
4094 return __fxstate_size(16);
4095
4096 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4097 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4098}
4099
4100/*
4101 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4102 * 1) 16 bit mode
4103 * 2) 32 bit mode
4104 * - like (1), but FIP and FDP are only 16 bit. At least Intel CPUs
4105 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4106 * save and restore
4107 * 3) 64-bit mode with REX.W prefix
4108 * - like (2), but XMM 8-15 are being saved and restored
4109 * 4) 64-bit mode without REX.W prefix
4110 * - like (3), but FIP and FDP are 64 bit
4111 *
4112 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4113 * desired result. (4) is not emulated.
4114 *
4115 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4116 * and FPU DS) should match.
4117 */
4118static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4119{
4120 struct fxregs_state fx_state;
4121 int rc;
4122
4123 rc = check_fxsr(ctxt);
4124 if (rc != X86EMUL_CONTINUE)
4125 return rc;
4126
4127 emulator_get_fpu();
4128
4129 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4130
4131 emulator_put_fpu();
4132
4133 if (rc != X86EMUL_CONTINUE)
4134 return rc;
4135
4136 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4137 fxstate_size(ctxt));
4138}
4139
4140/*
4141 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4142 * in the host registers (via FXSAVE) instead, so they won't be modified.
4143 * (preemption has to stay disabled until FXRSTOR).
4144 *
4145 * Use noinline to keep the stack for other functions called by callers small.
4146 */
4147static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4148 const size_t used_size)
4149{
4150 struct fxregs_state fx_tmp;
4151 int rc;
4152
4153 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4154 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4155 __fxstate_size(16) - used_size);
4156
4157 return rc;
4158}
4159
4160static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4161{
4162 struct fxregs_state fx_state;
4163 int rc;
4164 size_t size;
4165
4166 rc = check_fxsr(ctxt);
4167 if (rc != X86EMUL_CONTINUE)
4168 return rc;
4169
4170 size = fxstate_size(ctxt);
4171 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4172 if (rc != X86EMUL_CONTINUE)
4173 return rc;
4174
4175 emulator_get_fpu();
4176
4177 if (size < __fxstate_size(16)) {
4178 rc = fxregs_fixup(&fx_state, size);
4179 if (rc != X86EMUL_CONTINUE)
4180 goto out;
4181 }
4182
4183 if (fx_state.mxcsr >> 16) {
4184 rc = emulate_gp(ctxt, 0);
4185 goto out;
4186 }
4187
4188 if (rc == X86EMUL_CONTINUE)
4189 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4190
4191out:
4192 emulator_put_fpu();
4193
4194 return rc;
4195}
4196
4197static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4198{
4199 u32 eax, ecx, edx;
4200
4201 eax = reg_read(ctxt, VCPU_REGS_RAX);
4202 edx = reg_read(ctxt, VCPU_REGS_RDX);
4203 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4204
4205 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4206 return emulate_gp(ctxt, 0);
4207
4208 return X86EMUL_CONTINUE;
4209}
4210
4211static bool valid_cr(int nr)
4212{
4213 switch (nr) {
4214 case 0:
4215 case 2 ... 4:
4216 case 8:
4217 return true;
4218 default:
4219 return false;
4220 }
4221}
4222
4223static int check_cr_access(struct x86_emulate_ctxt *ctxt)
4224{
4225 if (!valid_cr(ctxt->modrm_reg))
4226 return emulate_ud(ctxt);
4227
4228 return X86EMUL_CONTINUE;
4229}
4230
4231static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4232{
4233 unsigned long dr7;
4234
4235 ctxt->ops->get_dr(ctxt, 7, &dr7);
4236
4237 /* Check if DR7.Global_Enable is set */
4238 return dr7 & (1 << 13);
4239}
4240
4241static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4242{
4243 int dr = ctxt->modrm_reg;
4244 u64 cr4;
4245
4246 if (dr > 7)
4247 return emulate_ud(ctxt);
4248
4249 cr4 = ctxt->ops->get_cr(ctxt, 4);
4250 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4251 return emulate_ud(ctxt);
4252
4253 if (check_dr7_gd(ctxt)) {
4254 ulong dr6;
4255
4256 ctxt->ops->get_dr(ctxt, 6, &dr6);
4257 dr6 &= ~DR_TRAP_BITS;
4258 dr6 |= DR6_BD | DR6_RTM;
4259 ctxt->ops->set_dr(ctxt, 6, dr6);
4260 return emulate_db(ctxt);
4261 }
4262
4263 return X86EMUL_CONTINUE;
4264}
4265
4266static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4267{
4268 u64 new_val = ctxt->src.val64;
4269 int dr = ctxt->modrm_reg;
4270
4271 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4272 return emulate_gp(ctxt, 0);
4273
4274 return check_dr_read(ctxt);
4275}
4276
4277static int check_svme(struct x86_emulate_ctxt *ctxt)
4278{
4279 u64 efer = 0;
4280
4281 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4282
4283 if (!(efer & EFER_SVME))
4284 return emulate_ud(ctxt);
4285
4286 return X86EMUL_CONTINUE;
4287}
4288
4289static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4290{
4291 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4292
4293 /* Valid physical address? */
4294 if (rax & 0xffff000000000000ULL)
4295 return emulate_gp(ctxt, 0);
4296
4297 return check_svme(ctxt);
4298}
4299
4300static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4301{
4302 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4303
4304 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4305 return emulate_ud(ctxt);
4306
4307 return X86EMUL_CONTINUE;
4308}
4309
4310static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4311{
4312 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4313 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4314
4315 /*
4316 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4317 * in Ring3 when CR4.PCE=0.
4318 */
4319 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4320 return X86EMUL_CONTINUE;
4321
4322 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4323 ctxt->ops->check_pmc(ctxt, rcx))
4324 return emulate_gp(ctxt, 0);
4325
4326 return X86EMUL_CONTINUE;
4327}
4328
4329static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4330{
4331 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4332 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4333 return emulate_gp(ctxt, 0);
4334
4335 return X86EMUL_CONTINUE;
4336}
4337
4338static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4339{
4340 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4341 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4342 return emulate_gp(ctxt, 0);
4343
4344 return X86EMUL_CONTINUE;
4345}
4346
4347#define D(_y) { .flags = (_y) }
4348#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4349#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4350 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4351#define N D(NotImpl)
4352#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4353#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4354#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4355#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4356#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4357#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4358#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4359#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4360#define II(_f, _e, _i) \
4361 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4362#define IIP(_f, _e, _i, _p) \
4363 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4364 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4365#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4366
4367#define D2bv(_f) D((_f) | ByteOp), D(_f)
4368#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4369#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4370#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4371#define I2bvIP(_f, _e, _i, _p) \
4372 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4373
4374#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4375 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4376 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4377
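/*
 * For illustration of how the decode-table macros expand:
 * F(Lock, em_add) becomes { .flags = Lock | Fastop, .u.fastop = em_add },
 * and F6ALU(Lock, em_add) emits the six classic ALU encodings (byte and
 * word/long forms of mem,reg / reg,mem / acc,imm) used by the rows of
 * the one-byte opcode map below.
 */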
4378static const struct opcode group7_rm0[] = {
4379 N,
4380 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4381 N, N, N, N, N, N,
4382};
4383
4384static const struct opcode group7_rm1[] = {
4385 DI(SrcNone | Priv, monitor),
4386 DI(SrcNone | Priv, mwait),
4387 N, N, N, N, N, N,
4388};
4389
4390static const struct opcode group7_rm2[] = {
4391 N,
4392 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4393 N, N, N, N, N, N,
4394};
4395
4396static const struct opcode group7_rm3[] = {
4397 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4398 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4399 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4400 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4401 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4402 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4403 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4404 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4405};
4406
4407static const struct opcode group7_rm7[] = {
4408 N,
4409 DIP(SrcNone, rdtscp, check_rdtsc),
4410 N, N, N, N, N, N,
4411};
4412
4413static const struct opcode group1[] = {
4414 F(Lock, em_add),
4415 F(Lock | PageTable, em_or),
4416 F(Lock, em_adc),
4417 F(Lock, em_sbb),
4418 F(Lock | PageTable, em_and),
4419 F(Lock, em_sub),
4420 F(Lock, em_xor),
4421 F(NoWrite, em_cmp),
4422};
4423
4424static const struct opcode group1A[] = {
4425 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4426};
4427
4428static const struct opcode group2[] = {
4429 F(DstMem | ModRM, em_rol),
4430 F(DstMem | ModRM, em_ror),
4431 F(DstMem | ModRM, em_rcl),
4432 F(DstMem | ModRM, em_rcr),
4433 F(DstMem | ModRM, em_shl),
4434 F(DstMem | ModRM, em_shr),
4435 F(DstMem | ModRM, em_shl),
4436 F(DstMem | ModRM, em_sar),
4437};
4438
4439static const struct opcode group3[] = {
4440 F(DstMem | SrcImm | NoWrite, em_test),
4441 F(DstMem | SrcImm | NoWrite, em_test),
4442 F(DstMem | SrcNone | Lock, em_not),
4443 F(DstMem | SrcNone | Lock, em_neg),
4444 F(DstXacc | Src2Mem, em_mul_ex),
4445 F(DstXacc | Src2Mem, em_imul_ex),
4446 F(DstXacc | Src2Mem, em_div_ex),
4447 F(DstXacc | Src2Mem, em_idiv_ex),
4448};
4449
4450static const struct opcode group4[] = {
4451 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4452 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4453 N, N, N, N, N, N,
4454};
4455
4456static const struct opcode group5[] = {
4457 F(DstMem | SrcNone | Lock, em_inc),
4458 F(DstMem | SrcNone | Lock, em_dec),
4459 I(SrcMem | NearBranch, em_call_near_abs),
4460 I(SrcMemFAddr | ImplicitOps, em_call_far),
4461 I(SrcMem | NearBranch, em_jmp_abs),
4462 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4463 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4464};
4465
4466static const struct opcode group6[] = {
4467 II(Prot | DstMem, em_sldt, sldt),
4468 II(Prot | DstMem, em_str, str),
4469 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4470 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4471 N, N, N, N,
4472};
4473
4474static const struct group_dual group7 = { {
4475 II(Mov | DstMem, em_sgdt, sgdt),
4476 II(Mov | DstMem, em_sidt, sidt),
4477 II(SrcMem | Priv, em_lgdt, lgdt),
4478 II(SrcMem | Priv, em_lidt, lidt),
4479 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4480 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4481 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4482}, {
4483 EXT(0, group7_rm0),
4484 EXT(0, group7_rm1),
4485 EXT(0, group7_rm2),
4486 EXT(0, group7_rm3),
4487 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4488 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4489 EXT(0, group7_rm7),
4490} };
4491
4492static const struct opcode group8[] = {
4493 N, N, N, N,
4494 F(DstMem | SrcImmByte | NoWrite, em_bt),
4495 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4496 F(DstMem | SrcImmByte | Lock, em_btr),
4497 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4498};
4499
4500/*
4501 * The "memory" destination is actually always a register, since we come
4502 * from the register case of group9.
4503 */
4504static const struct gprefix pfx_0f_c7_7 = {
4505 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4506};
4507
4508
4509static const struct group_dual group9 = { {
4510 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4511}, {
4512 N, N, N, N, N, N, N,
4513 GP(0, &pfx_0f_c7_7),
4514} };
4515
4516static const struct opcode group11[] = {
4517 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4518 X7(D(Undefined)),
4519};
4520
4521static const struct gprefix pfx_0f_ae_7 = {
4522 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4523};
4524
4525static const struct group_dual group15 = { {
4526 I(ModRM | Aligned16, em_fxsave),
4527 I(ModRM | Aligned16, em_fxrstor),
4528 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4529}, {
4530 N, N, N, N, N, N, N, N,
4531} };
4532
4533static const struct gprefix pfx_0f_6f_0f_7f = {
4534 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4535};
4536
4537static const struct instr_dual instr_dual_0f_2b = {
4538 I(0, em_mov), N
4539};
4540
4541static const struct gprefix pfx_0f_2b = {
4542 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4543};
4544
4545static const struct gprefix pfx_0f_10_0f_11 = {
4546 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4547};
4548
4549static const struct gprefix pfx_0f_28_0f_29 = {
4550 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4551};
4552
4553static const struct gprefix pfx_0f_e7 = {
4554 N, I(Sse, em_mov), N, N,
4555};
4556
4557static const struct escape escape_d9 = { {
4558 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4559}, {
4560 /* 0xC0 - 0xC7 */
4561 N, N, N, N, N, N, N, N,
4562 /* 0xC8 - 0xCF */
4563 N, N, N, N, N, N, N, N,
4564 /* 0xD0 - 0xD7 */
4565 N, N, N, N, N, N, N, N,
4566 /* 0xD8 - 0xDF */
4567 N, N, N, N, N, N, N, N,
4568 /* 0xE0 - 0xE7 */
4569 N, N, N, N, N, N, N, N,
4570 /* 0xE8 - 0xEF */
4571 N, N, N, N, N, N, N, N,
4572 /* 0xF0 - 0xF7 */
4573 N, N, N, N, N, N, N, N,
4574 /* 0xF8 - 0xFF */
4575 N, N, N, N, N, N, N, N,
4576} };
4577
4578static const struct escape escape_db = { {
4579 N, N, N, N, N, N, N, N,
4580}, {
4581 /* 0xC0 - 0xC7 */
4582 N, N, N, N, N, N, N, N,
4583 /* 0xC8 - 0xCF */
4584 N, N, N, N, N, N, N, N,
4585 /* 0xD0 - 0xC7 */
4586 N, N, N, N, N, N, N, N,
4587 /* 0xD8 - 0xDF */
4588 N, N, N, N, N, N, N, N,
4589 /* 0xE0 - 0xE7 */
4590 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4591 /* 0xE8 - 0xEF */
4592 N, N, N, N, N, N, N, N,
4593 /* 0xF0 - 0xF7 */
4594 N, N, N, N, N, N, N, N,
4595 /* 0xF8 - 0xFF */
4596 N, N, N, N, N, N, N, N,
4597} };
4598
4599static const struct escape escape_dd = { {
4600 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4601}, {
4602 /* 0xC0 - 0xC7 */
4603 N, N, N, N, N, N, N, N,
4604 /* 0xC8 - 0xCF */
4605 N, N, N, N, N, N, N, N,
4606 /* 0xD0 - 0xC7 */
4607 N, N, N, N, N, N, N, N,
4608 /* 0xD8 - 0xDF */
4609 N, N, N, N, N, N, N, N,
4610 /* 0xE0 - 0xE7 */
4611 N, N, N, N, N, N, N, N,
4612 /* 0xE8 - 0xEF */
4613 N, N, N, N, N, N, N, N,
4614 /* 0xF0 - 0xF7 */
4615 N, N, N, N, N, N, N, N,
4616 /* 0xF8 - 0xFF */
4617 N, N, N, N, N, N, N, N,
4618} };
4619
4620static const struct instr_dual instr_dual_0f_c3 = {
4621 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4622};
4623
4624static const struct mode_dual mode_dual_63 = {
4625 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4626};
4627
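/*
 * Main one-byte opcode map, indexed by the first opcode byte. Entries
 * follow the layout of the architectural opcode tables; N marks opcodes
 * the emulator leaves unimplemented.
 */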
4628static const struct opcode opcode_table[256] = {
4629 /* 0x00 - 0x07 */
4630 F6ALU(Lock, em_add),
4631 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4632 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4633 /* 0x08 - 0x0F */
4634 F6ALU(Lock | PageTable, em_or),
4635 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4636 N,
4637 /* 0x10 - 0x17 */
4638 F6ALU(Lock, em_adc),
4639 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4640 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4641 /* 0x18 - 0x1F */
4642 F6ALU(Lock, em_sbb),
4643 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4644 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4645 /* 0x20 - 0x27 */
4646 F6ALU(Lock | PageTable, em_and), N, N,
4647 /* 0x28 - 0x2F */
4648 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4649 /* 0x30 - 0x37 */
4650 F6ALU(Lock, em_xor), N, N,
4651 /* 0x38 - 0x3F */
4652 F6ALU(NoWrite, em_cmp), N, N,
4653 /* 0x40 - 0x4F */
4654 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4655 /* 0x50 - 0x57 */
4656 X8(I(SrcReg | Stack, em_push)),
4657 /* 0x58 - 0x5F */
4658 X8(I(DstReg | Stack, em_pop)),
4659 /* 0x60 - 0x67 */
4660 I(ImplicitOps | Stack | No64, em_pusha),
4661 I(ImplicitOps | Stack | No64, em_popa),
4662 N, MD(ModRM, &mode_dual_63),
4663 N, N, N, N,
4664 /* 0x68 - 0x6F */
4665 I(SrcImm | Mov | Stack, em_push),
4666 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4667 I(SrcImmByte | Mov | Stack, em_push),
4668 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4669 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4670 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4671 /* 0x70 - 0x7F */
4672 X16(D(SrcImmByte | NearBranch)),
4673 /* 0x80 - 0x87 */
4674 G(ByteOp | DstMem | SrcImm, group1),
4675 G(DstMem | SrcImm, group1),
4676 G(ByteOp | DstMem | SrcImm | No64, group1),
4677 G(DstMem | SrcImmByte, group1),
4678 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4679 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4680 /* 0x88 - 0x8F */
4681 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4682 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4683 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4684 D(ModRM | SrcMem | NoAccess | DstReg),
4685 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4686 G(0, group1A),
4687 /* 0x90 - 0x97 */
4688 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4689 /* 0x98 - 0x9F */
4690 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4691 I(SrcImmFAddr | No64, em_call_far), N,
4692 II(ImplicitOps | Stack, em_pushf, pushf),
4693 II(ImplicitOps | Stack, em_popf, popf),
4694 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4695 /* 0xA0 - 0xA7 */
4696 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4697 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4698 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4699 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4700 /* 0xA8 - 0xAF */
4701 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4702 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4703 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4704 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4705 /* 0xB0 - 0xB7 */
4706 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4707 /* 0xB8 - 0xBF */
4708 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4709 /* 0xC0 - 0xC7 */
4710 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4711 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4712 I(ImplicitOps | NearBranch, em_ret),
4713 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4714 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4715 G(ByteOp, group11), G(0, group11),
4716 /* 0xC8 - 0xCF */
4717 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4718 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4719 I(ImplicitOps, em_ret_far),
4720 D(ImplicitOps), DI(SrcImmByte, intn),
4721 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4722 /* 0xD0 - 0xD7 */
4723 G(Src2One | ByteOp, group2), G(Src2One, group2),
4724 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4725 I(DstAcc | SrcImmUByte | No64, em_aam),
4726 I(DstAcc | SrcImmUByte | No64, em_aad),
4727 F(DstAcc | ByteOp | No64, em_salc),
4728 I(DstAcc | SrcXLat | ByteOp, em_mov),
4729 /* 0xD8 - 0xDF */
4730 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4731 /* 0xE0 - 0xE7 */
4732 X3(I(SrcImmByte | NearBranch, em_loop)),
4733 I(SrcImmByte | NearBranch, em_jcxz),
4734 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4735 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4736 /* 0xE8 - 0xEF */
4737 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4738 I(SrcImmFAddr | No64, em_jmp_far),
4739 D(SrcImmByte | ImplicitOps | NearBranch),
4740 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4741 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4742 /* 0xF0 - 0xF7 */
4743 N, DI(ImplicitOps, icebp), N, N,
4744 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4745 G(ByteOp, group3), G(0, group3),
4746 /* 0xF8 - 0xFF */
4747 D(ImplicitOps), D(ImplicitOps),
4748 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4749 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4750};
4751
4752static const struct opcode twobyte_table[256] = {
4753 /* 0x00 - 0x0F */
4754 G(0, group6), GD(0, &group7), N, N,
4755 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4756 II(ImplicitOps | Priv, em_clts, clts), N,
4757 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4758 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4759 /* 0x10 - 0x1F */
4760 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4761 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4762 N, N, N, N, N, N,
4763 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4764 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4765 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4766 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4767 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4768 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4769 /* 0x20 - 0x2F */
4770 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4771 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4772 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4773 check_cr_access),
4774 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4775 check_dr_write),
4776 N, N, N, N,
4777 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4778 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4779 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4780 N, N, N, N,
4781 /* 0x30 - 0x3F */
4782 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4783 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4784 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4785 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4786 I(ImplicitOps | EmulateOnUD, em_sysenter),
4787 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4788 N, N,
4789 N, N, N, N, N, N, N, N,
4790 /* 0x40 - 0x4F */
4791 X16(D(DstReg | SrcMem | ModRM)),
4792 /* 0x50 - 0x5F */
4793 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4794 /* 0x60 - 0x6F */
4795 N, N, N, N,
4796 N, N, N, N,
4797 N, N, N, N,
4798 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4799 /* 0x70 - 0x7F */
4800 N, N, N, N,
4801 N, N, N, N,
4802 N, N, N, N,
4803 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4804 /* 0x80 - 0x8F */
4805 X16(D(SrcImm | NearBranch)),
4806 /* 0x90 - 0x9F */
4807 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4808 /* 0xA0 - 0xA7 */
4809 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4810 II(ImplicitOps, em_cpuid, cpuid),
4811 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4812 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4813 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4814 /* 0xA8 - 0xAF */
4815 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4816 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4817 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4818 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4819 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4820 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4821 /* 0xB0 - 0xB7 */
4822 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4823 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4824 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4825 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4826 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4827 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4828 /* 0xB8 - 0xBF */
4829 N, N,
4830 G(BitOp, group8),
4831 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4832 I(DstReg | SrcMem | ModRM, em_bsf_c),
4833 I(DstReg | SrcMem | ModRM, em_bsr_c),
4834 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4835 /* 0xC0 - 0xC7 */
4836 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4837 N, ID(0, &instr_dual_0f_c3),
4838 N, N, N, GD(0, &group9),
4839 /* 0xC8 - 0xCF */
4840 X8(I(DstReg, em_bswap)),
4841 /* 0xD0 - 0xDF */
4842 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4843 /* 0xE0 - 0xEF */
4844 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4845 N, N, N, N, N, N, N, N,
4846 /* 0xF0 - 0xFF */
4847 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4848};
4849
4850static const struct instr_dual instr_dual_0f_38_f0 = {
4851 I(DstReg | SrcMem | Mov, em_movbe), N
4852};
4853
4854static const struct instr_dual instr_dual_0f_38_f1 = {
4855 I(DstMem | SrcReg | Mov, em_movbe), N
4856};
4857
4858static const struct gprefix three_byte_0f_38_f0 = {
4859 ID(0, &instr_dual_0f_38_f0), N, N, N
4860};
4861
4862static const struct gprefix three_byte_0f_38_f1 = {
4863 ID(0, &instr_dual_0f_38_f1), N, N, N
4864};
4865
4866/*
4867 * Insns below are selected first by the third opcode byte and then by
4868 * the mandatory prefix.
4869 */
4870static const struct opcode opcode_map_0f_38[256] = {
4871 /* 0x00 - 0x7f */
4872 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4873 /* 0x80 - 0xef */
4874 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4875 /* 0xf0 - 0xf1 */
4876 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4877 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4878 /* 0xf2 - 0xff */
4879 N, N, X4(N), X8(N)
4880};
4881
4882#undef D
4883#undef N
4884#undef G
4885#undef GD
4886#undef I
4887#undef GP
4888#undef EXT
4889#undef MD
4890#undef ID
4891
4892#undef D2bv
4893#undef D2bvIP
4894#undef I2bv
4895#undef I2bvIP
4896#undef I6ALU
4897
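/*
 * Except for the OpImm64 case (e.g. movabs), instructions with 64-bit
 * operand size take at most a 32-bit immediate that is sign-extended at
 * execution, hence the clamp from 8 to 4 bytes below.
 */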
4898static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4899{
4900 unsigned size;
4901
4902 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4903 if (size == 8)
4904 size = 4;
4905 return size;
4906}
4907
4908static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4909 unsigned size, bool sign_extension)
4910{
4911 int rc = X86EMUL_CONTINUE;
4912
4913 op->type = OP_IMM;
4914 op->bytes = size;
4915 op->addr.mem.ea = ctxt->_eip;
4916 /* NB. Immediates are sign-extended as necessary. */
4917 switch (op->bytes) {
4918 case 1:
4919 op->val = insn_fetch(s8, ctxt);
4920 break;
4921 case 2:
4922 op->val = insn_fetch(s16, ctxt);
4923 break;
4924 case 4:
4925 op->val = insn_fetch(s32, ctxt);
4926 break;
4927 case 8:
4928 op->val = insn_fetch(s64, ctxt);
4929 break;
4930 }
4931 if (!sign_extension) {
4932 switch (op->bytes) {
4933 case 1:
4934 op->val &= 0xff;
4935 break;
4936 case 2:
4937 op->val &= 0xffff;
4938 break;
4939 case 4:
4940 op->val &= 0xffffffff;
4941 break;
4942 }
4943 }
4944done:
4945 return rc;
4946}
4947
4948static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4949 unsigned d)
4950{
4951 int rc = X86EMUL_CONTINUE;
4952
4953 switch (d) {
4954 case OpReg:
4955 decode_register_operand(ctxt, op);
4956 break;
4957 case OpImmUByte:
4958 rc = decode_imm(ctxt, op, 1, false);
4959 break;
4960 case OpMem:
4961 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4962 mem_common:
4963 *op = ctxt->memop;
4964 ctxt->memopp = op;
4965 if (ctxt->d & BitOp)
4966 fetch_bit_operand(ctxt);
4967 op->orig_val = op->val;
4968 break;
4969 case OpMem64:
4970 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4971 goto mem_common;
4972 case OpAcc:
4973 op->type = OP_REG;
4974 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4975 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4976 fetch_register_operand(op);
4977 op->orig_val = op->val;
4978 break;
4979 case OpAccLo:
4980 op->type = OP_REG;
4981 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4982 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4983 fetch_register_operand(op);
4984 op->orig_val = op->val;
4985 break;
4986 case OpAccHi:
4987 if (ctxt->d & ByteOp) {
4988 op->type = OP_NONE;
4989 break;
4990 }
4991 op->type = OP_REG;
4992 op->bytes = ctxt->op_bytes;
4993 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4994 fetch_register_operand(op);
4995 op->orig_val = op->val;
4996 break;
4997 case OpDI:
4998 op->type = OP_MEM;
4999 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5000 op->addr.mem.ea =
5001 register_address(ctxt, VCPU_REGS_RDI);
5002 op->addr.mem.seg = VCPU_SREG_ES;
5003 op->val = 0;
5004 op->count = 1;
5005 break;
5006 case OpDX:
5007 op->type = OP_REG;
5008 op->bytes = 2;
5009 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5010 fetch_register_operand(op);
5011 break;
5012 case OpCL:
5013 op->type = OP_IMM;
5014 op->bytes = 1;
5015 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5016 break;
5017 case OpImmByte:
5018 rc = decode_imm(ctxt, op, 1, true);
5019 break;
5020 case OpOne:
5021 op->type = OP_IMM;
5022 op->bytes = 1;
5023 op->val = 1;
5024 break;
5025 case OpImm:
5026 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5027 break;
5028 case OpImm64:
5029 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5030 break;
5031 case OpMem8:
5032 ctxt->memop.bytes = 1;
5033 if (ctxt->memop.type == OP_REG) {
5034 ctxt->memop.addr.reg = decode_register(ctxt,
5035 ctxt->modrm_rm, true);
5036 fetch_register_operand(&ctxt->memop);
5037 }
5038 goto mem_common;
5039 case OpMem16:
5040 ctxt->memop.bytes = 2;
5041 goto mem_common;
5042 case OpMem32:
5043 ctxt->memop.bytes = 4;
5044 goto mem_common;
5045 case OpImmU16:
5046 rc = decode_imm(ctxt, op, 2, false);
5047 break;
5048 case OpImmU:
5049 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5050 break;
5051 case OpSI:
5052 op->type = OP_MEM;
5053 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5054 op->addr.mem.ea =
5055 register_address(ctxt, VCPU_REGS_RSI);
5056 op->addr.mem.seg = ctxt->seg_override;
5057 op->val = 0;
5058 op->count = 1;
5059 break;
5060 case OpXLat:
5061 op->type = OP_MEM;
5062 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5063 op->addr.mem.ea =
5064 address_mask(ctxt,
5065 reg_read(ctxt, VCPU_REGS_RBX) +
5066 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5067 op->addr.mem.seg = ctxt->seg_override;
5068 op->val = 0;
5069 break;
5070 case OpImmFAddr:
5071 op->type = OP_IMM;
5072 op->addr.mem.ea = ctxt->_eip;
5073 op->bytes = ctxt->op_bytes + 2;
5074 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5075 break;
5076 case OpMemFAddr:
5077 ctxt->memop.bytes = ctxt->op_bytes + 2;
5078 goto mem_common;
5079 case OpES:
5080 op->type = OP_IMM;
5081 op->val = VCPU_SREG_ES;
5082 break;
5083 case OpCS:
5084 op->type = OP_IMM;
5085 op->val = VCPU_SREG_CS;
5086 break;
5087 case OpSS:
5088 op->type = OP_IMM;
5089 op->val = VCPU_SREG_SS;
5090 break;
5091 case OpDS:
5092 op->type = OP_IMM;
5093 op->val = VCPU_SREG_DS;
5094 break;
5095 case OpFS:
5096 op->type = OP_IMM;
5097 op->val = VCPU_SREG_FS;
5098 break;
5099 case OpGS:
5100 op->type = OP_IMM;
5101 op->val = VCPU_SREG_GS;
5102 break;
5103 case OpImplicit:
5104 /* Special instructions do their own operand decoding. */
5105 default:
5106 op->type = OP_NONE; /* Disable writeback. */
5107 break;
5108 }
5109
5110done:
5111 return rc;
5112}
5113
5114int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5115{
5116 int rc = X86EMUL_CONTINUE;
5117 int mode = ctxt->mode;
5118 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5119 bool op_prefix = false;
5120 bool has_seg_override = false;
5121 struct opcode opcode;
5122 u16 dummy;
5123 struct desc_struct desc;
5124
5125 ctxt->memop.type = OP_NONE;
5126 ctxt->memopp = NULL;
5127 ctxt->_eip = ctxt->eip;
5128 ctxt->fetch.ptr = ctxt->fetch.data;
5129 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5130 ctxt->opcode_len = 1;
5131 ctxt->intercept = x86_intercept_none;
5132 if (insn_len > 0)
5133 memcpy(ctxt->fetch.data, insn, insn_len);
5134 else {
5135 rc = __do_insn_fetch_bytes(ctxt, 1);
5136 if (rc != X86EMUL_CONTINUE)
5137 goto done;
5138 }
5139
5140 switch (mode) {
5141 case X86EMUL_MODE_REAL:
5142 case X86EMUL_MODE_VM86:
5143 def_op_bytes = def_ad_bytes = 2;
5144 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5145 if (desc.d)
5146 def_op_bytes = def_ad_bytes = 4;
5147 break;
5148 case X86EMUL_MODE_PROT16:
5149 def_op_bytes = def_ad_bytes = 2;
5150 break;
5151 case X86EMUL_MODE_PROT32:
5152 def_op_bytes = def_ad_bytes = 4;
5153 break;
5154#ifdef CONFIG_X86_64
5155 case X86EMUL_MODE_PROT64:
5156 def_op_bytes = 4;
5157 def_ad_bytes = 8;
5158 break;
5159#endif
5160 default:
5161 return EMULATION_FAILED;
5162 }
5163
5164 ctxt->op_bytes = def_op_bytes;
5165 ctxt->ad_bytes = def_ad_bytes;
5166
5167 /* Legacy prefixes. */
5168 for (;;) {
5169 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5170 case 0x66: /* operand-size override */
5171 op_prefix = true;
5172 /* switch between 2/4 bytes */
5173 ctxt->op_bytes = def_op_bytes ^ 6;
5174 break;
5175 case 0x67: /* address-size override */
5176 if (mode == X86EMUL_MODE_PROT64)
5177 /* switch between 4/8 bytes */
5178 ctxt->ad_bytes = def_ad_bytes ^ 12;
5179 else
5180 /* switch between 2/4 bytes */
5181 ctxt->ad_bytes = def_ad_bytes ^ 6;
5182 break;
5183 case 0x26: /* ES override */
5184 has_seg_override = true;
5185 ctxt->seg_override = VCPU_SREG_ES;
5186 break;
5187 case 0x2e: /* CS override */
5188 has_seg_override = true;
5189 ctxt->seg_override = VCPU_SREG_CS;
5190 break;
5191 case 0x36: /* SS override */
5192 has_seg_override = true;
5193 ctxt->seg_override = VCPU_SREG_SS;
5194 break;
5195 case 0x3e: /* DS override */
5196 has_seg_override = true;
5197 ctxt->seg_override = VCPU_SREG_DS;
5198 break;
5199 case 0x64: /* FS override */
5200 has_seg_override = true;
5201 ctxt->seg_override = VCPU_SREG_FS;
5202 break;
5203 case 0x65: /* GS override */
5204 has_seg_override = true;
5205 ctxt->seg_override = VCPU_SREG_GS;
5206 break;
5207 case 0x40 ... 0x4f: /* REX */
5208 if (mode != X86EMUL_MODE_PROT64)
5209 goto done_prefixes;
5210 ctxt->rex_prefix = ctxt->b;
5211 continue;
5212 case 0xf0: /* LOCK */
5213 ctxt->lock_prefix = 1;
5214 break;
5215 case 0xf2: /* REPNE/REPNZ */
5216 case 0xf3: /* REP/REPE/REPZ */
5217 ctxt->rep_prefix = ctxt->b;
5218 break;
5219 default:
5220 goto done_prefixes;
5221 }
5222
5223 /* Any legacy prefix after a REX prefix nullifies its effect. */
5224
5225 ctxt->rex_prefix = 0;
5226 }
5227
5228done_prefixes:
5229
5230 /* REX prefix. */
5231 if (ctxt->rex_prefix & 8)
5232 ctxt->op_bytes = 8; /* REX.W */
5233
5234 /* Opcode byte(s). */
5235 opcode = opcode_table[ctxt->b];
5236 /* Two-byte opcode? */
5237 if (ctxt->b == 0x0f) {
5238 ctxt->opcode_len = 2;
5239 ctxt->b = insn_fetch(u8, ctxt);
5240 opcode = twobyte_table[ctxt->b];
5241
5242 /* 0F_38 opcode map */
5243 if (ctxt->b == 0x38) {
5244 ctxt->opcode_len = 3;
5245 ctxt->b = insn_fetch(u8, ctxt);
5246 opcode = opcode_map_0f_38[ctxt->b];
5247 }
5248 }
5249 ctxt->d = opcode.flags;
5250
5251 if (ctxt->d & ModRM)
5252 ctxt->modrm = insn_fetch(u8, ctxt);
5253
5254 /* vex-prefix instructions are not implemented */
5255 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5256 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5257 ctxt->d = NotImpl;
5258 }
5259
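/*
 * Resolve group/prefix indirection: each pass narrows the opcode using
 * the ModRM reg/mod bits, the mandatory SIMD prefix, or the execution
 * mode, until no GroupMask flag remains, OR-ing in flags as it goes.
 */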
5260 while (ctxt->d & GroupMask) {
5261 switch (ctxt->d & GroupMask) {
5262 case Group:
5263 goffset = (ctxt->modrm >> 3) & 7;
5264 opcode = opcode.u.group[goffset];
5265 break;
5266 case GroupDual:
5267 goffset = (ctxt->modrm >> 3) & 7;
5268 if ((ctxt->modrm >> 6) == 3)
5269 opcode = opcode.u.gdual->mod3[goffset];
5270 else
5271 opcode = opcode.u.gdual->mod012[goffset];
5272 break;
5273 case RMExt:
5274 goffset = ctxt->modrm & 7;
5275 opcode = opcode.u.group[goffset];
5276 break;
5277 case Prefix:
5278 if (ctxt->rep_prefix && op_prefix)
5279 return EMULATION_FAILED;
5280 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5281 switch (simd_prefix) {
5282 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5283 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5284 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5285 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5286 }
5287 break;
5288 case Escape:
5289 if (ctxt->modrm > 0xbf) {
5290 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5291 u32 index = array_index_nospec(
5292 ctxt->modrm - 0xc0, size);
5293
5294 opcode = opcode.u.esc->high[index];
5295 } else {
5296 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5297 }
5298 break;
5299 case InstrDual:
5300 if ((ctxt->modrm >> 6) == 3)
5301 opcode = opcode.u.idual->mod3;
5302 else
5303 opcode = opcode.u.idual->mod012;
5304 break;
5305 case ModeDual:
5306 if (ctxt->mode == X86EMUL_MODE_PROT64)
5307 opcode = opcode.u.mdual->mode64;
5308 else
5309 opcode = opcode.u.mdual->mode32;
5310 break;
5311 default:
5312 return EMULATION_FAILED;
5313 }
5314
5315 ctxt->d &= ~(u64)GroupMask;
5316 ctxt->d |= opcode.flags;
5317 }
5318
5319 /* Unrecognised? */
5320 if (ctxt->d == 0)
5321 return EMULATION_FAILED;
5322
5323 ctxt->execute = opcode.u.execute;
5324
5325 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5326 return EMULATION_FAILED;
5327
5328 if (unlikely(ctxt->d &
5329 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5330 No16))) {
5331 /*
5332 * These are copied unconditionally here, and checked unconditionally
5333 * in x86_emulate_insn.
5334 */
5335 ctxt->check_perm = opcode.check_perm;
5336 ctxt->intercept = opcode.intercept;
5337
5338 if (ctxt->d & NotImpl)
5339 return EMULATION_FAILED;
5340
5341 if (mode == X86EMUL_MODE_PROT64) {
5342 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5343 ctxt->op_bytes = 8;
5344 else if (ctxt->d & NearBranch)
5345 ctxt->op_bytes = 8;
5346 }
5347
5348 if (ctxt->d & Op3264) {
5349 if (mode == X86EMUL_MODE_PROT64)
5350 ctxt->op_bytes = 8;
5351 else
5352 ctxt->op_bytes = 4;
5353 }
5354
5355 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5356 ctxt->op_bytes = 4;
5357
5358 if (ctxt->d & Sse)
5359 ctxt->op_bytes = 16;
5360 else if (ctxt->d & Mmx)
5361 ctxt->op_bytes = 8;
5362 }

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
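
/*
 * Illustrative note (not in the original file): the typical caller is
 * x86_emulate_instruction() in arch/x86/kvm/x86.c, which runs
 * x86_decode_insn() first and invokes x86_emulate_insn() further below
 * only when decode returns EMULATION_OK.
 */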

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ and
	 * REPNE/REPNZ. If the repeat prefix is one of those, test the
	 * corresponding condition:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
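	/* For example, "repe cmpsb" (0xa6) stops once a compare clears ZF. */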
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	emulator_get_fpu();
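	/*
	 * fwait forces delivery of any pending x87 exception; asm_safe()
	 * catches the resulting fault so it can be reflected to the guest
	 * as #MF below instead of being taken by the host.
	 */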
	rc = asm_safe("fwait");
	emulator_put_fpu();

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(&op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

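	/*
	 * Calling convention, per the asm constraints below: dst.val in
	 * RAX, src.val in RDX, src2.val in RCX, guest flags in RDI and
	 * the fastop stub address in RSI. Each stub is FASTOP_SIZE bytes,
	 * so the size variant is picked by offsetting fop: e.g. a 32-bit
	 * op has __ffs(4) == 2 and lands on the third (longword) stub.
	 * CALL_NOSPEC keeps the indirect call retpoline-safe.
	 */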
	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception-safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(&ctxt->src);
			fetch_possible_mmx_operand(&ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);
		}

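		/*
		 * Nested-guest intercept checks run at three points:
		 * X86_ICPT_PRE_EXCEPT here, X86_ICPT_POST_EXCEPT after the
		 * privilege checks below, and X86_ICPT_POST_MEMACCESS once
		 * memory operands have been read.
		 */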
		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instructions can be executed only at CPL 0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction-specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

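	/*
	 * Keep RF set while a repeated string instruction is still in
	 * progress so that restarting it does not re-trigger instruction
	 * breakpoints; clear it otherwise.
	 */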
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

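	/*
	 * Dispatch: Fastop handlers are size-templated asm stubs that
	 * compute result flags directly; everything else goes through a
	 * regular em_*() C callback stored at decode time.
	 */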
	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, ctxt->fop);
		else
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
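	/*
	 * 0x98 sign-extends the accumulator: cbw (AL -> AX) when
	 * op_bytes == 2, cwde (AX -> EAX) when 4, cdqe (EAX -> RAX)
	 * when 8.
	 */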
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
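		/*
		 * count is the number of iterations actually completed this
		 * pass (the I/O read-ahead cache can satisfy several at
		 * once), so RCX is decremented by that amount.
		 */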
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the PIO read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache. This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

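	/* Outside 64-bit mode the architectural EIP is 32 bits, so truncate. */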
	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

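/*
 * A cached guest physical address can be reused only if the instruction
 * touches a single memory location exactly once: REP string instructions
 * restart with different addresses, and TwoMemOp instructions access two
 * distinct linear addresses, so translation must be redone for them.
 */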
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}