/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
#endif
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    const struct flush_tlb_info *info)
{
        PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
        PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These macros can be used to get or set a debug register.
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(cpu.set_debugreg, reg, val);
}

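/*
 * Illustrative use only (not part of the original header): through these
 * wrappers a debug-register access looks exactly like the native one,
 * e.g.:
 *
 *      unsigned long d7;
 *
 *      get_debugreg(d7, 7);            // d7 = paravirt_get_debugreg(7)
 *      set_debugreg(d7 | 0x1, 7);      // DR7 bit 0: local-enable breakpoint 0
 */
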
static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
        return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
                                      unsigned low, unsigned high)
{
        PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)                  \
do {                                            \
        u64 _l = paravirt_read_msr(msr);        \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        val = paravirt_read_msr(msr);           \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)   paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}
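
/*
 * Illustrative use only (not part of the original header): the wrappers
 * split and join the 64-bit MSR value in the same lo/hi form the native
 * rdmsr/wrmsr instructions use in edx:eax, e.g.:
 *
 *      u32 lo, hi;
 *      u64 efer;
 *
 *      rdmsr(MSR_EFER, lo, hi);        // efer == ((u64)hi << 32) | lo
 *      rdmsrl(MSR_EFER, efer);         // same value as a single u64
 *      wrmsrl(MSR_EFER, efer | EFER_NX);
 *
 * The *_safe variants return a nonzero error code instead of faulting
 * when the MSR does not exist.
 */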

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(cpu.set_iopl_mask, mask);
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

        return ret;
}

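/*
 * Editor's note (illustrative, not part of the original header): the
 * sizeof(pteval_t) > sizeof(long) branches above are for 32-bit PAE,
 * where a page-table entry is 64 bits wide but the machine word is 32
 * bits, so a value such as
 *
 *      pte_t pte = __pte(0x8000000000000067ULL);
 *
 * is passed to the mmu.make_pte hook as two 32-bit argument words
 * (low 0x00000067, high 0x80000000). 64-bit kernels always take the
 * single-word PVOP_CALLEE1 form.
 */
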
#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.ptep_modify_prot_commit,
                            vma, addr, ptep, pte.pte);
}

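/*
 * Illustrative use only (not part of the original header): callers treat
 * the start/commit pair as a transaction around a protection update,
 * roughly:
 *
 *      pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);
 *      pte_t new_pte = pte_mkwrite(old_pte);   // any pte modification
 *
 *      ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * which lets a hypervisor batch or otherwise optimize the
 * read-modify-write of the pte.
 */
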
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        p4dval_t val = native_p4d_val(p4d);

        PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
        p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

        return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
        return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {                                      \
        if (pgtable_l5_enabled())                                       \
                __set_pgd(pgdp, pgdval);                                \
        else                                                            \
                set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });     \
} while (0)

#define pgd_clear(pgdp) do {                                            \
        if (pgtable_l5_enabled())                                       \
                set_pgd(pgdp, __pgd(0));                                \
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
        set_p4d(p4dp, __p4d(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                         u32 val)
{
        PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
        return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                 \
        "push %rcx;"                                            \
        "push %rdx;"                                            \
        "push %rsi;"                                            \
        "push %rdi;"                                            \
        "push %r8;"                                             \
        "push %r9;"                                             \
        "push %r10;"                                            \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                              \
        "pop %r11;"                                             \
        "pop %r10;"                                             \
        "pop %r9;"                                              \
        "pop %r8;"                                              \
        "pop %rdi;"                                             \
        "pop %rsi;"                                             \
        "pop %rdx;"                                             \
        "pop %rcx;"

/* Saving all caller-saved registers would be too much; save only the
 * argument register and mark the remaining caller-saved registers as
 * clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function that saves all caller-saved
 * registers except the return-value register. This allows C functions
 * to be called from assembler code in which fewer registers than usual
 * are available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * argument registers and all scratch registers are preserved across
 * the call. The return value in rax/eax will not be saved, even for
 * void functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                                       \
        ((struct paravirt_callee_save) { func })

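/*
 * Illustrative use only (not part of the original header): a C helper
 * becomes callable under this convention by emitting the thunk next to
 * its definition and handing the thunk to the ops table, e.g.:
 *
 *      __visible bool __native_vcpu_is_preempted(long cpu)
 *      {
 *              return false;
 *      }
 *      PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 *
 *      pv_ops.lock.vcpu_is_preempted =
 *              PV_CALLEE_SAVE(__native_vcpu_is_preempted);
 *
 * __PV_IS_CALLEE_SAVE() skips the thunk for functions already written
 * in the callee-save convention (e.g. small assembly stubs).
 */
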
#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
#endif
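
/*
 * Illustrative use only (not part of the original header): these hooks
 * back the generic local_irq_save()/local_irq_restore() helpers, so the
 * usual pattern
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          // irq.save_fl, then irq.irq_disable
 *      ...critical section...
 *      local_irq_restore(flags);       // irq.restore_fl
 *
 * compiles down to pv calls that the hypervisor-specific backend can
 * satisfy cheaply (e.g. by flipping a flag in shared memory).
 */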

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
        .popsection

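/*
 * Editor's note (illustrative, not part of the original header): each
 * _PVSITE use records one patchable site in .parainstructions as
 * "address, operation type, length of the original code"; at boot,
 * apply_paravirt() walks these records and patches in the chosen
 * implementation. The layout corresponds to struct paravirt_patch_site
 * in <asm/paravirt_types.h>, roughly:
 *
 *      struct paravirt_patch_site {
 *              u8 *instr;      // original instructions
 *              u8 type;        // type of this instruction
 *              u8 len;         // length of original instruction
 *      };
 */
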

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)         ((off) / 8)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)         ((off) / 4)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(PV_CPU_iret),                              \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),                        \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);         \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or with something
 * equally special. Either way, we don't need to save any registers
 * for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs),                            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);             \
                 )

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),                   \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                            \
        PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                           \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);            \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX                                                 \
        PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),                          \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);           \
                 )

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000948#endif /* _ASM_X86_PARAVIRT_H */