Update Linux to v5.4.2
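
This pulls include/xen up to its v5.4.2 state. Notable changes:

- xen/arm: drop xen_dma_ops and the __xen_dma_* wrappers; the
  page-coherent.h helpers now call dma_direct_alloc() and
  dma_direct_free() directly, and swiotlb-xen.h gains
  xen_dma_sync_for_cpu()/xen_dma_sync_for_device().
- Remove the tmem and self-ballooning interfaces (xen/tmem.h,
  register_xen_selfballooning()).
- hvm/start_info.h: document version 1 of hvm_start_info, which can
  pass an optional memory map (memmap_paddr/memmap_entries) typed by
  the new XEN_HVM_MEMMAP_TYPE_* values.
- Add the xen-front-pgdir-shbuf helper for page-directory based
  frontend/backend shared buffers.
- xen-ops.h: turn xen_remap_domain_gfn_array() and friends into inline
  wrappers around xen_remap_pfn(), and replace the individual
  xen_efi_*() declarations with xen_efi_runtime_setup().
- events.h: replace xen_rebind_evtchn_to_cpu() with
  xen_set_affinity_evtchn() and guard the HVM callback vector with
  CONFIG_XEN_PVHVM.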
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 2982571..43ef24d 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -19,8 +19,6 @@
return PARAVIRT_LAZY_NONE;
}
-extern const struct dma_map_ops *xen_dma_ops;
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 59a2607..b9cc11e 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -1,107 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
+#ifndef _XEN_ARM_PAGE_COHERENT_H
+#define _XEN_ARM_PAGE_COHERENT_H
-#include <asm/page.h>
-#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
- if (dev && dev->archdata.dev_dma_ops)
- return dev->archdata.dev_dma_ops;
- return get_arch_dma_ops(NULL);
-}
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir);
+#include <asm/page.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
- return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+ return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
- xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+ dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
}
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long page_pfn = page_to_xen_pfn(page);
- unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
- unsigned long compound_pages =
- (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
- bool local = (page_pfn <= dev_pfn) &&
- (dev_pfn - page_pfn < compound_pages);
-
- /*
- * Dom0 is mapped 1:1, while the Linux page can span across
- * multiple Xen pages, it's not possible for it to contain a
- * mix of local and foreign Xen pages. So if the first xen_pfn
- * == mfn the page is local otherwise it's a foreign page
- * grant-mapped in dom0. If the page is local we can safely
- * call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (local)
- xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
- else
- __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long pfn = PFN_DOWN(handle);
- /*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
- * foreign mfn will always return false. If the page is local we can
- * safely call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->unmap_page)
- xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
- } else
- __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
- xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->sync_single_for_device)
- xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 4914b93..6fb95aa 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -27,16 +27,6 @@
int alloc_xenballooned_pages(int nr_pages, struct page **pages);
void free_xenballooned_pages(int nr_pages, struct page **pages);
-struct device;
-#ifdef CONFIG_XEN_SELFBALLOONING
-extern int register_xen_selfballooning(struct device *dev);
-#else
-static inline int register_xen_selfballooning(struct device *dev)
-{
- return -ENOSYS;
-}
-#endif
-
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
#else
diff --git a/include/xen/events.h b/include/xen/events.h
index c3e6bc6..c0e6a05 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -3,6 +3,7 @@
#define _XEN_EVENTS_H
#include <linux/interrupt.h>
+#include <linux/irq.h>
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#endif
@@ -59,7 +60,7 @@
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(int evtchn, int irq);
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(int port)
{
@@ -89,11 +90,13 @@
int irq_from_virq(unsigned int cpu, unsigned int virq);
unsigned int evtchn_from_irq(unsigned irq);
+#ifdef CONFIG_XEN_PVHVM
/* Xen HVM evtchn vector callback */
void xen_hvm_callback_vector(void);
#ifdef CONFIG_TRACING
#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
#endif
+#endif
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 6484159..50af9ea 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -33,7 +33,7 @@
* | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
* | | ("xEn3" with the 0x80 bit of the "E" set).
* 4 +----------------+
- * | version | Version of this structure. Current version is 0. New
+ * | version | Version of this structure. Current version is 1. New
* | | versions are guaranteed to be backwards-compatible.
* 8 +----------------+
* | flags | SIF_xxx flags.
@@ -48,6 +48,15 @@
* 32 +----------------+
* | rsdp_paddr | Physical address of the RSDP ACPI data structure.
* 40 +----------------+
+ * | memmap_paddr | Physical address of the (optional) memory map. Only
+ * | | present in version 1 and newer of the structure.
+ * 48 +----------------+
+ * | memmap_entries | Number of entries in the memory map table. Zero
+ * | | if there is no memory map being provided. Only
+ * | | present in version 1 and newer of the structure.
+ * 52 +----------------+
+ * | reserved | Version 1 and newer only.
+ * 56 +----------------+
*
* The layout of each entry in the module structure is the following:
*
@@ -62,14 +71,52 @@
* | reserved |
* 32 +----------------+
*
+ * The layout of each entry in the memory map table is as follows:
+ *
+ * 0 +----------------+
+ * | addr | Base address
+ * 8 +----------------+
+ * | size | Size of mapping in bytes
+ * 16 +----------------+
+ * | type | Type of mapping as defined between the hypervisor
+ * | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below.
+ * 20 +----------------+
+ * | reserved |
+ * 24 +----------------+
+ *
* Addresses and sizes are always 64bit little endian unsigned integers.
*
* NB: Xen on x86 will always try to place all the data below the 4GiB
* boundary.
+ *
+ * Version numbers of the hvm_start_info structure have evolved like this:
+ *
+ * Version 0: Initial implementation.
+ *
+ * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
+ * padding) to the end of the hvm_start_info struct. These new
+ * fields can be used to pass a memory map to the guest. The
+ * memory map is optional and so guests that understand version 1
+ * of the structure must check that memmap_entries is non-zero
+ * before trying to read the memory map.
*/
#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
/*
+ * The values used in the type field of the memory map table entries are
+ * defined below and match the Address Range Types as defined in the "System
+ * Address Map Interfaces" section of the ACPI Specification. Please refer to
+ * section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications
+ */
+#define XEN_HVM_MEMMAP_TYPE_RAM 1
+#define XEN_HVM_MEMMAP_TYPE_RESERVED 2
+#define XEN_HVM_MEMMAP_TYPE_ACPI 3
+#define XEN_HVM_MEMMAP_TYPE_NVS 4
+#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5
+#define XEN_HVM_MEMMAP_TYPE_DISABLED 6
+#define XEN_HVM_MEMMAP_TYPE_PMEM 7
+
+/*
* C representation of the x86/HVM start info layout.
*
* The canonical definition of this layout is above, this is just a way to
@@ -86,6 +133,13 @@
uint64_t cmdline_paddr; /* Physical address of the command line. */
uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
/* structure. */
+ /* All following fields only present in version 1 and newer */
+ uint64_t memmap_paddr; /* Physical address of an array of */
+ /* hvm_memmap_table_entry. */
+ uint32_t memmap_entries; /* Number of entries in the memmap table. */
+ /* Value will be zero if there is no memory */
+ /* map being provided. */
+ uint32_t reserved; /* Must be zero. */
};
struct hvm_modlist_entry {
@@ -95,4 +149,11 @@
uint64_t reserved;
};
+struct hvm_memmap_table_entry {
+ uint64_t addr; /* Base address of the memory region */
+ uint64_t size; /* Size of the memory region in bytes */
+ uint32_t type; /* Mapping type */
+ uint32_t reserved; /* Must be zero for Version 1. */
+};
+
#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
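Illustration (not part of the patch): a minimal, self-contained sketch of how a
version-1-aware PVH guest might walk the optional memory map once it has mapped
memmap_paddr. The struct mirrors the layout documented above; dump_memmap() is
a hypothetical helper.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the hvm_memmap_table_entry layout documented above. */
struct hvm_memmap_table_entry {
	uint64_t addr;     /* Base address of the memory region */
	uint64_t size;     /* Size of the memory region in bytes */
	uint32_t type;     /* XEN_HVM_MEMMAP_TYPE_* value */
	uint32_t reserved; /* Must be zero for version 1 */
};

/* Only valid after checking start_info->version >= 1 and
 * start_info->memmap_entries != 0, as the comment above requires. */
static void dump_memmap(const struct hvm_memmap_table_entry *map,
			uint32_t entries)
{
	for (uint32_t i = 0; i < entries; i++)
		printf("[%#llx, %#llx) type %u\n",
		       (unsigned long long)map[i].addr,
		       (unsigned long long)(map[i].addr + map[i].size),
		       (unsigned)map[i].type);
}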
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 4c5751c..4470048 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -245,12 +245,6 @@
/*
- * Prevent the balloon driver from changing the memory reservation
- * during a driver critical region.
- */
-extern spinlock_t xen_reservation_lock;
-
-/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 5e4b83f..d71380f 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,6 +4,11 @@
#include <linux/swiotlb.h>
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+ phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+ phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
diff --git a/include/xen/tmem.h b/include/xen/tmem.h
deleted file mode 100644
index c80bafe..0000000
--- a/include/xen/tmem.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_TMEM_H
-#define _XEN_TMEM_H
-
-#include <linux/types.h>
-
-#ifdef CONFIG_XEN_TMEM_MODULE
-#define tmem_enabled true
-#else
-/* defined in drivers/xen/tmem.c */
-extern bool tmem_enabled;
-#endif
-
-#ifdef CONFIG_XEN_SELFBALLOONING
-extern int xen_selfballoon_init(bool, bool);
-#endif
-
-#endif /* _XEN_TMEM_H */
diff --git a/include/xen/xen-front-pgdir-shbuf.h b/include/xen/xen-front-pgdir-shbuf.h
new file mode 100644
index 0000000..150ef7e
--- /dev/null
+++ b/include/xen/xen-front-pgdir-shbuf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen frontend/backend page directory based shared buffer
+ * helper module.
+ *
+ * Copyright (C) 2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_FRONT_PGDIR_SHBUF_H_
+#define __XEN_FRONT_PGDIR_SHBUF_H_
+
+#include <linux/kernel.h>
+
+#include <xen/grant_table.h>
+
+struct xen_front_pgdir_shbuf_ops;
+
+struct xen_front_pgdir_shbuf {
+ /*
+ * Number of references granted for the backend use:
+ *
+ * - for frontend allocated/imported buffers this holds the number
+ * of grant references for the page directory and the pages
+ * of the buffer
+ *
+ * - for the buffer provided by the backend this only holds the number
+ * of grant references for the page directory itself as grant
+ * references for the buffer will be provided by the backend.
+ */
+ int num_grefs;
+ grant_ref_t *grefs;
+ /* Page directory backing storage. */
+ u8 *directory;
+
+ /*
+ * Number of pages for the shared buffer itself (excluding the page
+ * directory).
+ */
+ int num_pages;
+ /*
+ * Backing storage of the shared buffer: these are the pages being
+ * shared.
+ */
+ struct page **pages;
+
+ struct xenbus_device *xb_dev;
+
+ /* These are the ops used internally depending on be_alloc mode. */
+ const struct xen_front_pgdir_shbuf_ops *ops;
+
+ /* Xen map handles for the buffer allocated by the backend. */
+ grant_handle_t *backend_map_handles;
+};
+
+struct xen_front_pgdir_shbuf_cfg {
+ struct xenbus_device *xb_dev;
+
+ /* Number of pages of the buffer backing storage. */
+ int num_pages;
+ /* Pages of the buffer to be shared. */
+ struct page **pages;
+
+ /*
+ * This is allocated by the caller because in some use-cases the
+ * buffer structure is allocated as part of a bigger one.
+ */
+ struct xen_front_pgdir_shbuf *pgdir;
+ /*
+ * Mode of grant reference sharing: if set, the backend will share
+ * grant references to the buffer with the frontend.
+ */
+ int be_alloc;
+};
+
+int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg);
+
+grant_ref_t
+xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf);
+
+void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf);
+
+#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */
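Illustration (not part of the patch): a minimal frontend-side sketch of the new
API in the frontend-allocated (be_alloc == 0) mode. share_buffer() is a
hypothetical helper; teardown via xen_front_pgdir_shbuf_free() is elided.

#include <xen/xen-front-pgdir-shbuf.h>

/* Grant @num_pages frontend-allocated pages to the backend and return
 * the grant reference of the page directory start, which the frontend
 * would then publish to the backend (e.g. via XenStore). */
static int share_buffer(struct xenbus_device *xb_dev,
			struct page **pages, int num_pages,
			struct xen_front_pgdir_shbuf *shbuf,
			grant_ref_t *dir_gref)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.pages = pages,
		.num_pages = num_pages,
		.pgdir = shbuf,
		.be_alloc = 0,	/* frontend allocates and grants the pages */
	};
	int ret;

	ret = xen_front_pgdir_shbuf_alloc(&cfg);
	if (ret < 0)
		return ret;

	*dir_gref = xen_front_pgdir_shbuf_get_dir_start(shbuf);
	return 0;
}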
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index f6e798d..d89969a 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
+#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
@@ -60,80 +61,29 @@
unsigned int order) { }
#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
+static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr,
+ pgprot_t prot, unsigned int domid,
+ bool no_translate, struct page **pages)
+{
+ BUG();
+ return 0;
+}
+#endif
+
struct vm_area_struct;
-/*
- * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
- * @vma: VMA to map the pages into
- * @addr: Address at which to map the pages
- * @gfn: Array of GFNs to map
- * @nr: Number entries in the GFN array
- * @err_ptr: Returns per-GFN error status.
- * @prot: page protection mask
- * @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
- *
- * @gfn and @err_ptr may point to the same buffer, the GFNs will be
- * overwritten by the error codes after they are mapped.
- *
- * Returns the number of successfully mapped frames, or a -ve error
- * code.
- */
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages);
-
-/*
- * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
- * @vma: VMA to map the pages into
- * @addr: Address at which to map the pages
- * @mfn: Array of MFNs to map
- * @nr: Number entries in the MFN array
- * @err_ptr: Returns per-MFN error status.
- * @prot: page protection mask
- * @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
- *
- * @mfn and @err_ptr may point to the same buffer, the MFNs will be
- * overwritten by the error codes after they are mapped.
- *
- * Returns the number of successfully mapped frames, or a -ve error
- * code.
- */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
- unsigned long addr, xen_pfn_t *mfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid, struct page **pages);
-
-/* xen_remap_domain_gfn_range() - map a range of foreign frames
- * @vma: VMA to map the pages into
- * @addr: Address at which to map the pages
- * @gfn: First GFN to map.
- * @nr: Number frames to map
- * @prot: page protection mask
- * @domid: Domain owning the pages
- * @pages: Array of pages if this domain has an auto-translated physmap
- *
- * Returns the number of successfully mapped frames, or a -ve error
- * code.
- */
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t gfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages);
-int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
- int numpgs, struct page **pages);
-
#ifdef CONFIG_XEN_AUTO_XLATE
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
- unsigned domid,
+ unsigned int domid,
struct page **pages);
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages);
@@ -159,35 +109,110 @@
}
#endif
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long len);
+
+/*
+ * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: Array of GFNs to map
+ * @nr: Number entries in the GFN array
+ * @err_ptr: Returns per-GFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @gfn and @err_ptr may point to the same buffer, the GFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+ prot, domid, pages);
+
+ /* We BUG_ON because it's a programmer error to pass a NULL err_ptr:
+ * without one, the eventual symptom of "wrong memory was mapped in"
+ * is very hard to trace back to its actual cause.
+ */
+ BUG_ON(err_ptr == NULL);
+ return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+ false, pages);
+}
+
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @mfn: Array of MFNs to map
+ * @nr: Number entries in the MFN array
+ * @err_ptr: Returns per-MFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @mfn and @err_ptr may point to the same buffer, the MFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr, xen_pfn_t *mfn,
+ int nr, int *err_ptr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
+
+ return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+ true, pages);
+}
+
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: First GFN to map.
+ * @nr: Number frames to map
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t gfn, int nr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
+
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+ pages);
+}
+
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+ int numpgs, struct page **pages);
+
int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
unsigned long nr_grant_frames);
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
-efi_status_t xen_efi_set_time(efi_time_t *tm);
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
- efi_time_t *tm);
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm);
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
- u32 *attr, unsigned long *data_size,
- void *data);
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
- efi_char16_t *name, efi_guid_t *vendor);
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
- u32 attr, unsigned long data_size,
- void *data);
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
- u64 *remaining_space,
- u64 *max_variable_size);
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count);
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
- unsigned long count, unsigned long sg_list);
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
- unsigned long count, u64 *max_size,
- int *reset_type);
-void xen_efi_reset_system(int reset_type, efi_status_t status,
- unsigned long data_size, efi_char16_t *data);
+void xen_efi_runtime_setup(void);
#ifdef CONFIG_PREEMPT
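
Illustration (not part of the patch): a hypothetical caller of the now-inline
xen_remap_domain_gfn_array(). A non-NULL err_ptr is mandatory in the PV path,
as the BUG_ON() above enforces.

#include <linux/mm.h>
#include <xen/xen-ops.h>

/* Map @nr foreign frames owned by @domid into @vma at @addr; @errs
 * receives the per-frame error codes. Returns the number of
 * successfully mapped frames or a -ve error code. */
static int map_foreign_gfns(struct vm_area_struct *vma, unsigned long addr,
			    xen_pfn_t *gfns, int *errs, int nr,
			    unsigned int domid, struct page **pages)
{
	return xen_remap_domain_gfn_array(vma, addr, gfns, nr, errs,
					  vma->vm_page_prot, domid, pages);
}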
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 1e1d9bd..19a72f5 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,9 @@
extern uint32_t xen_start_flags;
+#include <xen/interface/hvm/start_info.h>
+extern struct hvm_start_info pvh_start_info;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
@@ -39,4 +42,14 @@
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
+struct bio_vec;
+struct page;
+
+bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct page *page);
+
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+extern u64 xen_saved_max_mem_size;
+#endif
+
#endif /* _XEN_XEN_H */