/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Common functionality of grant device.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _GNTDEV_COMMON_H
#define _GNTDEV_COMMON_H

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <xen/interface/event_channel.h>
#include <xen/grant_table.h>

struct gntdev_dmabuf_priv;

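/*
 * Per-open-file state of /dev/xen/gntdev: every grant map created through a
 * given file descriptor is kept on @maps.
 */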
struct gntdev_priv {
	/* Maps with visible offsets in the file descriptor. */
	struct list_head maps;
	/* lock protects maps. */
	struct mutex lock;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	/* Device for which DMA memory is allocated. */
	struct device *dma_dev;
#endif

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	struct gntdev_dmabuf_priv *dmabuf_priv;
#endif
};

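/*
 * Unmap notification requested by userspace (IOCTL_GNTDEV_SET_UNMAP_NOTIFY):
 * depending on @flags, clear the byte at @addr and/or signal @event when the
 * mapping is torn down.
 */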
struct gntdev_unmap_notify {
	int flags;
	/* Address relative to the start of the gntdev_grant_map. */
	int addr;
	evtchn_port_t event;
};

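/*
 * A batch of granted pages mapped through this device; the *_ops arrays hold
 * one grant-table operation per page for the map/unmap hypercalls.
 */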
struct gntdev_grant_map {
	atomic_t in_use;
	struct mmu_interval_notifier notifier;
	bool notifier_init;
	struct list_head next;
	int index;
	int count;
	int flags;
	refcount_t users;
	struct gntdev_unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref *kmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	bool *being_removed;
	struct page **pages;
	unsigned long pages_vm_start;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	/*
	 * If dma_vaddr is not NULL then this mapping is backed by DMA
	 * capable memory.
	 */

	struct device *dma_dev;
	/* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
	int dma_flags;
	void *dma_vaddr;
	dma_addr_t dma_bus_addr;
	/* Needed to avoid allocation in gnttab_dma_free_pages(). */
	xen_pfn_t *frames;
#endif

	/* Number of live grants */
	atomic_t live_grants;
	/* Needed to avoid allocation in __unmap_grant_pages */
	struct gntab_unmap_queue_data unmap_data;
};

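/*
 * Allocate a new map together with its per-page op arrays (and, with
 * CONFIG_XEN_GRANT_DMA_ALLOC, DMA-capable backing pages selected by
 * @dma_flags); returns NULL on failure.
 */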
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags);

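/* Insert @add into @priv->maps, assigning it a free offset (index) in the file. */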
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);

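/* Drop a reference to @map; the last put unmaps any remaining grants and frees it. */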
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);

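/* Sanity-check an ioctl page count against the module's per-call page limit. */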
bool gntdev_test_page_count(unsigned int count);

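/* Issue the grant-table map operations for every page of @map. */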
int gntdev_map_grant_pages(struct gntdev_grant_map *map);

#endif