/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
        struct list_head next;
        union {
                struct {
                        struct vm_struct *area;
                } pv;
                struct {
                        struct page *pages[XENBUS_MAX_RING_PAGES];
                        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
                        void *addr;
                } hvm;
        };
        grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
        unsigned int nr_handles;
};

struct map_ring_valloc {
        struct xenbus_map_node *node;

        /* Why do we need two arrays? See the comment of __xenbus_map_ring. */
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

        struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

        unsigned int idx;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
        int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
                   void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
        static const char *const name[] = {
                [ XenbusStateUnknown      ] = "Unknown",
                [ XenbusStateInitialising ] = "Initialising",
                [ XenbusStateInitWait     ] = "InitWait",
                [ XenbusStateInitialised  ] = "Initialised",
                [ XenbusStateConnected    ] = "Connected",
                [ XenbusStateClosing      ] = "Closing",
                [ XenbusStateClosed       ] = "Closed",
                [ XenbusStateReconfiguring] = "Reconfiguring",
                [ XenbusStateReconfigured ] = "Reconfigured",
        };
        return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: optional callback consulted before an event on the watched
 *		 path is queued for @callback; may be NULL to handle all events
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
                      bool (*will_handle)(struct xenbus_watch *,
                                          const char *, const char *),
                      void (*callback)(struct xenbus_watch *,
                                       const char *, const char *))
{
        int err;

        watch->node = path;
        watch->will_handle = will_handle;
        watch->callback = callback;

        err = register_xenbus_watch(watch);

        if (err) {
                watch->node = NULL;
                watch->will_handle = NULL;
                watch->callback = NULL;
                xenbus_dev_fatal(dev, err, "adding watch on %s", path);
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
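
/*
 * Illustrative use (a sketch, not part of this file): a frontend commonly
 * watches its backend's state node, with otherend_changed() standing in
 * for the driver's own callback:
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &dev->otherend_watch,
 *				NULL, otherend_changed);
 *	if (err)
 *		return err;
 */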

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: optional callback consulted before an event on the watched
 *		 path is queued for @callback; may be NULL to handle all events
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and its following
 * arguments, using the given xenbus_watch structure for storage, and the
 * given @callback function as the callback.  Return 0 on success, or -errno
 * on error.  On success, the watched path will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
                         bool (*will_handle)(struct xenbus_watch *,
                                             const char *, const char *),
                         void (*callback)(struct xenbus_watch *,
                                          const char *, const char *),
                         const char *pathfmt, ...)
{
        int err;
        va_list ap;
        char *path;

        va_start(ap, pathfmt);
        path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
        va_end(ap);

        if (!path) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                return -ENOMEM;
        }
        err = xenbus_watch_path(dev, path, watch, will_handle, callback);

        if (err)
                kfree(path);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
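
/*
 * Illustrative use (a sketch): watching a single node underneath the peer's
 * directory; the node name "feature-foo" and feature_changed() are
 * hypothetical:
 *
 *	err = xenbus_watch_pathfmt(dev, &watch, NULL, feature_changed,
 *				   "%s/feature-foo", dev->otherend);
 */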

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
                      enum xenbus_state state, int depth)
{
        /* We check whether the state is currently set to the given value, and
           if not, then the state is set.  We don't want to unconditionally
           write the given state, because we don't want to fire watches
           unnecessarily.  Furthermore, if the node has gone, we don't write
           to it, as the device will be tearing down, and we don't want to
           resurrect that directory.

           Note that, because of this cached value of our state, this
           function will not take a caller's Xenstore transaction
           (something it attempted in the past) because dev->state
           would not get reset if the transaction was aborted.
         */

        struct xenbus_transaction xbt;
        int current_state;
        int err, abort;

        if (state == dev->state)
                return 0;

again:
        abort = 1;

        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "starting transaction");
                return 0;
        }

        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        if (err != 1)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "writing new state");
                goto abort;
        }

        abort = 0;
abort:
        err = xenbus_transaction_end(xbt, abort);
        if (err) {
                if (err == -EAGAIN && !abort)
                        goto again;
                xenbus_switch_fatal(dev, depth, err, "ending transaction");
        } else
                dev->state = state;

        return 0;
}

/**
 * xenbus_switch_state - advertise a state change
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
        return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
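
/*
 * Illustrative use (a sketch): a frontend that has published its ring
 * references and event channel typically advertises readiness with
 *
 *	xenbus_switch_state(dev, XenbusStateInitialised);
 *
 * and later moves to XenbusStateConnected once the backend connects.  No
 * extra error handling is needed here, since a failed switch already
 * schedules a closedown via XenbusStateClosing.
 */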

int xenbus_frontend_closed(struct xenbus_device *dev)
{
        xenbus_switch_state(dev, XenbusStateClosed);
        complete(&dev->down);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                const char *fmt, va_list ap)
{
        unsigned int len;
        char *printf_buffer;
        char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (!printf_buffer)
                return;

        len = sprintf(printf_buffer, "%i ", -err);
        vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
        if (path_buffer)
                xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

        kfree(printf_buffer);
        kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal - put an error message in the store and close the device
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
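
/*
 * Illustrative use (a sketch): report a fatal setup error and let the
 * implied switch to XenbusStateClosing tear the connection down:
 *
 *	ring = (void *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 *	if (!ring) {
 *		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 *		return -ENOMEM;
 *	}
 */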

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps avoid
 * recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        if (!depth)
                __xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring - grant access to a ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
                      unsigned int nr_pages, grant_ref_t *grefs)
{
        int err;
        unsigned int i;
        grant_ref_t gref_head;

        err = gnttab_alloc_grant_references(nr_pages, &gref_head);
        if (err) {
                xenbus_dev_fatal(dev, err, "granting access to ring page");
                return err;
        }

        for (i = 0; i < nr_pages; i++) {
                unsigned long gfn;

                if (is_vmalloc_addr(vaddr))
                        gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
                else
                        gfn = virt_to_gfn(vaddr);

                grefs[i] = gnttab_claim_grant_reference(&gref_head);
                gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
                                                gfn, 0);

                vaddr = vaddr + XEN_PAGE_SIZE;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
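
/*
 * Illustrative use (a sketch): granting a one-page shared ring; for
 * multi-page rings, pass the page count and an array of that many grant
 * references.
 *
 *	grant_ref_t gref;
 *
 *	err = xenbus_grant_ring(dev, ring, 1, &gref);
 *	if (err < 0)
 *		return err;
 */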

/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err)
                xenbus_dev_fatal(dev, err, "allocating event channel");
        else
                *port = alloc_unbound.port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
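
/*
 * Illustrative use (a sketch): allocate a port and bind it to an interrupt
 * handler; my_interrupt() is a hypothetical handler:
 *
 *	evtchn_port_t evtchn;
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *	err = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *					"my-device", dev);
 */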

/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
        struct evtchn_close close;
        int err;

        close.port = port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (err)
                xenbus_dev_error(dev, err, "freeing event channel %u", port);

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc - allocate & map pages of virtual address space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and -errno on
 * error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
{
        int err;
        struct map_ring_valloc *info;

        *vaddr = NULL;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
        if (!info->node)
                err = -ENOMEM;
        else
                err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

        kfree(info->node);
        kfree(info);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
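
/*
 * Illustrative use (a sketch): a backend mapping a frontend's one-page
 * ring, after having read the grant reference from XenStore:
 *
 *	void *addr;
 *
 *	err = xenbus_map_ring_valloc(dev, &gref, 1, &addr);
 *	if (err)
 *		return err;
 */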

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
                             grant_ref_t *gnt_refs,
                             unsigned int nr_grefs,
                             grant_handle_t *handles,
                             struct map_ring_valloc *info,
                             unsigned int flags,
                             bool *leaked)
{
        int i, j;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_grefs; i++) {
                gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
                                  gnt_refs[i], dev->otherend_id);
                handles[i] = INVALID_GRANT_HANDLE;
        }

        gnttab_batch_map(info->map, i);

        for (i = 0; i < nr_grefs; i++) {
                if (info->map[i].status != GNTST_okay) {
                        xenbus_dev_fatal(dev, info->map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                } else
                        handles[i] = info->map[i].handle;
        }

        return 0;

 fail:
        for (i = j = 0; i < nr_grefs; i++) {
                if (handles[i] != INVALID_GRANT_HANDLE) {
                        gnttab_set_unmap_op(&info->unmap[j],
                                            info->phys_addrs[i],
                                            GNTMAP_host_map, handles[i]);
                        j++;
                }
        }

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
                BUG();

        *leaked = false;
        for (i = 0; i < j; i++) {
                if (info->unmap[i].status != GNTST_okay) {
                        *leaked = true;
                        break;
                }
        }

        return -ENOENT;
}

/**
 * xenbus_unmap_ring - unmap memory imported from another domain
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
                             unsigned int nr_handles, unsigned long *vaddrs)
{
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i;
        int err;

        if (nr_handles > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_handles; i++)
                gnttab_set_unmap_op(&unmap[i], vaddrs[i],
                                    GNTMAP_host_map, handles[i]);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                BUG();

        err = GNTST_okay;
        for (i = 0; i < nr_handles; i++) {
                if (unmap[i].status != GNTST_okay) {
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         handles[i], unmap[i].status);
                        err = unmap[i].status;
                        break;
                }
        }

        return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
                                            unsigned int goffset,
                                            unsigned int len,
                                            void *data)
{
        struct map_ring_valloc *info = data;
        unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

        info->phys_addrs[info->idx] = vaddr;
        info->addrs[info->idx] = vaddr;

        info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
                               struct map_ring_valloc *info,
                               grant_ref_t *gnt_ref,
                               unsigned int nr_grefs,
                               void **vaddr)
{
        struct xenbus_map_node *node = info->node;
        int err;
        void *addr;
        bool leaked = false;
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

        err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;

        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm,
                             info);

        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
                                info, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;

        if (err)
                goto out_free_ballooned_pages;

        addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
                    PAGE_KERNEL);
        if (!addr) {
                err = -ENOMEM;
                goto out_xenbus_unmap_ring;
        }

        node->hvm.addr = addr;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = addr;
        info->node = NULL;

        return 0;

 out_xenbus_unmap_ring:
        if (!leaked)
                xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
        else
                pr_alert("leaking %p size %u page(s)",
                         addr, nr_pages);
 out_free_ballooned_pages:
        if (!leaked)
                xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 out_err:
        return err;
}

/**
 * xenbus_unmap_ring_vfree - unmap a ring mapped with xenbus_map_ring_valloc
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
        return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
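
/*
 * Illustrative use (a sketch): the counterpart of the mapping example
 * above, handing back the address that xenbus_map_ring_valloc() returned:
 *
 *	err = xenbus_unmap_ring_vfree(dev, addr);
 */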

#ifdef CONFIG_XEN_PV
static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
{
        struct map_ring_valloc *info = data;

        info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
        return 0;
}

static int xenbus_map_ring_pv(struct xenbus_device *dev,
                              struct map_ring_valloc *info,
                              grant_ref_t *gnt_refs,
                              unsigned int nr_grefs,
                              void **vaddr)
{
        struct xenbus_map_node *node = info->node;
        struct vm_struct *area;
        bool leaked = false;
        int err = -ENOMEM;

        area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
        if (!area)
                return -ENOMEM;
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
                goto failed;
        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
                                info, GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        if (err)
                goto failed;

        node->nr_handles = nr_grefs;
        node->pv.area = area;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = area->addr;
        info->node = NULL;

        return 0;

failed:
        if (!leaked)
                free_vm_area(area);
        else
                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

        return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        unsigned int level;
        int i;
        bool leaked = false;
        int err;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                if (node->pv.area->addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        for (i = 0; i < node->nr_handles; i++) {
                unsigned long addr;

                memset(&unmap[i], 0, sizeof(unmap[i]));
                addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
                unmap[i].host_addr = arbitrary_virt_to_machine(
                        lookup_address(addr, &level)).maddr;
                unmap[i].dev_bus_addr = 0;
                unmap[i].handle = node->handles[i];
        }

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                BUG();

        err = GNTST_okay;
        leaked = false;
        for (i = 0; i < node->nr_handles; i++) {
                if (unmap[i].status != GNTST_okay) {
                        leaked = true;
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         node->handles[i], unmap[i].status);
                        err = unmap[i].status;
                        break;
                }
        }

        if (!leaked)
                free_vm_area(node->pv.area);
        else
                pr_alert("leaking VM area %p size %u page(s)",
                         node->pv.area, node->nr_handles);

        kfree(node);
        return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
        .map = xenbus_map_ring_pv,
        .unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm {
        unsigned int idx;
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
                                              unsigned int goffset,
                                              unsigned int len,
                                              void *data)
{
        struct unmap_ring_hvm *info = data;

        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

        info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
        int rv;
        struct xenbus_map_node *node;
        void *addr;
        struct unmap_ring_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                addr = node->hvm.addr;
                if (addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = addr = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        nr_pages = XENBUS_PAGES(node->nr_handles);

        gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
                             xenbus_unmap_ring_setup_grant_hvm,
                             &info);

        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               info.addrs);
        if (!rv) {
                vunmap(vaddr);
                xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
        } else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

        kfree(node);
        return rv;
}

/**
 * xenbus_read_driver_state - read the state of a driver from the store
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
        enum xenbus_state result;
        int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
        if (err)
                result = XenbusStateUnknown;

        return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
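
/*
 * Illustrative use (a sketch): poll the peer's state, e.g. inside a wait
 * loop while a backend finishes closing:
 *
 *	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
 *		break;
 */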

static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_hvm,
        .unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                ring_ops = &ring_ops_pv;
        else
#endif
                ring_ops = &ring_ops_hvm;
}