// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

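/*
 * vhost worker callback for a guest kick: forward it to the vDPA parent
 * by ringing the corresponding hardware virtqueue.
 */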
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

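/*
 * Try to register an IRQ bypass producer for the virtqueue, so that a
 * registered consumer (such as KVM's irqfd) can take the device interrupt
 * directly instead of bouncing through vhost_vdpa_virtqueue_cb(). A
 * registration failure is not fatal; it is only logged and the ordinary
 * callback path is used instead.
 */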
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

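/*
 * VHOST_VDPA_SET_STATUS: write the virtio device status. Clearing status
 * bits is only permitted via a full reset to 0. When DRIVER_OK is set or
 * cleared, the per-virtqueue IRQ bypass producers are registered or torn
 * down accordingly.
 */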
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

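/*
 * VHOST_VDPA_SET_CONFIG_CALL: install (or, with VHOST_FILE_UNBIND, remove)
 * the eventfd that is signalled on a config-space change, and propagate the
 * callback to the vDPA device.
 */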
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

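/*
 * Per-virtqueue ioctls. The vring index is validated (and hardened with
 * array_index_nospec()) first. State that lives in the device is pulled in
 * before delegating to the generic vhost_vring_ioctl(), and the results
 * (ring addresses, base index, call eventfd, ring size) are then pushed
 * back to the vDPA device through its config ops.
 */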
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

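/*
 * Top-level ioctl dispatcher for the vhost-vdpa character device.
 * VHOST_SET_BACKEND_FEATURES is handled before taking the vhost device
 * mutex; everything else runs under it, falling back to the generic vhost
 * ioctls and finally to the per-vring handler above.
 */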
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

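/*
 * Drop every IOTLB mapping that intersects [start, last]: mark the backing
 * pages dirty where they were writable, unpin them, subtract them from the
 * owner's pinned-page accounting and free the IOTLB entries.
 */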
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

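/*
 * Record a mapping in the vhost IOTLB and install it in the device, using
 * whichever translation path the parent provides: a per-range dma_map() op,
 * a whole-table set_map() op (deferred while a batch is in flight), or the
 * platform IOMMU domain. On success the pages are charged to pinned_vm.
 */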
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

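/*
 * Handle a VHOST_IOTLB_UPDATE message: validate the IOVA range against the
 * device's usable range, pin the userspace pages in page-sized batches with
 * pin_user_pages(), coalesce physically contiguous runs and map each run
 * with vhost_vdpa_map(). On failure every page that was pinned but not
 * mapped is unpinned, and already-mapped ranges are torn down through
 * vhost_vdpa_unmap().
 */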
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk of pinned memory */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the last contiguous chunk of pinned memory */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which were pinned but
			 * not yet mapped, because vhost_vdpa_map() or
			 * pin_user_pages() failed.
			 *
			 * Mapped pages are accounted in vhost_vdpa_map(),
			 * hence the corresponding unpinning will be handled
			 * by vhost_vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

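/*
 * If the parent device cannot translate DMA on its own (no dma_map or
 * set_map op), allocate a platform IOMMU domain for its DMA device and
 * attach to it so that vhost_vdpa_map()/unmap() can program the IOMMU
 * directly. Cache-coherent DMA is required.
 */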
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to handle DMA translation by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

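/*
 * Determine the usable IOVA range advertised via VHOST_VDPA_GET_IOVA_RANGE:
 * prefer the device's own get_iova_range() op, fall back to the IOMMU
 * domain geometry when it enforces an aperture, and otherwise allow the
 * whole 64-bit space.
 */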
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
					  DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

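/*
 * Let userspace mmap a virtqueue doorbell page (selected by vm_pgoff) so
 * the device can be kicked by a direct register write instead of going
 * through the kick eventfd and handle_vq_kick(). Only a single write-only,
 * shared page is accepted, and only when the doorbell occupies a whole
 * page by itself.
 */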
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and
	 * does not share its page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

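/*
 * Unbind from the vDPA device: remove the character device, then wait for
 * any userspace that still has it open to release it (signalled through
 * v->completion) before dropping the last reference.
 */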
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");