// SPDX-License-Identifier: GPL-2.0-only
/*
 * psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 * - we need to work out if the MMU is relevant (e.g. for
 *   accelerated operations on a GEM object)
 */

#include <linux/pagemap.h>

#include <drm/drm.h>
#include <drm/drm_vma_manager.h>

#include "psb_drv.h"

void psb_gem_free_object(struct drm_gem_object *obj)
{
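	/* The GEM object is embedded in our gtt_range, so recover the range
	   that backs it with container_of() */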
	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);

	/* Remove the list map if one is present */
	drm_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);

	/* This must occur last as it frees up the memory of the GEM object */
	psb_gtt_free_range(obj->dev, gtt);
}

int psb_gem_get_aperture(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
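	/* Querying the aperture is not implemented for this driver, so
	   reject the ioctl */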
	return -EINVAL;
}

/**
 * psb_gem_create - create a mappable object
 * @file: the DRM file of the client
 * @dev: our device
 * @size: the size requested
 * @handlep: returned handle (opaque number)
 * @stolen: unused
 * @align: unused
 *
 * Create a GEM object, fill in the boilerplate and attach a handle to
 * it so that userspace can speak about it. This does the core work
 * for the various methods that do/will create GEM objects for things.
 */
int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
		   u32 *handlep, int stolen, u32 align)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

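	/* GEM objects are managed in whole pages, so round the request up */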
	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE);
	if (r == NULL) {
		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
		return -ENOSPC;
	}
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code so use -ENOMEM */
		dev_err(dev->dev, "GEM init failed for %lld\n", size);
		return -ENOMEM;
	}
	/* Limit the object to 32bit mappings */
	mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
			&r->gem, size);
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_put_unlocked(&r->gem);
	*handlep = handle;
	return 0;
}

/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
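	/* Bytes per pixel is bpp rounded up to a whole byte, and the line
	   pitch is then padded out to a 64 byte boundary */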
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return psb_gem_create(file, dev, args->size, &args->handle, 0,
			      PAGE_SIZE);
}

/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vmf: fault details (the faulting VMA is vmf->vma)
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking it when we run out of space. We can put
 * that off for now; it is not needed for our simple uses.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct gtt_range *r;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = dev->dev_private;

	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */

	/* Make sure we don't parallel update on a fault, nor move or remove
	   something from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (r->mmapping == 0) {
		err = psb_gtt_pin(r);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			ret = vmf_error(err);
			goto fail;
		}
		r->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (r->stolen)
		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(r->pages[page_offset]);
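	/* Insert just the single faulting page; neighbouring pages will be
	   filled in by further faults */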
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}