// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

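/*
 * Allocate a zeroed context. The context is not usable until it has
 * been tied to an AFU with ocxl_context_init().
 */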
struct ocxl_context *ocxl_context_alloc(void)
{
        return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
}

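/*
 * Initialize a context for the given AFU: allocate a PASID from the
 * AFU's range and set up the context locks and IRQ IDR.
 */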
int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
                struct address_space *mapping)
{
        int pasid;

        ctx->afu = afu;
        mutex_lock(&afu->contexts_lock);
        pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
                        afu->pasid_base + afu->pasid_max, GFP_KERNEL);
        if (pasid < 0) {
                mutex_unlock(&afu->contexts_lock);
                return pasid;
        }
        afu->pasid_count++;
        mutex_unlock(&afu->contexts_lock);

        ctx->pasid = pasid;
        ctx->status = OPENED;
        mutex_init(&ctx->status_mutex);
        ctx->mapping = mapping;
        mutex_init(&ctx->mapping_lock);
        init_waitqueue_head(&ctx->events_wq);
        mutex_init(&ctx->xsl_error_lock);
        mutex_init(&ctx->irq_lock);
        idr_init(&ctx->irq_idr);
        ctx->tidr = 0;

        /*
         * Keep a reference on the AFU to make sure it stays valid
         * for the lifetime of the context
         */
        ocxl_afu_get(afu);
        return 0;
}

/*
 * Callback for when a translation fault triggers an error.
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the error
 * dsisr: the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
        struct ocxl_context *ctx = (struct ocxl_context *) data;

        mutex_lock(&ctx->xsl_error_lock);
        ctx->xsl_error.addr = addr;
        ctx->xsl_error.dsisr = dsisr;
        ctx->xsl_error.count++;
        mutex_unlock(&ctx->xsl_error_lock);

        wake_up_all(&ctx->events_wq);
}

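/*
 * Attach the context to the current task's address space: add a
 * Process Element for our PASID on the link, so the AFU can issue
 * translated accesses against this mm. Fails with -EIO unless the
 * context is still in the OPENED state.
 */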
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
{
        int rc;

        // Locks both status & tidr
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
                rc = -EIO;
                goto out;
        }

        rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
                        current->mm->context.id, ctx->tidr, amr, current->mm,
                        xsl_fault_error, ctx);
        if (rc)
                goto out;

        ctx->status = ATTACHED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}

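/*
 * Fault handler for an AFU interrupt page: resolve the trigger page
 * for the given offset and map it at the faulting address.
 */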
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 trigger_addr;

        trigger_addr = ocxl_afu_irq_get_addr(ctx, offset);
        if (!trigger_addr)
                return VM_FAULT_SIGBUS;

        return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

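/*
 * Fault handler for the per-process MMIO area: compute this
 * context's slice of the area from its PASID offset and map the
 * faulting page. Only valid while the context is ATTACHED.
 */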
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 pp_mmio_addr;
        int pasid_off;
        vm_fault_t ret;

        if (offset >= ctx->afu->config.pp_mmio_stride)
                return VM_FAULT_SIGBUS;

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != ATTACHED) {
                mutex_unlock(&ctx->status_mutex);
                pr_debug("%s: Context not attached, failing mmio mmap\n",
                        __func__);
                return VM_FAULT_SIGBUS;
        }

        pasid_off = ctx->pasid - ctx->afu->pasid_base;
        pp_mmio_addr = ctx->afu->pp_mmio_start +
                pasid_off * ctx->afu->config.pp_mmio_stride +
                offset;

        ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
        mutex_unlock(&ctx->status_mutex);
        return ret;
}

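/*
 * Page-fault handler for the context's mmap area: offsets below
 * irq_base_offset belong to the per-process MMIO area, anything
 * above it to an AFU interrupt trigger page.
 */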
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ocxl_context *ctx = vma->vm_file->private_data;
        u64 offset;
        vm_fault_t ret;

        offset = vmf->pgoff << PAGE_SHIFT;
        pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
                ctx->pasid, vmf->address, offset);

        if (offset < ctx->afu->irq_base_offset)
                ret = map_pp_mmio(vma, vmf->address, offset, ctx);
        else
                ret = map_afu_irq(vma, vmf->address, offset, ctx);
        return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
        .fault = ocxl_mmap_fault,
};

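/*
 * Validate a vma covering an AFU interrupt trigger page: it must
 * span exactly one page, point at a valid interrupt, and be
 * write-only.
 */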
static int check_mmap_afu_irq(struct ocxl_context *ctx,
                struct vm_area_struct *vma)
{
        /* only one page */
        if (vma_pages(vma) != 1)
                return -EINVAL;

        /* check offset validity */
        if (!ocxl_afu_irq_get_addr(ctx, vma->vm_pgoff << PAGE_SHIFT))
                return -EINVAL;

        /*
         * The trigger page should only be accessible in write mode.
         *
         * It's a bit theoretical, as a page mmapped with only
         * PROT_WRITE is currently readable, but it doesn't hurt.
         */
        if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
            !(vma->vm_flags & VM_WRITE))
                return -EINVAL;
        vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
        return 0;
}

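/*
 * Validate a vma covering the per-process MMIO area: the requested
 * range must fit within this context's MMIO stride.
 */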
static int check_mmap_mmio(struct ocxl_context *ctx,
                struct vm_area_struct *vma)
{
        if ((vma_pages(vma) + vma->vm_pgoff) >
            (ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
                return -EINVAL;
        return 0;
}

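/*
 * mmap handler for the context: validate the requested range, then
 * set up a non-cached PFN mapping whose pages are filled in lazily
 * by ocxl_mmap_fault().
 */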
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
        int rc;

        if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
                rc = check_mmap_mmio(ctx, vma);
        else
                rc = check_mmap_afu_irq(ctx, vma);
        if (rc)
                return rc;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxl_vmops;
        return 0;
}

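/*
 * Detach the context: ask the AFU to terminate the PASID, then
 * remove the Process Element from the SPA. Safe to call on a
 * context that was never attached.
 */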
int ocxl_context_detach(struct ocxl_context *ctx)
{
        struct pci_dev *dev;
        int afu_control_pos;
        enum ocxl_context_status status;
        int rc;

        mutex_lock(&ctx->status_mutex);
        status = ctx->status;
        ctx->status = CLOSED;
        mutex_unlock(&ctx->status_mutex);
        if (status != ATTACHED)
                return 0;

        dev = to_pci_dev(ctx->afu->fn->dev.parent);
        afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

        mutex_lock(&ctx->afu->afu_control_lock);
        rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
        mutex_unlock(&ctx->afu->afu_control_lock);
        trace_ocxl_terminate_pasid(ctx->pasid, rc);
        if (rc) {
                /*
                 * If we time out waiting for the AFU to terminate
                 * the PASID, then it's dangerous to clean up the
                 * Process Element entry in the SPA, as it may still
                 * be referenced by the AFU, in which case we would
                 * checkstop because of an invalid PE access (FIR
                 * register 2, bit 42). So leave the PE defined. The
                 * caller shouldn't free the context, so that the
                 * PASID remains allocated.
                 *
                 * A link reset will be required to clean up the AFU
                 * and the SPA.
                 */
                if (rc == -EBUSY)
                        return rc;
        }
        rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
        if (rc) {
                dev_warn(&ctx->afu->dev,
                        "Couldn't remove PE entry cleanly: %d\n", rc);
        }
        return 0;
}

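/*
 * Force-detach every context of an AFU, e.g. when the driver is
 * unbound while the device is still in use.
 */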
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
        struct ocxl_context *ctx;
        int tmp;

        mutex_lock(&afu->contexts_lock);
        idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                ocxl_context_detach(ctx);
                /*
                 * We are force detaching - remove any active mmio
                 * mappings so userspace cannot interfere with the
                 * card if it comes back. Easiest way to exercise
                 * this is to unbind and rebind the driver via sysfs
                 * while it is in use.
                 */
                mutex_lock(&ctx->mapping_lock);
                if (ctx->mapping)
                        unmap_mapping_range(ctx->mapping, 0, 0, 1);
                mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
}

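/*
 * Free the context: release its PASID, free its AFU interrupts and
 * drop the AFU reference taken in ocxl_context_init().
 */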
void ocxl_context_free(struct ocxl_context *ctx)
{
        mutex_lock(&ctx->afu->contexts_lock);
        ctx->afu->pasid_count--;
        idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
        mutex_unlock(&ctx->afu->contexts_lock);

        ocxl_afu_irq_free_all(ctx);
        idr_destroy(&ctx->irq_idr);
        /* reference to the AFU taken in ocxl_context_init */
        ocxl_afu_put(ctx->afu);
        kfree(ctx);
}