// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/eventfd.h>
#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"

struct afu_irq {
	int id;
	int hw_irq;
	unsigned int virq;
	char *name;
	u64 trigger_page;
	struct eventfd_ctx *ev_ctx;
};

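/*
 * Each irq of a context is exposed to userspace as a page-aligned
 * offset within the context's mmap area. The two helpers below
 * convert between an irq ID and that offset, one page per interrupt.
 */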
static int irq_offset_to_id(struct ocxl_context *ctx, u64 offset)
{
	return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT;
}

static u64 irq_id_to_offset(struct ocxl_context *ctx, int id)
{
	return ctx->afu->irq_base_offset + (id << PAGE_SHIFT);
}

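/*
 * Top-half handler for an AFU interrupt: trace it and, if userspace
 * attached an eventfd through ocxl_afu_irq_set_fd(), signal it.
 */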
static irqreturn_t afu_irq_handler(int virq, void *data)
{
	struct afu_irq *irq = (struct afu_irq *) data;

	trace_ocxl_afu_irq_receive(virq);
	if (irq->ev_ctx)
		eventfd_signal(irq->ev_ctx, 1);
	return IRQ_HANDLED;
}

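/*
 * Map the hardware irq in the default irq domain, name it and
 * register the handler. Undone by release_afu_irq().
 */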
static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq)
{
	int rc;

	irq->virq = irq_create_mapping(NULL, irq->hw_irq);
	if (!irq->virq) {
		pr_err("irq_create_mapping failed\n");
		return -ENOMEM;
	}
	pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq);

	irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq);
	if (!irq->name) {
		irq_dispose_mapping(irq->virq);
		return -ENOMEM;
	}

	rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq);
	if (rc) {
		kfree(irq->name);
		irq->name = NULL;
		irq_dispose_mapping(irq->virq);
		pr_err("request_irq failed: %d\n", rc);
		return rc;
	}
	return 0;
}

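/*
 * Undo setup_afu_irq(): unregister the handler, dispose of the virq
 * mapping and free the irq name.
 */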
static void release_afu_irq(struct afu_irq *irq)
{
	free_irq(irq->virq, irq);
	irq_dispose_mapping(irq->virq);
	kfree(irq->name);
}

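/*
 * Allocate a new AFU irq for the context and return its page-aligned
 * offset through *irq_offset. A sketch of the intended flow, assuming
 * the ocxl character device exposes this through the uapi
 * OCXL_IOCTL_IRQ_ALLOC ioctl (hypothetical userspace code):
 *
 *	__u64 irq_offset;
 *
 *	ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, &irq_offset);
 *	trig = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, irq_offset);
 *	*(volatile __u64 *)trig = 0;	(any MMIO write raises the irq)
 */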
int ocxl_afu_irq_alloc(struct ocxl_context *ctx, u64 *irq_offset)
{
	struct afu_irq *irq;
	int rc;

	irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	/*
	 * We limit the number of afu irqs per context and per link to
	 * avoid a single process or user depleting the pool of IPIs
	 */

	mutex_lock(&ctx->irq_lock);

	irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT,
			GFP_KERNEL);
	if (irq->id < 0) {
		rc = -ENOSPC;
		goto err_unlock;
	}

	rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq,
				&irq->trigger_page);
	if (rc)
		goto err_idr;

	rc = setup_afu_irq(ctx, irq);
	if (rc)
		goto err_alloc;

	*irq_offset = irq_id_to_offset(ctx, irq->id);

	trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq,
				*irq_offset);
	mutex_unlock(&ctx->irq_lock);
	return 0;

err_alloc:
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
err_idr:
	idr_remove(&ctx->irq_idr, irq->id);
err_unlock:
	mutex_unlock(&ctx->irq_lock);
	kfree(irq);
	return rc;
}

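/*
 * Tear down one irq: remove any userspace mapping of its trigger
 * page, free the linux irq, drop the eventfd reference and hand the
 * hardware irq back to the link. Caller holds ctx->irq_lock.
 */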
static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx)
{
	trace_ocxl_afu_irq_free(ctx->pasid, irq->id);
	if (ctx->mapping)
		unmap_mapping_range(ctx->mapping,
				irq_id_to_offset(ctx, irq->id),
				1 << PAGE_SHIFT, 1);
	release_afu_irq(irq);
	if (irq->ev_ctx)
		eventfd_ctx_put(irq->ev_ctx);
	ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq);
	kfree(irq);
}

int ocxl_afu_irq_free(struct ocxl_context *ctx, u64 irq_offset)
{
	struct afu_irq *irq;
	int id = irq_offset_to_id(ctx, irq_offset);

	mutex_lock(&ctx->irq_lock);

	irq = idr_find(&ctx->irq_idr, id);
	if (!irq) {
		mutex_unlock(&ctx->irq_lock);
		return -EINVAL;
	}
	idr_remove(&ctx->irq_idr, irq->id);
	afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
	return 0;
}

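/*
 * Free every irq still allocated when the context is released.
 * Entries are not removed from the idr here; the caller is expected
 * to destroy the whole idr once the context is torn down.
 */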
void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
{
	struct afu_irq *irq;
	int id;

	mutex_lock(&ctx->irq_lock);
	idr_for_each_entry(&ctx->irq_idr, irq, id)
		afu_irq_free(irq, ctx);
	mutex_unlock(&ctx->irq_lock);
}

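/*
 * Attach an eventfd to an allocated irq, so that afu_irq_handler()
 * has somewhere to deliver the event.
 */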
int ocxl_afu_irq_set_fd(struct ocxl_context *ctx, u64 irq_offset, int eventfd)
{
	struct afu_irq *irq;
	struct eventfd_ctx *ev_ctx;
	int rc = 0, id = irq_offset_to_id(ctx, irq_offset);

	mutex_lock(&ctx->irq_lock);
	irq = idr_find(&ctx->irq_idr, id);
	if (!irq) {
		rc = -EINVAL;
		goto unlock;
	}

	ev_ctx = eventfd_ctx_fdget(eventfd);
	if (IS_ERR(ev_ctx)) {
		rc = -EINVAL;
		goto unlock;
	}

	/* Don't leak the reference if an eventfd was already attached */
	if (irq->ev_ctx)
		eventfd_ctx_put(irq->ev_ctx);
	irq->ev_ctx = ev_ctx;
unlock:
	mutex_unlock(&ctx->irq_lock);
	return rc;
}

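/*
 * Return the address of the trigger page for the irq at irq_offset,
 * or 0 if no irq is allocated there. Used when mapping the trigger
 * page into the process address space.
 */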
u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, u64 irq_offset)
{
	struct afu_irq *irq;
	int id = irq_offset_to_id(ctx, irq_offset);
	u64 addr = 0;

	mutex_lock(&ctx->irq_lock);
	irq = idr_find(&ctx->irq_idr, id);
	if (irq)
		addr = irq->trigger_page;
	mutex_unlock(&ctx->irq_lock);
	return addr;
}