// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "intr.h"

/* Wait list management */

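/*
 * A waiter moves through these states in order: the normal path is
 * PENDING -> REMOVED (taken off the wait queue) -> HANDLED (action run),
 * and cancellation takes PENDING -> CANCELLED -> HANDLED. The values are
 * consecutive so that atomic_inc_return() performs either the
 * PENDING -> REMOVED or the CANCELLED -> HANDLED transition.
 */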
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
				struct list_head *queue)
{
	struct host1x_waitlist *pos;
	u32 thresh = waiter->thresh;

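	/*
	 * The signed difference makes the ordering robust against syncpt
	 * value wraparound: thresholds less than 2^31 apart still compare
	 * correctly.
	 */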
	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists, one per action type
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate consecutive submit cleanups for one channel */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			/* already cancelled, or consolidated away: drop it */
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}

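/*
 * Re-arm the threshold interrupt at the lowest remaining threshold, i.e.
 * that of the first waiter still queued (the queue is sorted).
 */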
static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* the trace records how many submits this waiter consolidated */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

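/*
 * Indexed by enum host1x_intr_action; the order here must match the
 * enumerators, since waiter->action is used directly as an index.
 */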
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
};

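/*
 * Run the action for every waiter on the completed lists. This is called
 * after the syncpt interrupt lock has been dropped, so handlers may sleep
 * or take other locks.
 */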
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	unsigned int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			/* complete the REMOVED -> HANDLED transition */
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/*
 * Remove & handle all waiters that have completed for the given syncpt.
 * Returns nonzero if the wait list is now empty.
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	run_handlers(completed);

	return empty;
}

/*
 * Sync point threshold interrupt work function
 * Handles sync point threshold triggers, in workqueue context
 */

static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}

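/*
 * Schedule 'action' to run when the syncpt reaches 'thresh'. The caller
 * provides the pre-allocated waiter; if 'ref' is non-NULL it receives an
 * extra reference that must later be dropped via host1x_intr_put_ref().
 */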
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
			   u32 thresh, enum host1x_intr_action action,
			   void *data, struct host1x_waitlist *waiter,
			   void **ref)
{
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}

void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

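	/*
	 * Cancel a still-pending waiter (PENDING -> CANCELLED). If the
	 * waiter is momentarily in the REMOVED state it is being handled
	 * right now; yield until that finishes and it reaches HANDLED.
	 */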
	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	syncpt = host->syncpt + id;
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));

	kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02u", id);
	}

	host1x_intr_start(host);

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
}

void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
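	/* DIV_ROUND_UP(hz, 1000000): pass the rate as clocks per microsecond */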
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

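	/*
	 * Reap waiters that have already been cancelled (CANCELLED ->
	 * HANDLED); any waiter still in another state is in active use.
	 */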
	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* a waiter is still pending; keep the IRQ in place */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}