/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IRQ subsystem internal functions and variables:
 *
 * Do not ever include this file from anything other than
 * kernel/irq/. Do not even think about using any information outside
 * of this file for your non-core code.
 */
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/pm_runtime.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_SPARSE_IRQ
# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
#else
# define IRQ_BITMAP_BITS	NR_IRQS
#endif

#define istate core_internal_state__do_not_mess_with_it

extern bool noirqdebug;

extern struct irqaction chained_action;

/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 * IRQTF_FORCED_THREAD - irq action is force threaded
 * IRQTF_READY     - signals that irq thread is ready
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
	IRQTF_READY,
};
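
/*
 * Illustrative sketch (not part of this header): the IRQTF_* bits live
 * in irqaction::thread_flags and are set/tested with the atomic bitops.
 * Waking the handler thread and consuming the flag looks roughly like
 * this (surrounding details elided):
 *
 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
 *	wake_up_process(action->thread);
 *	...
 *	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 *		action->thread_fn(irq, action->dev_id);
 */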

/*
 * Bit masks for desc->core_internal_state__do_not_mess_with_it
 *
 * IRQS_AUTODETECT		- autodetection in progress
 * IRQS_SPURIOUS_DISABLED	- was disabled due to spurious interrupt
 *				  detection
 * IRQS_POLL_INPROGRESS		- polling in progress
 * IRQS_ONESHOT			- irq is not unmasked in primary handler
 * IRQS_REPLAY			- irq is replayed
 * IRQS_WAITING			- irq is waiting
 * IRQS_PENDING			- irq is pending and replayed later
 * IRQS_SUSPENDED		- irq is suspended
 * IRQS_NMI			- irq line is used to deliver NMIs
 */
enum {
	IRQS_AUTODETECT		= 0x00000001,
	IRQS_SPURIOUS_DISABLED	= 0x00000002,
	IRQS_POLL_INPROGRESS	= 0x00000008,
	IRQS_ONESHOT		= 0x00000020,
	IRQS_REPLAY		= 0x00000040,
	IRQS_WAITING		= 0x00000080,
	IRQS_PENDING		= 0x00000200,
	IRQS_SUSPENDED		= 0x00000800,
	IRQS_TIMINGS		= 0x00001000,
	IRQS_NMI		= 0x00002000,
};
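
/*
 * Illustrative sketch (not part of this header): via the istate #define
 * above, these bits are reached as desc->istate. They are manipulated
 * under desc->lock with plain (non-atomic) operations, e.g. marking an
 * interrupt for a later replay:
 *
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	desc->istate |= IRQS_PENDING;
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 */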

#include "debug.h"
#include "settings.h"

extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
extern void __disable_irq(struct irq_desc *desc);
extern void __enable_irq(struct irq_desc *desc);

#define IRQ_RESEND	true
#define IRQ_NORESEND	false

#define IRQ_START_FORCE	true
#define IRQ_START_COND	false

extern int irq_activate(struct irq_desc *desc);
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);

extern void irq_shutdown(struct irq_desc *desc);
extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
extern void unmask_threaded_irq(struct irq_desc *desc);

#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
#else
extern void irq_mark_irq(unsigned int irq);
#endif

extern int __irq_get_irqchip_state(struct irq_data *data,
				   enum irqchip_irq_state which,
				   bool *state);

extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

/* Resending of interrupts: */
int check_irq_resend(struct irq_desc *desc, bool inject);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif

extern bool irq_can_set_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);

extern int irq_do_set_affinity(struct irq_data *data,
			       const struct cpumask *dest, bool force);

#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif

/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
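
/*
 * Illustrative sketch (not part of this header): callers bracket any
 * operation that may have to reach an irq chip behind a slow bus with
 * this pair, from a context that is allowed to sleep:
 *
 *	chip_bus_lock(desc);
 *	// ... program the chip, e.g. over I2C/SPI ...
 *	chip_bus_sync_unlock(desc);
 */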

#define _IRQ_DESC_CHECK		(1 << 0)
#define _IRQ_DESC_PERCPU	(1 << 1)

#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)

#define for_each_action_of_desc(desc, act)	\
	for (act = desc->action; act; act = act->next)
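
/*
 * Illustrative sketch (not part of this header): iterating the actions
 * sharing one interrupt line, with desc->lock held:
 *
 *	struct irqaction *action;
 *
 *	for_each_action_of_desc(desc, action)
 *		pr_cont(" %s", action->name);
 */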

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);

static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}

static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}

static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}

static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
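
/*
 * Illustrative sketch (not part of this header): the usual
 * lookup/lock/modify/unlock pattern built on these helpers:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc;
 *
 *	desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 *	if (!desc)
 *		return -EINVAL;
 *	// ... modify the descriptor state ...
 *	irq_put_desc_busunlock(desc, flags);
 */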

#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline unsigned int irqd_get(struct irq_data *d)
{
	return __irqd_to_state(d);
}

/*
 * Manipulation functions for irq_data.state
 */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_clr_move_pending(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
}

static inline void irqd_set_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clr_managed_shutdown(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) &= ~mask;
}

static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	__irqd_to_state(d) |= mask;
}

static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
{
	return __irqd_to_state(d) & mask;
}

static inline void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static inline void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

#undef __irqd_to_state

static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__this_cpu_inc(*desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
	__kstat_incr_irqs_this_cpu(desc);
	desc->tot_count++;
}

static inline int irq_desc_get_node(struct irq_desc *desc)
{
	return irq_common_data_get_node(&desc->irq_common_data);
}

static inline int irq_desc_is_chained(struct irq_desc *desc)
{
	return (desc->action && desc->action == &chained_action);
}

#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif

#ifdef CONFIG_IRQ_TIMINGS

#define IRQ_TIMINGS_SHIFT	5
#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)

/**
 * struct irq_timings - irq timings storing structure
 * @values:	a circular buffer of u64 encoded <timestamp, irq> values
 * @count:	the running count of recorded events; the buffer slot for
 *		an event is @count masked with IRQ_TIMINGS_MASK
 */
struct irq_timings {
	u64	values[IRQ_TIMINGS_SIZE];
	int	count;
};

DECLARE_PER_CPU(struct irq_timings, irq_timings);

extern void irq_timings_free(int irq);
extern int irq_timings_alloc(int irq);

static inline void irq_remove_timings(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_TIMINGS;

	irq_timings_free(irq_desc_get_irq(desc));
}

static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
{
	int irq = irq_desc_get_irq(desc);
	int ret;

	/*
	 * We don't need the measurement because the idle code already
	 * knows the next expiry event.
	 */
	if (act->flags & __IRQF_TIMER)
		return;

	/*
	 * If the timing allocation fails, just warn rather than fail,
	 * so the system can boot anyway.
	 */
	ret = irq_timings_alloc(irq);
	if (ret) {
		pr_warn("Failed to allocate irq timing stats for irq%d (%d)\n",
			irq, ret);
		return;
	}

	desc->istate |= IRQS_TIMINGS;
}

extern void irq_timings_enable(void);
extern void irq_timings_disable(void);

DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);

/*
 * The interrupt number and the timestamp are encoded into a single
 * u64 variable to optimize the size.
 * A 48-bit timestamp and a 16-bit IRQ number are more than sufficient:
 * who cares about an IRQ after 78 hours of idle time?
 */
static inline u64 irq_timing_encode(u64 timestamp, int irq)
{
	return (timestamp << 16) | irq;
}

static inline int irq_timing_decode(u64 value, u64 *timestamp)
{
	*timestamp = value >> 16;
	return value & U16_MAX;
}
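
/*
 * Illustrative sketch (not part of this header): encode and decode are
 * inverses as long as the irq number fits in 16 bits; the timestamp
 * comes back truncated to its low 48 bits:
 *
 *	u64 ts;
 *	u64 value = irq_timing_encode(local_clock(), irq);
 *	int decoded_irq = irq_timing_decode(value, &ts);
 *	// decoded_irq == irq; ts == low 48 bits of the local_clock() value
 */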

static __always_inline void irq_timings_push(u64 ts, int irq)
{
	struct irq_timings *timings = this_cpu_ptr(&irq_timings);

	timings->values[timings->count & IRQ_TIMINGS_MASK] =
		irq_timing_encode(ts, irq);

	timings->count++;
}

/*
 * The function record_irq_time is only called in one place in the
 * interrupt handler. We want this function always inlined so its body
 * is embedded in the caller and the static key branch can be resolved
 * there. Without the explicit __always_inline we could end up with a
 * function call and a small overhead in the hotpath for nothing.
 */
static __always_inline void record_irq_time(struct irq_desc *desc)
{
	if (!static_branch_likely(&irq_timing_enabled))
		return;

	if (desc->istate & IRQS_TIMINGS)
		irq_timings_push(local_clock(), irq_desc_get_irq(desc));
}
#else
static inline void irq_remove_timings(struct irq_desc *desc) {}
static inline void irq_setup_timings(struct irq_desc *desc,
				     struct irqaction *act) {}
static inline void record_irq_time(struct irq_desc *desc) {}
#endif /* CONFIG_IRQ_TIMINGS */


#ifdef CONFIG_GENERIC_IRQ_CHIP
void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler);
#else
static inline void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return desc->pending_mask;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return irqd_is_handle_enforce_irqctx(data);
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return true;
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return false;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
}
static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
	return NULL;
}
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
	return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */

#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
{
	irqd_set_activated(data);
	return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
	irqd_clr_activated(data);
}
#endif

static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	return irqd->parent_data;
#else
	return NULL;
#endif
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>

void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
	debugfs_remove(desc->debugfs_file);
	kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
static inline void irq_domain_debugfs_init(struct dentry *root)
{
}
# endif
#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
{
}
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */