// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
| 11 | |
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>
#include <linux/vmalloc.h>

#include <asm/cpu_entry_area.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
| 25 | |
/* Per-CPU, page-aligned backing storage for the hard IRQ stack. */
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
/* NOTE(review): presumably required so early per-cpu init can locate the store — confirm. */
DECLARE_INIT_PER_CPU(irq_stack_backing_store);
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 28 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 29 | #ifdef CONFIG_VMAP_STACK |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 30 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 31 | * VMAP the backing store with guard pages |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 32 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 33 | static int map_irq_stack(unsigned int cpu) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 34 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 35 | char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu); |
| 36 | struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE]; |
| 37 | void *va; |
| 38 | int i; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 39 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 40 | for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) { |
| 41 | phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 42 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 43 | pages[i] = pfn_to_page(pa >> PAGE_SHIFT); |
| 44 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 45 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 46 | va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); |
| 47 | if (!va) |
| 48 | return -ENOMEM; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 49 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 50 | per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE; |
| 51 | return 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 52 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 53 | #else |
| 54 | /* |
| 55 | * If VMAP stacks are disabled due to KASAN, just use the per cpu |
| 56 | * backing store without guard pages. |
| 57 | */ |
| 58 | static int map_irq_stack(unsigned int cpu) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 59 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 60 | void *va = per_cpu_ptr(&irq_stack_backing_store, cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 61 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 62 | per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE; |
| 63 | return 0; |
| 64 | } |
| 65 | #endif |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 66 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 67 | int irq_init_percpu_irqstack(unsigned int cpu) |
| 68 | { |
| 69 | if (per_cpu(hardirq_stack_ptr, cpu)) |
| 70 | return 0; |
| 71 | return map_irq_stack(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 72 | } |