// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

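/*
 * IPI callback: runs on each targeted hart to flush that hart's local
 * instruction cache. The info argument is unused.
 */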
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

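/*
 * Flush the instruction cache on every hart: flush locally first, then
 * reach the remote harts either through the SBI firmware interface or,
 * when SBI is not available, by sending IPIs.
 */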
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
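	/*
	 * If no other harts are currently running this MM, the flush can be
	 * handled entirely on this hart.
	 */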
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

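		/* SBI takes hartids rather than logical CPU ids; translate. */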
		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
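/*
 * Ensure the icache has been flushed at least once for this page; the
 * PG_dcache_clean page flag records that the flush has already happened,
 * so subsequent calls for the same page are no-ops.
 */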
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */