blob: 3440ddf21c8b43fa9a5f8a9514a5e63c4cc2d549 [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001/* SPDX-License-Identifier: GPL-2.0-or-later */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/*
3 * vDSO provided cache flush routines
4 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
6 * IBM Corp.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007 */
8#include <asm/processor.h>
9#include <asm/ppc_asm.h>
10#include <asm/vdso.h>
Olivier Deprez157378f2022-04-04 15:47:50 +020011#include <asm/vdso_datapage.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000012#include <asm/asm-offsets.h>
Olivier Deprez157378f2022-04-04 15:47:50 +020013#include <asm/cache.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000014
	.text

/*
 * Default "generic" version of __kernel_sync_dicache.
 *
 * void __kernel_sync_dicache(unsigned long start, unsigned long end)
 *
 * Flushes the data cache & invalidates the instruction cache for the
 * provided range [start, end[ (half-open), so that newly written
 * instructions become visible to instruction fetch.
 *
 * In:  r3 = start, r4 = end
 * Out: r3 = 0, cr0.so cleared (vDSO "no error" convention)
 */
V_FUNCTION_BEGIN(__kernel_sync_dicache)
  .cfi_startproc
#ifdef CONFIG_PPC64
	/*
	 * 64-bit: cache geometry is read from the vDSO data page.
	 * get_datapage clobbers LR, so preserve it in r12 around the call.
	 */
	mflr	r12
  .cfi_register lr,r12
	get_datapage	r10, r0		/* r10 = vDSO data page, r0 scratch */
	mtlr	r12
#endif

#ifdef CONFIG_PPC64
	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)	/* r7 = D-cache block size */
	addi	r5,r7,-1			/* r5 = block-alignment mask */
#else
	/* 32-bit: cache geometry is a compile-time constant. */
	li	r5, L1_CACHE_BYTES - 1		/* r5 = block-alignment mask */
#endif
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
#ifdef CONFIG_PPC64
	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)	/* r9 = log2(block size) */
	srw.	r8,r8,r9		/* compute line count */
#else
	srwi.	r8, r8, L1_CACHE_SHIFT	/* r8 = line count */
	mr	r7, r6			/* save aligned start for icbi loop below */
#endif
	crclr	cr0*4+so		/* clear SO: signal success to callers */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6			/* flush D-cache block to memory */
#ifdef CONFIG_PPC64
	add	r6,r6,r7		/* advance by runtime block size */
#else
	addi	r6, r6, L1_CACHE_BYTES
#endif
	bdnz	1b
	sync				/* order dcbst stores before icbi pass */

/* Now invalidate the instruction cache */

#ifdef CONFIG_PPC64
	/*
	 * 64-bit: the I-cache block size may differ from the D-cache one,
	 * so redo the alignment/line-count computation from the data page.
	 */
	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)	/* r7 = I-cache block size */
	addi	r5,r7,-1			/* r5 = block-alignment mask */
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
	srw.	r8,r8,r9		/* compute line count */
	crclr	cr0*4+so
	beqlr				/* nothing to do? */
#endif
	mtctr	r8			/* 32-bit: r8 still holds the count from above */
#ifdef CONFIG_PPC64
2:	icbi	0,r6
	add	r6,r6,r7		/* advance by I-cache block size */
#else
	/*
	 * 32-bit: I- and D-cache lines are both L1_CACHE_BYTES here, so
	 * reuse the aligned start saved in r7 and the line count in r8.
	 */
2:	icbi	0, r7
	addi	r7, r7, L1_CACHE_BYTES
#endif
	bdnz	2b
	isync				/* discard any prefetched stale instructions */
	li	r3,0			/* return 0 (success) */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache)
89
90
/*
 * POWER5 version of __kernel_sync_dicache
 *
 * void __kernel_sync_dicache(unsigned long start, unsigned long end)
 *
 * No per-cache-line maintenance is performed: a single sync + isync pair
 * covers the whole range.  NOTE(review): presumably sufficient because
 * this variant is only selected on CPUs whose I-cache snoops stores —
 * confirm against the CPU-feature selection code.
 *
 * Out: r3 = 0, cr0.so cleared (vDSO "no error" convention)
 */
V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
  .cfi_startproc
	crclr	cr0*4+so	/* clear SO: signal success to callers */
	sync			/* make prior stores visible */
	isync			/* discard any prefetched stale instructions */
	li	r3,0		/* return 0 (success) */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache_p5)
103