/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the power_save function for 6xx & 7xxx CPUs
 * rewritten in assembler
 *
 * Warning ! This code assumes that if your machine has a 750fx
 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 * if this is not the case some additional changes will have to
 * be done to check a runtime var (a bit like powersave-nap)
 */
11
12#include <linux/threads.h>
13#include <asm/reg.h>
14#include <asm/page.h>
15#include <asm/cputable.h>
16#include <asm/thread_info.h>
17#include <asm/ppc_asm.h>
18#include <asm/asm-offsets.h>
19#include <asm/feature-fixups.h>
20
21 .text
22
23/*
24 * Init idle, called at early CPU setup time from head.S for each CPU
25 * Make sure no rest of NAP mode remains in HID0, save default
26 * values for some CPU specific registers. Called with r24
27 * containing CPU number and r3 reloc offset
28 */
/*
 * init_idle_6xx: per-CPU early idle setup, called from head.S.
 * In:  r24 = CPU number, r3 = relocation offset (MMU may not be up yet,
 *      so globals are addressed as physical = virtual + r3).
 * Clears any stale NAP enable left in HID0, then snapshots the CPU's
 * MSSCR0/HID1 into the per-CPU nap_save_* slots so the wakeup path
 * (power_save_ppc32_restore) can put them back after NAP.
 */
_GLOBAL(init_idle_6xx)
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_HID0
	rlwinm	r4,r4,0,10,8	/* Clear NAP (bit 9) without touching other HID0 bits */
	mtspr	SPRN_HID0, r4
	b	1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	blr			/* CPU can't NAP: nothing to save, done */
1:
	/* r5 = per-CPU table offset: CPU number * 4, plus reloc offset */
	slwi	r5,r24,2
	add	r5,r5,r3
BEGIN_FTR_SECTION
	/* 745x with the L2-prefetch NAP errata: save default MSSCR0 */
	mfspr	r4,SPRN_MSSCR0
	addis	r6,r5, nap_save_msscr0@ha
	stw	r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	/* Dual-PLL 750FX: save default HID1 (PLL selection) */
	mfspr	r4,SPRN_HID1
	addis	r6,r5,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	blr
51
52/*
53 * Here is the power_save_6xx function. This could eventually be
54 * split into several functions & changing the function pointer
55 * depending on the various features.
56 */
/*
 * ppc6xx_idle: enter the deepest available power-save mode (NAP, else
 * DOZE) by setting the corresponding HID0 enable and then setting
 * MSR[POW]. Returns immediately (beqlr) if neither mode is usable.
 * Wakeup is via exception: _TLF_NAPPING makes the exception entry code
 * hand control to power_save_ppc32_restore, which returns to our caller.
 */
_GLOBAL(ppc6xx_idle)
	/* Check if we can nap or doze, put HID0 mask in r3
	 */
	lis	r3, 0			/* r3 = 0: no power-save mode selected yet */
BEGIN_FTR_SECTION
	lis	r3,HID0_DOZE@h		/* DOZE available: use it as the default */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	/* We must dynamically check for the NAP feature as it
	 * can be cleared by CPU init after the fixups are done
	 */
	lis	r4,cur_cpu_spec@ha
	lwz	r4,cur_cpu_spec@l(r4)
	lwz	r4,CPU_SPEC_FEATURES(r4)
	andi.	r0,r4,CPU_FTR_CAN_NAP
	beq	1f
	/* Now check if user or arch enabled NAP mode */
	lis	r4,powersave_nap@ha
	lwz	r4,powersave_nap@l(r4)
	cmpwi	0,r4,0
	beq	1f
	lis	r3,HID0_NAP@h		/* NAP enabled: upgrade from DOZE to NAP */
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	cmpwi	0,r3,0
	beqlr				/* no mode available: return to caller */

	/* Some pre-nap cleanups needed on some CPUs */
	andis.	r0,r3,HID0_NAP@h
	beq	2f			/* cleanups below apply to NAP only */
BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As explained by errata
	 * text, we can't be sure they are, we just hope very hard
	 * that well be enough (sic !). At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29		/* clear MSSCR0 bits 30-31 (L2 prefetch enables) */
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	/* flush a (known-cacheable) line repeatedly to drain the prefetchers */
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
2:
BEGIN_FTR_SECTION
	/* Go to low speed mode on some 750FX */
	lis	r4,powersave_lowspeed@ha
	lwz	r4,powersave_lowspeed@l(r4)
	cmpwi	0,r4,0
	beq	1f			/* low-speed mode not requested by user */
	mfspr	r4,SPRN_HID1
	oris	r4,r4,0x0001		/* select PLL1 (assumed preset to low speed) */
	mtspr	SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)

	/* Go to NAP or DOZE now */
	mfspr	r4,SPRN_HID0
	lis	r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
	oris	r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
	andc	r4,r4,r5		/* clear all power-save enables ... */
	or	r4,r4,r3		/* ... then set only the chosen one */
BEGIN_FTR_SECTION
	oris	r4,r4,HID0_DPM@h	/* that should be done once for all  */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	mtspr	SPRN_HID0,r4
BEGIN_FTR_SECTION
	DSSALL				/* stop all AltiVec data streams before napping */
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r8,TI_LOCAL_FLAGS(r2)	/* set napping bit */
	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
	stw	r8,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
	mfmsr	r7
	ori	r7,r7,MSR_EE		/* interrupts must be enabled to wake us */
	oris	r7,r7,MSR_POW@h		/* setting POW actually enters NAP/DOZE */
1:	sync
	mtmsr	r7
	isync
	b	1b			/* loop: re-enter power-save if we fall through */
145
146/*
147 * Return from NAP/DOZE mode, restore some CPU specific registers,
148 * we are called with DR/IR still off and r2 containing physical
149 * address of current. R11 points to the exception frame (physical
150 * address). We have to preserve r10.
151 */
/*
 * power_save_ppc32_restore: wakeup-from-NAP/DOZE fixup, entered from the
 * exception prologue when _TLF_NAPPING was set. Called with IR/DR still
 * off (real mode): r2 = physical address of current, r11 = physical
 * address of the exception frame. r10 must be preserved (and is: only
 * r9/r11 are used here). Rewrites the saved NIP so the interrupted
 * ppc6xx_idle appears to return (blr) to its caller, then restores the
 * per-CPU MSSCR0/HID1 defaults saved by init_idle_6xx.
 */
_GLOBAL(power_save_ppc32_restore)
	lwz	r9,_LINK(r11)		/* interrupted in ppc6xx_idle: */
	stw	r9,_NIP(r11)		/* make it do a blr */

#ifdef CONFIG_SMP
	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
	slwi	r11,r11,2
#else
	li	r11,0			/* UP: single save slot at offset 0 */
#endif
	/* Todo make sure all these are in the same page
	 * and load r11 (@ha part + CPU offset) only once
	 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_HID0
	andis.	r9,r9,HID0_NAP@h
	beq	1f			/* we only DOZEd: MSSCR0 was untouched */
	/* real-mode access: subtract KERNELBASE to get the physical address */
	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
	lwz	r9,nap_save_msscr0@l(r9)
	mtspr	SPRN_MSSCR0, r9		/* re-enable L2 prefetch (undo errata workaround) */
	sync
	isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
	lwz	r9,nap_save_hid1@l(r9)
	mtspr	SPRN_HID1, r9		/* back to full-speed PLL on 750FX */
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	b	transfer_to_handler_cont
182
.data

/* Per-CPU saved default MSSCR0 (one word per CPU), written by
 * init_idle_6xx, restored on NAP wakeup by power_save_ppc32_restore. */
_GLOBAL(nap_save_msscr0)
	.space	4*NR_CPUS

/* Per-CPU saved default HID1 (PLL selection) for dual-PLL 750FX. */
_GLOBAL(nap_save_hid1)
	.space	4*NR_CPUS

/* User-settable flag: non-zero selects low-speed PLL1 while napping
 * on 750FX (read by ppc6xx_idle). */
_GLOBAL(powersave_lowspeed)
	.long	0