// SPDX-License-Identifier: GPL-2.0
/*
 * Suspend support specific for s390.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
 */

#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <asm/ctl_reg.h>
#include <asm/ipl.h>
#include <asm/cio.h>
#include <asm/sections.h>
#include "entry.h"

/*
 * The restore of the saved pages in a hibernation image will set
 * the change and referenced bits in the storage key for each page.
 * Overindication of the referenced bits after a hibernation cycle
 * does not cause any harm, but overindication of the change bits
 * would cause trouble.
 * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
 * page to the most significant byte of the associated page frame
 * number in the hibernation image.
 */

/*
 * Key storage is allocated as a linked list of pages.
 * The size of the keys array in each node is
 * (PAGE_SIZE - sizeof(struct page_key_data *)).
 */
struct page_key_data {
	struct page_key_data *next;
	unsigned char data[];
};

#define PAGE_KEY_DATA_SIZE	(PAGE_SIZE - sizeof(struct page_key_data *))
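/* With 4 KiB pages and 8-byte pointers each node holds 4088 key bytes. */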
| 41 | |
| 42 | static struct page_key_data *page_key_data; |
| 43 | static struct page_key_data *page_key_rp, *page_key_wp; |
| 44 | static unsigned long page_key_rx, page_key_wx; |
| 45 | unsigned long suspend_zero_pages; |
| 46 | |
| 47 | /* |
| 48 | * For each page in the hibernation image one additional byte is |
| 49 | * stored in the most significant byte of the page frame number. |
| 50 | * On suspend no additional memory is required but on resume the |
| 51 | * keys need to be memorized until the page data has been restored. |
| 52 | * Only then can the storage keys be set to their old state. |
| 53 | */ |
| 54 | unsigned long page_key_additional_pages(unsigned long pages) |
| 55 | { |
| 56 | return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE); |
| 57 | } |
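
/*
 * Example (assuming 4 KiB pages, so PAGE_KEY_DATA_SIZE == 4088): an
 * image of 262144 pages (1 GiB) needs DIV_ROUND_UP(262144, 4088) = 65
 * additional pages of key storage.
 */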

/*
 * Free page_key_data list of arrays.
 */
void page_key_free(void)
{
	struct page_key_data *pkd;

	while (page_key_data) {
		pkd = page_key_data;
		page_key_data = pkd->next;
		free_page((unsigned long) pkd);
	}
}

/*
 * Allocate page_key_data list of arrays with enough room to store
 * one byte for each page in the hibernation image.
 */
int page_key_alloc(unsigned long pages)
{
	struct page_key_data *pk;
	unsigned long size;

	size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
	while (size--) {
		pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
		if (!pk) {
			page_key_free();
			return -ENOMEM;
		}
		pk->next = page_key_data;
		page_key_data = pk;
	}
	page_key_rp = page_key_wp = page_key_data;
	page_key_rx = page_key_wx = 0;
	return 0;
}
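
/*
 * Sketch of how the hibernation core is expected to drive these hooks
 * (simplified; the actual callers live in kernel/power/snapshot.c
 * behind CONFIG_ARCH_SAVE_PAGE_KEYS):
 *
 *	page_key_alloc(nr_pages);	// on resume, before the image is read
 *	page_key_memorize(&pfn);	// for each pfn entry read from the image
 *	page_key_write(address);	// for each page once its data is restored
 *	page_key_free();		// after all keys have been applied
 *
 * On suspend only page_key_read() is used, tagging each pfn entry with
 * its storage key while the image is written.
 */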

/*
 * Save the storage key into the upper 8 bits of the page frame number:
 * the lower 7 bits hold the storage key, the top bit records the
 * page's nodat state so that it can be re-established on resume.
 */
void page_key_read(unsigned long *pfn)
{
	struct page *page;
	unsigned long addr;
	unsigned char key;

	page = pfn_to_page(*pfn);
	addr = (unsigned long) page_address(page);
	key = (unsigned char) page_get_storage_key(addr) & 0x7f;
	if (arch_test_page_nodat(page))
		key |= 0x80;
	*(unsigned char *) pfn = key;
}

/*
 * Extract the storage key from the upper 8 bits of the page frame number
 * and store it in the page_key_data list of arrays.
 */
void page_key_memorize(unsigned long *pfn)
{
	page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
	*(unsigned char *) pfn = 0;
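	/* Advance the write cursor; move to the next array once this one is full. */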
	if (++page_key_wx < PAGE_KEY_DATA_SIZE)
		return;
	page_key_wp = page_key_wp->next;
	page_key_wx = 0;
}

/*
 * Get the next key from the page_key_data list of arrays and set the
 * storage key of the page referred to by @address. If @address refers
 * to a "safe" page the swsusp_arch_resume code will transfer the
 * storage key from the buffer page to the original page.
 */
void page_key_write(void *address)
{
	struct page *page;
	unsigned char key;

	key = page_key_rp->data[page_key_rx];
	page_set_storage_key((unsigned long) address, key & 0x7f, 0);
	page = virt_to_page(address);
	if (key & 0x80)
		arch_set_page_nodat(page, 0);
	else
		arch_set_page_dat(page, 0);
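	/* Advance the read cursor; move to the next array once this one is drained. */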
	if (++page_key_rx < PAGE_KEY_DATA_SIZE)
		return;
	page_key_rp = page_key_rp->next;
	page_key_rx = 0;
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
	unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
	unsigned long end_rodata_pfn = PFN_DOWN(__pa(__end_rodata)) - 1;
	unsigned long stext_pfn = PFN_DOWN(__pa(_stext));

	/* Always save lowcore pages (LC protection might be enabled). */
	if (pfn <= LC_PAGES)
		return 0;
	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
		return 1;
	/* Skip memory holes and read-only pages (DCSS, ...). */
	if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
		return 0;
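	/* Probe with TEST PROTECTION: frames that are absent or not fully
	 * accessible (memory holes, read-only DCSS segments) are skipped. */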
	if (tprot(PFN_PHYS(pfn)))
		return 1;
	return 0;
}

/*
 * PM notifier callback for suspend and hibernation.
 */
static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
			 void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
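		/* Scratch buffer of 1 << LC_ORDER pages kept across the
		 * suspend/resume cycle (suspend_zero_pages is consumed
		 * outside this file); failing the allocation aborts the
		 * transition via NOTIFY_BAD. */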
		suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
		if (!suspend_zero_pages)
			return NOTIFY_BAD;
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		free_pages(suspend_zero_pages, LC_ORDER);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static int __init suspend_pm_init(void)
{
	pm_notifier(suspend_pm_cb, 0);
	return 0;
}
arch_initcall(suspend_pm_init);

void save_processor_state(void)
{
	/* swsusp_arch_suspend() actually saves all cpu register contents.
	 * Machine checks must be disabled since swsusp_arch_suspend() stores
	 * register contents to their lowcore save areas. That's the same
	 * place where register contents on machine checks would be saved.
	 * To avoid register corruption disable machine checks.
	 * We must also disable machine checks in the new psw mask for
	 * program checks, since swsusp_arch_suspend() may generate program
	 * checks. Disabling machine checks for all other new psw masks is
	 * just paranoia.
	 */
	local_mcck_disable();
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
}

void restore_processor_state(void)
{
	S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
	/* Enable lowcore protection */
	__ctl_set_bit(0, 28);
	local_mcck_enable();
}

/* Called at the end of swsusp_arch_resume */
void s390_early_resume(void)
{
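	/* Log guest relocation state, bring the channel subsystem back up
	 * and rescan PCI functions that may have changed across resume. */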
	lgr_info_log();
	channel_subsystem_reinit();
	zpci_rescan();
}