// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
 * dump with assistance from firmware. This approach does not use kexec;
 * instead, firmware assists in booting the kdump kernel while preserving
 * memory contents. Most of the code has been adapted from the phyp-assisted
 * dump implementation written by Linas Vepstas and Manish Ahuja.
 *
 * Copyright 2011 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "fadump: " fmt

#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/hugetlb.h>

#include <asm/debugfs.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>

static struct fw_dump fw_dump;

static void __init fadump_reserve_crash_area(u64 base);

#ifndef CONFIG_PRESERVE_FA_DUMP
static DEFINE_MUTEX(fadump_mutex);
struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };

#define RESERVED_RNGS_SZ	16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
				 sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
						   RESERVED_RNGS_SZ, 0,
						   RESERVED_RNGS_CNT, true };

static void __init early_init_dt_scan_reserved_ranges(unsigned long node);

#ifdef CONFIG_CMA
static struct cma *fadump_cma;

/*
 * fadump_cma_init() - Initialize CMA area from a fadump reserved memory
 *
 * This function initializes CMA area from fadump reserved memory.
 * The total size of fadump reserved memory covers the boot memory size
 * + cpu data size + hpte size and metadata.
 * Initialize only the area equivalent to boot memory size for CMA use.
 * The remaining portion of fadump reserved memory will not be given to
 * CMA; pages for that portion stay reserved. Boot memory size is aligned
 * per CMA requirement to satisfy the cma_init_reserved_mem() call.
 * Even if the CMA init fails, we still have the memory reservation with
 * us and can continue doing fadump.
 */
int __init fadump_cma_init(void)
{
	unsigned long long base, size;
	int rc;

	if (!fw_dump.fadump_enabled)
		return 0;

	/*
	 * Do not use CMA if user has provided fadump=nocma kernel parameter.
	 * Return 1 to continue with fadump old behaviour.
	 */
	if (fw_dump.nocma)
		return 1;

	base = fw_dump.reserve_dump_area_start;
	size = fw_dump.boot_memory_size;

	if (!size)
		return 0;

	rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
	if (rc) {
		pr_err("Failed to init cma area for firmware-assisted dump, %d\n", rc);
		/*
		 * Though the CMA init has failed we still have memory
		 * reservation with us. The reserved memory will be
		 * blocked from production system usage. Hence return 1,
		 * so that we can continue with fadump.
		 */
		return 1;
	}

	/*
	 * So we now have successfully initialized cma area for fadump.
	 */
	pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
		"bytes of memory reserved for firmware-assisted dump\n",
		cma_get_size(fadump_cma),
		(unsigned long)cma_get_base(fadump_cma) >> 20,
		fw_dump.reserve_dump_area_size);
	return 1;
}
#else
static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */

/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if (depth == 0) {
		early_init_dt_scan_reserved_ranges(node);
		return 0;
	}

	if (depth != 1)
		return 0;

	if (strcmp(uname, "rtas") == 0) {
		rtas_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	if (strcmp(uname, "ibm,opal") == 0) {
		opal_fadump_dt_scan(&fw_dump, node);
		return 1;
	}

	return 0;
}

/*
 * If fadump is registered, check if the memory provided
 * falls within the boot memory area or the reserved memory area.
 */
int is_fadump_memory_area(u64 addr, unsigned long size)
{
	u64 d_start, d_end;

	if (!fw_dump.dump_registered)
		return 0;

	if (!size)
		return 0;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	if (((addr + size) > d_start) && (addr <= d_end))
		return 1;

	return (addr <= fw_dump.boot_mem_top);
}

int should_fadump_crash(void)
{
	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
		return 0;
	return 1;
}

int is_fadump_active(void)
{
	return fw_dump.dump_active;
}

/*
 * Returns true if there are no holes in the memory area between
 * d_start and d_end, false otherwise.
 */
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
	struct memblock_region *reg;
	bool ret = false;
	u64 start, end;

	for_each_memblock(memory, reg) {
		start = max_t(u64, d_start, reg->base);
		end = min_t(u64, d_end, (reg->base + reg->size));
		if (d_start < end) {
			/* Memory hole from d_start to start */
			if (start > d_start)
				break;

			if (end == d_end) {
				ret = true;
				break;
			}

			d_start = end + 1;
		}
	}

	return ret;
}

/*
 * Returns true if there are no holes in the boot memory area,
 * false otherwise.
 */
bool is_fadump_boot_mem_contiguous(void)
{
	unsigned long d_start, d_end;
	bool ret = false;
	int i;

	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		d_start = fw_dump.boot_mem_addr[i];
		d_end = d_start + fw_dump.boot_mem_sz[i];

		ret = is_fadump_mem_area_contiguous(d_start, d_end);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * Returns true if there are no holes in the reserved memory area,
 * false otherwise.
 */
bool is_fadump_reserved_mem_contiguous(void)
{
	u64 d_start, d_end;

	d_start = fw_dump.reserve_dump_area_start;
	d_end = d_start + fw_dump.reserve_dump_area_size;
	return is_fadump_mem_area_contiguous(d_start, d_end);
}

/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
{
	int i;

	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
		 (fw_dump.fadump_supported ? "present" : "no support"));

	if (!fw_dump.fadump_supported)
		return;

	pr_debug("Fadump enabled    : %s\n",
		 (fw_dump.fadump_enabled ? "yes" : "no"));
	pr_debug("Dump Active       : %s\n",
		 (fw_dump.dump_active ? "yes" : "no"));
	pr_debug("Dump section sizes:\n");
	pr_debug("    CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
	pr_debug("    HPTE region size   : %lx\n", fw_dump.hpte_region_size);
	pr_debug("    Boot memory size   : %lx\n", fw_dump.boot_memory_size);
	pr_debug("    Boot memory top    : %llx\n", fw_dump.boot_mem_top);
	pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		pr_debug("[%03d] base = %llx, size = %llx\n", i,
			 fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
	}
}

/**
 * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
 *
 * Function to find the largest memory size we need to reserve during early
 * boot process. This will be the size of the memory that is required for a
 * kernel to boot successfully.
 *
 * This function has been taken from phyp-assisted dump feature implementation.
 *
 * Returns the larger of the platform's minimum boot memory size and 5% of
 * system RAM, rounded down to a multiple of 256MB.
 *
 * TODO: Come up with a better approach to find out a more accurate memory
 * size that is required for a kernel to boot successfully.
 */
static __init u64 fadump_calculate_reserve_size(void)
{
	u64 base, size, bootmem_min;
	int ret;

	if (fw_dump.reserve_bootvar)
		pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");

	/*
	 * Check if the size is specified through crashkernel= cmdline
	 * option. If yes, then use that but ignore base as fadump reserves
	 * memory at a predefined offset.
	 */
	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&size, &base);
	if (ret == 0 && size > 0) {
		unsigned long max_size;

		if (fw_dump.reserve_bootvar)
			pr_info("Using 'crashkernel=' parameter for memory reservation.\n");

		fw_dump.reserve_bootvar = (unsigned long)size;

		/*
		 * Adjust if the boot memory size specified is above
		 * the upper limit.
		 */
		max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
		if (fw_dump.reserve_bootvar > max_size) {
			fw_dump.reserve_bootvar = max_size;
			pr_info("Adjusted boot memory size to %luMB\n",
				(fw_dump.reserve_bootvar >> 20));
		}

		return fw_dump.reserve_bootvar;
	} else if (fw_dump.reserve_bootvar) {
		/*
		 * 'fadump_reserve_mem=' is being used to reserve memory
		 * for firmware-assisted dump.
		 */
		return fw_dump.reserve_bootvar;
	}

	/* divide by 20 to get 5% of value */
	size = memblock_phys_mem_size() / 20;

	/* round it down in multiples of 256MB */
	size = size & ~0x0FFFFFFFUL;

	/* Truncate to memory_limit. We don't want to over reserve the memory.*/
	if (memory_limit && size > memory_limit)
		size = memory_limit;

	bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
	return (size > bootmem_min ? size : bootmem_min);
}

/*
 * Calculate the total memory size required to be reserved for
 * firmware-assisted dump registration.
 */
static unsigned long get_fadump_area_size(void)
{
	unsigned long size = 0;

	size += fw_dump.cpu_state_data_size;
	size += fw_dump.hpte_region_size;
	size += fw_dump.boot_memory_size;
	size += sizeof(struct fadump_crash_info_header);
	size += sizeof(struct elfhdr); /* ELF core header.*/
	size += sizeof(struct elf_phdr); /* place holder for cpu notes */
	/* Program headers for crash memory regions. */
	size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);

	size = PAGE_ALIGN(size);

	/* This is to hold kernel metadata on platforms that support it */
	size += (fw_dump.ops->fadump_get_metadata_size ?
		 fw_dump.ops->fadump_get_metadata_size() : 0);
	return size;
}

static int __init add_boot_mem_region(unsigned long rstart,
				      unsigned long rsize)
{
	int i = fw_dump.boot_mem_regs_cnt++;

	if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) {
		fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS;
		return 0;
	}

	pr_debug("Added boot memory range[%d] [%#016lx-%#016lx)\n",
		 i, rstart, (rstart + rsize));
	fw_dump.boot_mem_addr[i] = rstart;
	fw_dump.boot_mem_sz[i] = rsize;
	return 1;
}

/*
 * Firmware usually has a hard limit on the data it can copy per region.
 * Honour that by splitting a memory range into multiple regions.
 */
static int __init add_boot_mem_regions(unsigned long mstart,
				       unsigned long msize)
{
	unsigned long rstart, rsize, max_size;
	int ret = 1;

	rstart = mstart;
	max_size = fw_dump.max_copy_size ? fw_dump.max_copy_size : msize;
	while (msize) {
		if (msize > max_size)
			rsize = max_size;
		else
			rsize = msize;

		ret = add_boot_mem_region(rstart, rsize);
		if (!ret)
			break;

		msize -= rsize;
		rstart += rsize;
	}

	return ret;
}

static int __init fadump_get_boot_mem_regions(void)
{
	unsigned long base, size, cur_size, hole_size, last_end;
	unsigned long mem_size = fw_dump.boot_memory_size;
	struct memblock_region *reg;
	int ret = 1;

	fw_dump.boot_mem_regs_cnt = 0;

	last_end = 0;
	hole_size = 0;
	cur_size = 0;
	for_each_memblock(memory, reg) {
		base = reg->base;
		size = reg->size;
		hole_size += (base - last_end);

		if ((cur_size + size) >= mem_size) {
			size = (mem_size - cur_size);
			ret = add_boot_mem_regions(base, size);
			break;
		}

		mem_size -= size;
		cur_size += size;
		ret = add_boot_mem_regions(base, size);
		if (!ret)
			break;

		last_end = base + size;
	}
	fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);

	return ret;
}

/*
 * Returns true if the given range overlaps with the reserved memory
 * ranges starting at idx, and updates idx to the index of the memory
 * range that overlaps with the given one; returns false otherwise.
 */
static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
	bool ret = false;
	int i;

	for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
		u64 rbase = reserved_mrange_info.mem_ranges[i].base;
		u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;

		if (end <= rbase)
			break;

		if ((end > rbase) && (base < rend)) {
			*idx = i;
			ret = true;
			break;
		}
	}

	return ret;
}

/*
 * Locate a suitable memory area to reserve memory for FADump. While at it,
 * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
 */
static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{
	struct fadump_memory_range *mrngs;
	phys_addr_t mstart, mend;
	int idx = 0;
	u64 i, ret = 0;

	mrngs = reserved_mrange_info.mem_ranges;
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&mstart, &mend, NULL) {
		pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
			 i, mstart, mend, base);

		if (mstart > base)
			base = PAGE_ALIGN(mstart);

		while ((mend > base) && ((mend - base) >= size)) {
			if (!overlaps_reserved_ranges(base, base + size, &idx)) {
				ret = base;
				goto out;
			}

			base = mrngs[idx].base + mrngs[idx].size;
			base = PAGE_ALIGN(base);
		}
	}

out:
	return ret;
}

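/*
 * Reserve memory for FADump: size the boot memory area, find a suitable
 * chunk above it (avoiding f/w reserved-ranges) and reserve it. If a dump
 * is already active, instead reserve everything above boot memory so the
 * crashed kernel's memory is preserved until the dump is collected.
 */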
int __init fadump_reserve_mem(void)
{
	u64 base, size, mem_boundary, bootmem_min;
	int ret = 1;

	if (!fw_dump.fadump_enabled)
		return 0;

	if (!fw_dump.fadump_supported) {
		pr_info("Firmware-Assisted Dump is not supported on this hardware\n");
		goto error_out;
	}

	/*
	 * Initialize boot memory size.
	 * If dump is active then we have already calculated the size during
	 * the first kernel.
	 */
	if (!fw_dump.dump_active) {
		fw_dump.boot_memory_size =
			PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
		if (!fw_dump.nocma) {
			fw_dump.boot_memory_size =
				ALIGN(fw_dump.boot_memory_size,
				      FADUMP_CMA_ALIGNMENT);
		}
#endif

		bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
		if (fw_dump.boot_memory_size < bootmem_min) {
			pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
			       fw_dump.boot_memory_size, bootmem_min);
			goto error_out;
		}

		if (!fadump_get_boot_mem_regions()) {
			pr_err("Too many holes in boot memory area to enable fadump\n");
			goto error_out;
		}
	}

	/*
	 * Calculate the memory boundary.
	 * If memory_limit is less than actual memory boundary then reserve
	 * the memory for fadump beyond the memory_limit and adjust the
	 * memory_limit accordingly, so that the running kernel can run with
	 * specified memory_limit.
	 */
	if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
		size = get_fadump_area_size();
		if ((memory_limit + size) < memblock_end_of_DRAM())
			memory_limit += size;
		else
			memory_limit = memblock_end_of_DRAM();
		printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
				" dump, now %#016llx\n", memory_limit);
	}
	if (memory_limit)
		mem_boundary = memory_limit;
	else
		mem_boundary = memblock_end_of_DRAM();

	base = fw_dump.boot_mem_top;
	size = get_fadump_area_size();
	fw_dump.reserve_dump_area_size = size;
	if (fw_dump.dump_active) {
		pr_info("Firmware-assisted dump is active.\n");

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * FADump capture kernel doesn't care much about hugepages.
		 * In fact, handling hugepages in capture kernel is asking for
		 * trouble. So, disable HugeTLB support when fadump is active.
		 */
		hugetlb_disabled = true;
#endif
		/*
		 * If last boot has crashed then reserve all the memory
		 * above boot memory size so that we don't touch it until
		 * dump is written to disk by userspace tool. This memory
		 * can be released for general use by invalidating fadump.
		 */
		fadump_reserve_crash_area(base);

		pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
		pr_debug("Reserve dump area start address: 0x%lx\n",
			 fw_dump.reserve_dump_area_start);
	} else {
		/*
		 * Reserve memory at an offset closer to bottom of the RAM to
		 * minimize the impact of memory hot-remove operation.
		 */
		base = fadump_locate_reserve_mem(base, size);

		if (!base || (base + size > mem_boundary)) {
			pr_err("Failed to find memory chunk for reservation!\n");
			goto error_out;
		}
		fw_dump.reserve_dump_area_start = base;

		/*
		 * Calculate the kernel metadata address and register it with
		 * f/w if the platform supports.
		 */
		if (fw_dump.ops->fadump_setup_metadata &&
		    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
			goto error_out;

		if (memblock_reserve(base, size)) {
			pr_err("Failed to reserve memory!\n");
			goto error_out;
		}

		pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
			(size >> 20), base, (memblock_phys_mem_size() >> 20));

		ret = fadump_cma_init();
	}

	return ret;
error_out:
	fw_dump.fadump_enabled = 0;
	return 0;
}

/* Look for fadump= cmdline option. */
static int __init early_fadump_param(char *p)
{
	if (!p)
		return 1;

	if (strncmp(p, "on", 2) == 0)
		fw_dump.fadump_enabled = 1;
	else if (strncmp(p, "off", 3) == 0)
		fw_dump.fadump_enabled = 0;
	else if (strncmp(p, "nocma", 5) == 0) {
		fw_dump.fadump_enabled = 1;
		fw_dump.nocma = 1;
	}

	return 0;
}
early_param("fadump", early_fadump_param);
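
/*
 * Illustrative command lines (sizes are examples only; see
 * fadump_calculate_reserve_size() for how the boot memory size is derived):
 *   fadump=on crashkernel=4G - enable fadump, boot memory size from crashkernel=
 *   fadump=nocma             - enable fadump without CMA-backed reservation
 *   fadump=off               - disable firmware-assisted dump
 */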

/*
 * Look for fadump_reserve_mem= cmdline option.
 * TODO: Remove references to the 'fadump_reserve_mem=' parameter once
 * users are accustomed to the 'crashkernel=' parameter.
 */
static int __init early_fadump_reserve_mem(char *p)
{
	if (p)
		fw_dump.reserve_bootvar = memparse(p, &p);
	return 0;
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);

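/*
 * Trigger firmware-assisted dump on a kernel crash: the first CPU to get
 * here initiates the dump; any other crashing CPU spins until firmware
 * takes over or fadump gets un-registered.
 */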
void crash_fadump(struct pt_regs *regs, const char *str)
{
	struct fadump_crash_info_header *fdh = NULL;
	int old_cpu, this_cpu;

	if (!should_fadump_crash())
		return;

	/*
	 * old_cpu == -1 means this is the first CPU which has come here,
	 * go ahead and trigger fadump.
	 *
	 * old_cpu != -1 means some other CPU is already on its way
	 * to trigger fadump, just keep looping here.
	 */
	this_cpu = smp_processor_id();
	old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);

	if (old_cpu != -1) {
		/*
		 * We can't loop here indefinitely. Wait as long as fadump
		 * is in force. If we race with fadump un-registration this
		 * loop will break and then we go down to normal panic path
		 * and reboot. If fadump is in force the first crashing
		 * cpu will definitely trigger fadump.
		 */
		while (fw_dump.dump_registered)
			cpu_relax();
		return;
	}

	fdh = __va(fw_dump.fadumphdr_addr);
	fdh->crashing_cpu = crashing_cpu;
	crash_save_vmcoreinfo();

	if (regs)
		fdh->regs = *regs;
	else
		ppc_save_regs(&fdh->regs);

	fdh->online_mask = *cpu_online_mask;

	fw_dump.ops->fadump_trigger(fdh, str);
}

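/* Append an NT_PRSTATUS note with the given registers to the notes buffer. */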
u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
	struct elf_prstatus prstatus;

	memset(&prstatus, 0, sizeof(prstatus));
	/*
	 * FIXME: How do I get PID? Do I really need it?
	 * prstatus.pr_pid = ????
	 */
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	return buf;
}

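/*
 * Point the placeholder PT_NOTE program header at the CPU notes buffer
 * that is populated while processing the dump after a crash.
 */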
void fadump_update_elfcore_header(char *bufp)
{
	struct elfhdr *elf;
	struct elf_phdr *phdr;

	elf = (struct elfhdr *)bufp;
	bufp += sizeof(struct elfhdr);

	/* First note is a place holder for cpu notes info. */
	phdr = (struct elf_phdr *)bufp;

	if (phdr->p_type == PT_NOTE) {
		phdr->p_paddr	= __pa(fw_dump.cpu_notes_buf_vaddr);
		phdr->p_offset	= phdr->p_paddr;
		phdr->p_filesz	= fw_dump.cpu_notes_buf_size;
		phdr->p_memsz	= fw_dump.cpu_notes_buf_size;
	}
	return;
}

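/*
 * Allocate a zeroed buffer and mark its pages reserved so the rest of mm
 * leaves them alone; fadump_free_buffer() releases them again.
 */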
static void *fadump_alloc_buffer(unsigned long size)
{
	unsigned long count, i;
	struct page *page;
	void *vaddr;

	vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!vaddr)
		return NULL;

	count = PAGE_ALIGN(size) / PAGE_SIZE;
	page = virt_to_page(vaddr);
	for (i = 0; i < count; i++)
		mark_page_reserved(page + i);
	return vaddr;
}

static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
{
	free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}

s32 fadump_setup_cpu_notes_buf(u32 num_cpus)
{
	/* Allocate buffer to hold cpu crash notes. */
	fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
	fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf_vaddr =
		(unsigned long)fadump_alloc_buffer(fw_dump.cpu_notes_buf_size);
	if (!fw_dump.cpu_notes_buf_vaddr) {
		pr_err("Failed to allocate %ld bytes for CPU notes buffer\n",
		       fw_dump.cpu_notes_buf_size);
		return -ENOMEM;
	}

	pr_debug("Allocated buffer for cpu notes of size %ld at 0x%lx\n",
		 fw_dump.cpu_notes_buf_size,
		 fw_dump.cpu_notes_buf_vaddr);
	return 0;
}

void fadump_free_cpu_notes_buf(void)
{
	if (!fw_dump.cpu_notes_buf_vaddr)
		return;

	fadump_free_buffer(fw_dump.cpu_notes_buf_vaddr,
			   fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf_vaddr = 0;
	fw_dump.cpu_notes_buf_size = 0;
}

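/*
 * Reset the given mem_ranges info: statically allocated arrays just have
 * their count cleared, dynamically allocated ones are freed and the
 * bookkeeping zeroed (the name is preserved).
 */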
static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	if (mrange_info->is_static) {
		mrange_info->mem_range_cnt = 0;
		return;
	}

	kfree(mrange_info->mem_ranges);
	memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
	       (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}

/*
 * Allocate or reallocate mem_ranges array in incremental units
 * of PAGE_SIZE.
 */
static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	struct fadump_memory_range *new_array;
	u64 new_size;

	new_size = mrange_info->mem_ranges_sz + PAGE_SIZE;
	pr_debug("Allocating %llu bytes of memory for %s memory ranges\n",
		 new_size, mrange_info->name);

	new_array = krealloc(mrange_info->mem_ranges, new_size, GFP_KERNEL);
	if (new_array == NULL) {
		pr_err("Insufficient memory for setting up %s memory ranges\n",
		       mrange_info->name);
		fadump_free_mem_ranges(mrange_info);
		return -ENOMEM;
	}

	mrange_info->mem_ranges = new_array;
	mrange_info->mem_ranges_sz = new_size;
	mrange_info->max_mem_ranges = (new_size /
				       sizeof(struct fadump_memory_range));
	return 0;
}

static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
				       u64 base, u64 end)
{
	struct fadump_memory_range *mem_ranges = mrange_info->mem_ranges;
	bool is_adjacent = false;
	u64 start, size;

	if (base == end)
		return 0;

	/*
	 * Fold adjacent memory ranges to bring down the memory ranges/
	 * PT_LOAD segments count.
	 */
	if (mrange_info->mem_range_cnt) {
		start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
		size  = mem_ranges[mrange_info->mem_range_cnt - 1].size;

		if ((start + size) == base)
			is_adjacent = true;
	}
	if (!is_adjacent) {
		/* resize the array on reaching the limit */
		if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
			int ret;

			if (mrange_info->is_static) {
				pr_err("Reached array size limit for %s memory ranges\n",
				       mrange_info->name);
				return -ENOSPC;
			}

			ret = fadump_alloc_mem_ranges(mrange_info);
			if (ret)
				return ret;

			/* Update to the new resized array */
			mem_ranges = mrange_info->mem_ranges;
		}

		start = base;
		mem_ranges[mrange_info->mem_range_cnt].base = start;
		mrange_info->mem_range_cnt++;
	}

	mem_ranges[mrange_info->mem_range_cnt - 1].size = (end - start);
	pr_debug("%s_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
		 mrange_info->name, (mrange_info->mem_range_cnt - 1),
		 start, end - 1, (end - start));
	return 0;
}

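/*
 * Add [start, end) to crash_mrange_info, carving out whatever portion of
 * it overlaps the reserved dump area (full straddle, head or tail overlap).
 */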
static int fadump_exclude_reserved_area(u64 start, u64 end)
{
	u64 ra_start, ra_end;
	int ret = 0;

	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	if ((ra_start < end) && (ra_end > start)) {
		if ((start < ra_start) && (end > ra_end)) {
			ret = fadump_add_mem_range(&crash_mrange_info,
						   start, ra_start);
			if (ret)
				return ret;

			ret = fadump_add_mem_range(&crash_mrange_info,
						   ra_end, end);
		} else if (start < ra_start) {
			ret = fadump_add_mem_range(&crash_mrange_info,
						   start, ra_start);
		} else if (ra_end < end) {
			ret = fadump_add_mem_range(&crash_mrange_info,
						   ra_end, end);
		}
	} else
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);

	return ret;
}

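/* Populate the fixed fields of the ELF core header for the dump. */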
static int fadump_init_elfcore_header(char *bufp)
{
	struct elfhdr *elf;

	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
#if defined(_CALL_ELF)
	elf->e_flags = _CALL_ELF;
#else
	elf->e_flags = 0;
#endif
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = 0;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;

	return 0;
}

/*
 * Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in elfcore header.
 */
static int fadump_setup_crash_memory_ranges(void)
{
	struct memblock_region *reg;
	u64 start, end;
	int i, ret;

	pr_debug("Setup crash memory ranges.\n");
	crash_mrange_info.mem_range_cnt = 0;

	/*
	 * Boot memory region(s) registered with firmware are moved to
	 * a different location at the time of crash. Create separate program
	 * header(s) for this memory chunk(s) with the correct offset.
	 */
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		start = fw_dump.boot_mem_addr[i];
		end = start + fw_dump.boot_mem_sz[i];
		ret = fadump_add_mem_range(&crash_mrange_info, start, end);
		if (ret)
			return ret;
	}

	for_each_memblock(memory, reg) {
		start = (u64)reg->base;
		end = start + (u64)reg->size;

		/*
		 * skip the memory chunk that is already added
		 * (0 through boot_memory_top).
		 */
		if (start < fw_dump.boot_mem_top) {
			if (end > fw_dump.boot_mem_top)
				start = fw_dump.boot_mem_top;
			else
				continue;
		}

		/* add this range excluding the reserved dump area. */
		ret = fadump_exclude_reserved_area(start, end);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * If the given physical address falls within the boot memory region then
 * return the relocated address that points to the dump region reserved
 * for saving initial boot memory contents.
 */
static inline unsigned long fadump_relocate(unsigned long paddr)
{
	unsigned long raddr, rstart, rend, rlast, hole_size;
	int i;

	hole_size = 0;
	rlast = 0;
	raddr = paddr;
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		rstart = fw_dump.boot_mem_addr[i];
		rend = rstart + fw_dump.boot_mem_sz[i];
		hole_size += (rstart - rlast);

		if (paddr >= rstart && paddr < rend) {
			raddr += fw_dump.boot_mem_dest_addr - hole_size;
			break;
		}

		rlast = rend;
	}

	pr_debug("vmcoreinfo: paddr = 0x%lx, raddr = 0x%lx\n", paddr, raddr);
	return raddr;
}

static int fadump_create_elfcore_headers(char *bufp)
{
	unsigned long long raddr, offset;
	struct elf_phdr *phdr;
	struct elfhdr *elf;
	int i, j;

	fadump_init_elfcore_header(bufp);
	elf = (struct elfhdr *)bufp;
	bufp += sizeof(struct elfhdr);

	/*
	 * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
	 * will be populated during second kernel boot after crash. Hence
	 * this PT_NOTE will always be the first elf note.
	 *
	 * NOTE: Any new ELF note addition should be placed after this note.
	 */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;

	phdr->p_offset = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = 0;
	phdr->p_memsz = 0;

	(elf->e_phnum)++;

	/* setup ELF PT_NOTE for vmcoreinfo */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;

	phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
	phdr->p_offset = phdr->p_paddr;
	phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;

	/* Increment number of program headers. */
	(elf->e_phnum)++;

	/* setup PT_LOAD sections. */
	j = 0;
	offset = 0;
	raddr = fw_dump.boot_mem_addr[0];
	for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) {
		u64 mbase, msize;

		mbase = crash_mrange_info.mem_ranges[i].base;
		msize = crash_mrange_info.mem_ranges[i].size;
		if (!msize)
			continue;

		phdr = (struct elf_phdr *)bufp;
		bufp += sizeof(struct elf_phdr);
		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= mbase;

		if (mbase == raddr) {
			/*
			 * The entire real memory region will be moved by
			 * firmware to the specified destination_address.
			 * Hence set the correct offset.
			 */
			phdr->p_offset = fw_dump.boot_mem_dest_addr + offset;
			if (j < (fw_dump.boot_mem_regs_cnt - 1)) {
				offset += fw_dump.boot_mem_sz[j];
				raddr = fw_dump.boot_mem_addr[++j];
			}
		}

		phdr->p_paddr = mbase;
		phdr->p_vaddr = (unsigned long)__va(mbase);
		phdr->p_filesz = msize;
		phdr->p_memsz = msize;
		phdr->p_align = 0;

		/* Increment number of program headers. */
		(elf->e_phnum)++;
	}
	return 0;
}

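/*
 * Initialize the crash info header at @addr and return the address just
 * past it, where the ELF core header is placed.
 */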
static unsigned long init_fadump_header(unsigned long addr)
{
	struct fadump_crash_info_header *fdh;

	if (!addr)
		return 0;

	fdh = __va(addr);
	addr += sizeof(struct fadump_crash_info_header);

	memset(fdh, 0, sizeof(struct fadump_crash_info_header));
	fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
	fdh->elfcorehdr_addr = addr;
	/* We will set the crashing cpu id in crash_fadump() during crash. */
	fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;

	return addr;
}

static int register_fadump(void)
{
	unsigned long addr;
	void *vaddr;
	int ret;

	/*
	 * If no memory is reserved then we can not register for firmware-
	 * assisted dump.
	 */
	if (!fw_dump.reserve_dump_area_size)
		return -ENODEV;

	ret = fadump_setup_crash_memory_ranges();
	if (ret)
		return ret;

	addr = fw_dump.fadumphdr_addr;

	/* Initialize fadump crash info header. */
	addr = init_fadump_header(addr);
	vaddr = __va(addr);

	pr_debug("Creating ELF core headers at %#016lx\n", addr);
	fadump_create_elfcore_headers(vaddr);

	/* register the future kernel dump with firmware. */
	pr_debug("Registering for firmware-assisted kernel dump...\n");
	return fw_dump.ops->fadump_register(&fw_dump);
}

void fadump_cleanup(void)
{
	if (!fw_dump.fadump_supported)
		return;

	/* Invalidate the registration only if dump is active. */
	if (fw_dump.dump_active) {
		pr_debug("Invalidating firmware-assisted dump registration\n");
		fw_dump.ops->fadump_invalidate(&fw_dump);
	} else if (fw_dump.dump_registered) {
		/* Un-register Firmware-assisted dump if it was registered. */
		fw_dump.ops->fadump_unregister(&fw_dump);
		fadump_free_mem_ranges(&crash_mrange_info);
	}

	if (fw_dump.ops->fadump_cleanup)
		fw_dump.ops->fadump_cleanup(&fw_dump);
}

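/*
 * Hand a pfn range back to the page allocator, yielding the CPU roughly
 * once per second so that large frees don't stall the system.
 */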
static void fadump_free_reserved_memory(unsigned long start_pfn,
					unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long time_limit = jiffies + HZ;

	pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
		PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		free_reserved_page(pfn_to_page(pfn));

		if (time_after(jiffies, time_limit)) {
			cond_resched();
			time_limit = jiffies + HZ;
		}
	}
}

/*
 * Skip memory holes and free memory that was actually reserved.
 */
static void fadump_release_reserved_area(u64 start, u64 end)
{
	u64 tstart, tend, spfn, epfn;
	struct memblock_region *reg;

	spfn = PHYS_PFN(start);
	epfn = PHYS_PFN(end);
	for_each_memblock(memory, reg) {
		tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg));
		tend   = min_t(u64, epfn, memblock_region_memory_end_pfn(reg));
		if (tstart < tend) {
			fadump_free_reserved_memory(tstart, tend);

			if (tend == epfn)
				break;

			spfn = tend;
		}
	}
}

/*
 * Sort the mem ranges in-place and merge adjacent ranges
 * to minimize the memory ranges count.
 */
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
	struct fadump_memory_range *mem_ranges;
	struct fadump_memory_range tmp_range;
	u64 base, size;
	int i, j, idx;

	if (!reserved_mrange_info.mem_range_cnt)
		return;

	/* Sort the memory ranges */
	mem_ranges = mrange_info->mem_ranges;
	for (i = 0; i < mrange_info->mem_range_cnt; i++) {
		idx = i;
		for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) {
			if (mem_ranges[idx].base > mem_ranges[j].base)
				idx = j;
		}
		if (idx != i) {
			tmp_range = mem_ranges[idx];
			mem_ranges[idx] = mem_ranges[i];
			mem_ranges[i] = tmp_range;
		}
	}

	/* Merge adjacent reserved ranges */
	idx = 0;
	for (i = 1; i < mrange_info->mem_range_cnt; i++) {
		base = mem_ranges[i-1].base;
		size = mem_ranges[i-1].size;
		if (mem_ranges[i].base == (base + size))
			mem_ranges[idx].size += mem_ranges[i].size;
		else {
			idx++;
			if (i == idx)
				continue;

			mem_ranges[idx] = mem_ranges[i];
		}
	}
	mrange_info->mem_range_cnt = idx + 1;
}

/*
 * Scan reserved-ranges to consider them while reserving/releasing
 * memory for FADump.
 */
static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
	const __be32 *prop;
	int len, ret = -1;
	unsigned long i;

	/* reserved-ranges already scanned */
	if (reserved_mrange_info.mem_range_cnt != 0)
		return;

	prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
	if (!prop)
		return;

	/*
	 * Each reserved range is an (address, size) pair, 2 cells each,
	 * totalling 4 cells per range.
	 */
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			ret = fadump_add_mem_range(&reserved_mrange_info,
						   base, base + size);
			if (ret < 0) {
				pr_warn("some reserved ranges are ignored!\n");
				break;
			}
		}
	}

	/* Compact reserved ranges */
	sort_and_merge_mem_ranges(&reserved_mrange_info);
}

/*
 * Release the memory that was reserved during early boot to preserve the
 * crash'ed kernel's memory contents except reserved dump area (permanent
 * reservation) and reserved ranges used by F/W. The released memory will
 * be available for general use.
 */
static void fadump_release_memory(u64 begin, u64 end)
{
	u64 ra_start, ra_end, tstart;
	int i, ret;

	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	/*
	 * If reserved ranges array limit is hit, overwrite the last reserved
	 * memory range with reserved dump area to ensure it is excluded from
	 * the memory being released (reused for next FADump registration).
	 */
	if (reserved_mrange_info.mem_range_cnt ==
	    reserved_mrange_info.max_mem_ranges)
		reserved_mrange_info.mem_range_cnt--;

	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
	if (ret != 0)
		return;

	/* Get the reserved ranges list in order first. */
	sort_and_merge_mem_ranges(&reserved_mrange_info);

	/* Exclude reserved ranges and release remaining memory */
	tstart = begin;
	for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
		ra_start = reserved_mrange_info.mem_ranges[i].base;
		ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;

		if (tstart >= ra_end)
			continue;

		if (tstart < ra_start)
			fadump_release_reserved_area(tstart, ra_start);
		tstart = ra_end;
	}

	if (tstart < end)
		fadump_release_reserved_area(tstart, end);
}

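/*
 * Invalidate the active dump, release the memory it was holding on to and
 * re-initialize fadump structures so that fadump can be registered again.
 */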
static void fadump_invalidate_release_mem(void)
{
	mutex_lock(&fadump_mutex);
	if (!fw_dump.dump_active) {
		mutex_unlock(&fadump_mutex);
		return;
	}

	fadump_cleanup();
	mutex_unlock(&fadump_mutex);

	fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM());
	fadump_free_cpu_notes_buf();

	/*
	 * Setup kernel metadata and initialize the kernel dump
	 * memory structure for FADump re-registration.
	 */
	if (fw_dump.ops->fadump_setup_metadata &&
	    (fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0))
		pr_warn("Failed to setup kernel metadata!\n");
	fw_dump.ops->fadump_init_mem_struct(&fw_dump);
}

static ssize_t fadump_release_memory_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	int input = -1;

	if (!fw_dump.dump_active)
		return -EPERM;

	if (kstrtoint(buf, 0, &input))
		return -EINVAL;

	if (input == 1) {
		/*
		 * Take away the '/proc/vmcore'. We are releasing the dump
		 * memory, hence it will not be valid anymore.
		 */
#ifdef CONFIG_PROC_VMCORE
		vmcore_cleanup();
#endif
		fadump_invalidate_release_mem();

	} else
		return -EINVAL;
	return count;
}

static ssize_t fadump_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}

static ssize_t fadump_register_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%d\n", fw_dump.dump_registered);
}

static ssize_t fadump_register_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int ret = 0;
	int input = -1;

	if (!fw_dump.fadump_enabled || fw_dump.dump_active)
		return -EPERM;

	if (kstrtoint(buf, 0, &input))
		return -EINVAL;

	mutex_lock(&fadump_mutex);

	switch (input) {
	case 0:
		if (fw_dump.dump_registered == 0) {
			goto unlock_out;
		}

		/* Un-register Firmware-assisted dump */
		pr_debug("Un-register firmware-assisted dump\n");
		fw_dump.ops->fadump_unregister(&fw_dump);
		break;
	case 1:
		if (fw_dump.dump_registered == 1) {
			/* Un-register Firmware-assisted dump */
			fw_dump.ops->fadump_unregister(&fw_dump);
		}
		/* Register Firmware-assisted dump */
		ret = register_fadump();
		break;
	default:
		ret = -EINVAL;
		break;
	}

unlock_out:
	mutex_unlock(&fadump_mutex);
	return ret < 0 ? ret : count;
}

static int fadump_region_show(struct seq_file *m, void *private)
{
	if (!fw_dump.fadump_enabled)
		return 0;

	mutex_lock(&fadump_mutex);
	fw_dump.ops->fadump_region_show(&fw_dump, m);
	mutex_unlock(&fadump_mutex);
	return 0;
}

static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
						0200, NULL,
						fadump_release_memory_store);
static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
						0444, fadump_enabled_show,
						NULL);
static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
						0644, fadump_register_show,
						fadump_register_store);

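/*
 * Illustrative usage of the sysfs files set up here (all under /sys/kernel/):
 *   cat fadump_enabled          - check whether fadump is enabled
 *   echo 1 > fadump_registered  - register for dump (0 to un-register)
 *   echo 1 > fadump_release_mem - invalidate an active dump and release memory
 */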
DEFINE_SHOW_ATTRIBUTE(fadump_region);

static void fadump_init_files(void)
{
	struct dentry *debugfs_file;
	int rc = 0;

	rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
	if (rc)
		printk(KERN_ERR "fadump: unable to create sysfs file"
			" fadump_enabled (%d)\n", rc);

	rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
	if (rc)
		printk(KERN_ERR "fadump: unable to create sysfs file"
			" fadump_registered (%d)\n", rc);

	debugfs_file = debugfs_create_file("fadump_region", 0444,
					   powerpc_debugfs_root, NULL,
					   &fadump_region_fops);
	if (!debugfs_file)
		printk(KERN_ERR "fadump: unable to create debugfs file"
		       " fadump_region\n");

	if (fw_dump.dump_active) {
		rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
		if (rc)
			printk(KERN_ERR "fadump: unable to create sysfs file"
			       " fadump_release_mem (%d)\n", rc);
	}
	return;
}

/*
 * Prepare for firmware-assisted dump.
 */
int __init setup_fadump(void)
{
	if (!fw_dump.fadump_enabled)
		return 0;

	if (!fw_dump.fadump_supported) {
		printk(KERN_ERR "Firmware-assisted dump is not supported on"
			" this hardware\n");
		return 0;
	}

	fadump_show_config();
	/*
	 * If dump data is available then see if it is valid and prepare for
	 * saving it to the disk.
	 */
	if (fw_dump.dump_active) {
		/*
		 * if dump process fails then invalidate the registration
		 * and release memory before proceeding for re-registration.
		 */
		if (fw_dump.ops->fadump_process(&fw_dump) < 0)
			fadump_invalidate_release_mem();
	}
	/* Initialize the kernel dump memory structure for FAD registration. */
	else if (fw_dump.reserve_dump_area_size)
		fw_dump.ops->fadump_init_mem_struct(&fw_dump);

	fadump_init_files();

	return 1;
}
subsys_initcall(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */

/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
		return 0;

	opal_fadump_dt_scan(&fw_dump, node);
	return 1;
}

/*
 * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel,
 * preserve crash data. The subsequent memory preserving kernel boot
 * is likely to process this crash data.
 */
int __init fadump_reserve_mem(void)
{
	if (fw_dump.dump_active) {
		/*
		 * If last boot has crashed then reserve all the memory
		 * above boot memory to preserve crash data.
		 */
		pr_info("Preserving crash data for processing in next boot.\n");
		fadump_reserve_crash_area(fw_dump.boot_mem_top);
	} else
		pr_debug("FADump-aware kernel..\n");

	return 1;
}
#endif /* CONFIG_PRESERVE_FA_DUMP */

/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
	struct memblock_region *reg;
	u64 mstart, msize;

	for_each_memblock(memory, reg) {
		mstart = reg->base;
		msize  = reg->size;

		if ((mstart + msize) < base)
			continue;

		if (mstart < base) {
			msize -= (base - mstart);
			mstart = base;
		}

		pr_info("Reserving %lluMB of memory at %#016llx for preserving crash data\n",
			(msize >> 20), mstart);
		memblock_reserve(mstart, msize);
	}
}

unsigned long __init arch_reserved_kernel_pages(void)
{
	return memblock_reserved_size() / PAGE_SIZE;
}