// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

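/*
 * Decide whether the L1 instruction cache can alias: on ARMv7 this is
 * derived from the CCSIDR geometry (set size vs. PAGE_SIZE), on ARMv6
 * from the cache type register; PIPT I-caches never alias.
 */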
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

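/*
 * Decode the cache type register into the global 'cacheid' flags
 * (VIVT/VIPT/PIPT, ASID-tagged, aliasing I-cache) and log a summary
 * of the detected data and instruction cache types.
 */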
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

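/*
 * If the CPU advertises hardware integer divide, overwrite the
 * software __aeabi_uidiv/__aeabi_idiv helpers with a native
 * udiv/sdiv instruction followed by a return.
 */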
static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

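/*
 * Derive additional hwcaps from the CPUID feature registers on ARMv7+:
 * hardware divide, LPAE, and the v8 Crypto/CRC32 extensions.
 */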
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

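/*
 * Remove hwcaps that this particular CPU revision cannot honour:
 * TLS on ARM1136 r0, and SWP where exclusive load/store support
 * makes it unsafe on multiprocessor systems.
 */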
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l	"l"
#define PLC_r	"r"
#else
#define PLC_l	"I"
#define PLC_r	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

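/*
 * Map the booting CPU's MPIDR (affinity level 0) to logical CPU 0 so
 * that the boot CPU is always cpu_logical_map(0), and clear the percpu
 * offset before the percpu areas are set up.
 */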
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

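/*
 * Identify the CPU from its MIDR, hook up the per-CPU processor, TLB,
 * user and cache function tables, and initialise the hwcaps, cache id
 * and per-CPU stacks for the boot CPU.
 */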
static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

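/*
 * Register one bank of memory with memblock: align it to page
 * boundaries, drop or truncate anything outside the addressable
 * range, and discard memory below PHYS_OFFSET.
 */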
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

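/*
 * Register the standard resources: System RAM (and its boot alias, if
 * any), kernel code/data, optional video RAM and the legacy parallel
 * port I/O regions reserved by the machine descriptor.
 */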
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

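/*
 * Report whether the CPUs entered the kernel in HYP or SVC mode, and
 * warn if the boot mode was inconsistent across CPUs.
 */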
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

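/*
 * setup_arch() is the architecture-specific boot-time setup entry point
 * called from start_kernel(): it identifies the CPU and machine, parses
 * early parameters, initialises memblock and paging, and prepares SMP
 * and crash-kernel support.
 */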
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

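/*
 * Register a CPU device for each possible CPU so it shows up in sysfs,
 * marking it hotpluggable when the platform supports CPU hotplug.
 */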
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

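/*
 * seq_file show callback for /proc/cpuinfo: one "processor" block per
 * online CPU, followed by the hardware, revision and serial lines.
 */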
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};