/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (upto local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] upto 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage: 16B local_tmp_var + 8B tail_call_cnt */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

35#ifndef __ASSEMBLY__
36
37/* BPF register usage */
38#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
39#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
40
41/* BPF to ppc register mappings */
42static const int b2p[] = {
43 /* function return value */
44 [BPF_REG_0] = 8,
45 /* function arguments */
46 [BPF_REG_1] = 3,
47 [BPF_REG_2] = 4,
48 [BPF_REG_3] = 5,
49 [BPF_REG_4] = 6,
50 [BPF_REG_5] = 7,
51 /* non volatile registers */
52 [BPF_REG_6] = 27,
53 [BPF_REG_7] = 28,
54 [BPF_REG_8] = 29,
55 [BPF_REG_9] = 30,
56 /* frame pointer aka BPF_REG_10 */
57 [BPF_REG_FP] = 31,
58 /* eBPF jit internal registers */
59 [BPF_REG_AX] = 2,
60 [TMP_REG_1] = 9,
61 [TMP_REG_2] = 10
62};
63
64/* PPC NVR range -- update this if we ever use NVRs below r27 */
65#define BPF_PPC_NVR_MIN 27
66
/*
 * 64-bit load/store helpers. The DS-form ld/std instructions can only
 * encode immediate offsets that are a multiple of 4, so for unaligned
 * offsets we materialize the offset in TMP_REG_2 and use the indexed
 * (X-form) variant instead.
 *
 * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
 * so ensure that it isn't in use already.
 */
#define PPC_BPF_LL(r, base, i) do {					      \
				if ((i) % 4) {				      \
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\
					EMIT(PPC_RAW_LDX(r, base,	      \
							b2p[TMP_REG_2]));     \
				} else					      \
					EMIT(PPC_RAW_LD(r, base, i));	      \
				} while(0)
#define PPC_BPF_STL(r, base, i) do {					      \
				if ((i) % 4) {				      \
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\
					EMIT(PPC_RAW_STDX(r, base,	      \
							b2p[TMP_REG_2]));     \
				} else					      \
					EMIT(PPC_RAW_STD(r, base, i));	      \
				} while(0)
/* stdu (store with update): callers only use word-aligned offsets here */
#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0)

Andrew Scullb4b6d4a2019-01-02 15:54:55 +000089#define SEEN_FUNC 0x1000 /* might call external helpers */
90#define SEEN_STACK 0x2000 /* uses BPF stack */
91#define SEEN_TAILCALL 0x4000 /* uses tail calls */
92
93struct codegen_context {
94 /*
95 * This is used to track register usage as well
96 * as calls to external helpers.
97 * - register usage is tracked with corresponding
98 * bits (r3-r10 and r27-r31)
99 * - rest of the bits can be used to track other
100 * things -- for now, we use bits 16 to 23
101 * encoded in SEEN_* macros above
102 */
103 unsigned int seen;
104 unsigned int idx;
105 unsigned int stack_size;
106};
107
108#endif /* !__ASSEMBLY__ */
109
110#endif