// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */
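
/*
 * AF_XDP (XDP socket) sample application. It opens one AF_XDP socket on a
 * single <interface, queue> pair and runs one of three benchmarks: rxdrop
 * (discard every received frame), txonly (transmit a canned UDP frame in
 * batches), or l2fwd (swap MAC addresses and bounce frames back out the
 * same port).
 *
 * Example invocations (built as xdpsock; the interface name is
 * illustrative):
 *
 *   ./xdpsock -i eth0 -q 0 --rxdrop
 *   ./xdpsock -i eth0 --txonly --zero-copy
 *   ./xdpsock -i eth0 --l2fwd --poll
 */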

#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <locale.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "libbpf.h"
#include "xsk.h"
#include <bpf/bpf.h>

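/* AF_XDP was merged in kernel 4.18; provide the socket-level constants
 * ourselves when building against older userspace headers. The values
 * below match the kernel's own definitions.
 */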
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

#define NUM_FRAMES (4 * 1024)
#define BATCH_SIZE 64

#define DEBUG_HEXDUMP 0
#define MAX_SOCKS 8

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static __u32 prog_id;

struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	void *buffer;
};

struct xsk_socket_info {
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
	u32 outstanding_tx;
};

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf(" ");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

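/* Print per-socket statistics. Rates are the packets accumulated since
 * the previous call divided by the elapsed monotonic time dt in
 * nanoseconds, i.e. pps = delta_pkts * 1e9 / dt. The %' printf flag
 * groups digits per the current locale; see the setlocale() call in
 * main().
 */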
static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks && xsks[i]; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");

		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}

static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

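/* Detach the XDP program on exit, but only if the program currently
 * attached to the interface is still the one we installed (same prog_id);
 * if something else replaced it in the meantime, leave it alone.
 */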
static void remove_xdp_program(void)
{
	__u32 curr_prog_id = 0;

	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
		printf("bpf_get_link_xdp_id failed\n");
		exit(EXIT_FAILURE);
	}
	if (prog_id == curr_prog_id)
		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	else if (!curr_prog_id)
		printf("couldn't find a prog id on a given interface\n");
	else
		printf("program on interface changed, not removing\n");
}

static void int_exit(int sig)
{
	struct xsk_umem *umem = xsks[0]->umem->umem;

	(void)sig;

	dump_stats();
	xsk_socket__delete(xsks[0]->xsk);
	(void)xsk_umem__delete(umem);
	remove_xdp_program();

	exit(EXIT_SUCCESS);
}

static void __exit_with_error(int error, const char *file, const char *func,
			      int line)
{
	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
		line, error, strerror(error));
	dump_stats();
	remove_xdp_program();
	exit(EXIT_FAILURE);
}

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
						 __LINE__)

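/* A canned 60-byte Ethernet/IPv4/UDP frame used by the txonly benchmark:
 * dst MAC 3c:fd:fe:9e:7f:71, src MAC ec:b1:d7:98:3a:c0,
 * 5.8.7.8:4242 -> 200.20.30.4:4242, fixed payload. sizeof(pkt_data) - 1
 * drops the string's terminating NUL.
 */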
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";

static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
	memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
	       sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}

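/* Register the mmap'ed buffer as a umem and create the rings that belong
 * to it: the fill ring (fq) hands buffers to the kernel for receive, and
 * the completion ring (cq) returns buffers whose transmit has finished.
 * The per-socket rx/tx rings are created later in xsk_configure_socket().
 */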
static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
	struct xsk_umem_info *umem;
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = opt_xsk_frame_size,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
		.flags = opt_umem_flags
	};
	int ret;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		exit_with_error(errno);

	ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
			       &cfg);
	if (ret)
		exit_with_error(-ret);

	umem->buffer = buffer;
	return umem;
}

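/* Create one AF_XDP socket bound to opt_if/opt_queue, then pre-populate
 * the umem's fill ring with one descriptor per frame (addresses spaced
 * opt_xsk_frame_size apart) so the kernel has receive buffers from the
 * start.
 */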
static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
{
	struct xsk_socket_config cfg;
	struct xsk_socket_info *xsk;
	int ret;
	u32 idx;
	int i;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		exit_with_error(errno);

	xsk->umem = umem;
	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = opt_xdp_flags;
	cfg.bind_flags = opt_xdp_bind_flags;
	ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
				 &xsk->rx, &xsk->tx, &cfg);
	if (ret)
		exit_with_error(-ret);

	ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
	if (ret)
		exit_with_error(-ret);

	ret = xsk_ring_prod__reserve(&xsk->umem->fq,
				     XSK_RING_PROD__DEFAULT_NUM_DESCS,
				     &idx);
	if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
		exit_with_error(-ret);
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) =
			i * opt_xsk_frame_size;
	xsk_ring_prod__submit(&xsk->umem->fq,
			      XSK_RING_PROD__DEFAULT_NUM_DESCS);

	return xsk;
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{"zero-copy", no_argument, 0, 'z'},
	{"copy", no_argument, 0, 'c'},
	{"frame-size", required_argument, 0, 'f'},
	{"no-need-wakeup", no_argument, 0, 'm'},
	{"unaligned", no_argument, 0, 'u'},
	{0, 0, 0, 0}
};

static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -S, --xdp-skb=n	Use XDP skb-mod\n"
		"  -N, --xdp-native=n	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"  -z, --zero-copy	Force zero-copy mode.\n"
		"  -c, --copy		Force copy mode.\n"
		"  -m, --no-need-wakeup	Turn off use of driver need wakeup flag.\n"
		"  -f, --frame-size=n	Set the frame size (must be a power of two in aligned mode, default is %d).\n"
		"  -u, --unaligned	Enable unaligned chunk placement\n"
		"\n";
	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE);
	exit(EXIT_FAILURE);
}

static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:mu",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		case 'z':
			opt_xdp_bind_flags |= XDP_ZEROCOPY;
			break;
		case 'c':
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'u':
			opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
			opt_unaligned_chunks = 1;
			opt_mmap_flags = MAP_HUGETLB;
			break;
		case 'F':
			opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'f':
			opt_xsk_frame_size = atoi(optarg);
			break;
		case 'm':
			opt_need_wakeup = false;
			opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}

	if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) &&
	    !opt_unaligned_chunks) {
		fprintf(stderr, "--frame-size=%d is not a power of two\n",
			opt_xsk_frame_size);
		usage(basename(argv[0]));
	}
}

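/* Kick the kernel to start processing the tx ring: a zero-length
 * sendto(MSG_DONTWAIT) is the wakeup call for an AF_XDP socket.
 * ENOBUFS/EAGAIN/EBUSY only mean the kernel is temporarily busy, so the
 * caller simply retries on a later pass.
 */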
static void kick_tx(struct xsk_socket_info *xsk)
{
	int ret;

	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	exit_with_error(errno);
}

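/* For l2fwd, every transmitted buffer must go back into circulation:
 * harvest up to BATCH_SIZE completed tx descriptors from the completion
 * ring and recycle their addresses straight into the fill ring so they
 * become receive buffers again.
 */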
static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
				     struct pollfd *fds)
{
	struct xsk_umem_info *umem = xsk->umem;
	u32 idx_cq = 0, idx_fq = 0;
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
	if (rcvd > 0) {
		unsigned int i;
		int ret;

		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		while (ret != rcvd) {
			if (ret < 0)
				exit_with_error(-ret);
			if (xsk_ring_prod__needs_wakeup(&umem->fq))
				ret = poll(fds, num_socks, opt_timeout);
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
		}

		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xsk_socket_info *xsk)
{
	unsigned int rcvd;
	u32 idx;

	if (!xsk->outstanding_tx)
		return;

	if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
	if (rcvd > 0) {
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

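/* rxdrop path: consume up to BATCH_SIZE rx descriptors and return each
 * buffer to the fill ring untouched. xsk_umem__extract_addr() recovers
 * the buffer's base address from a descriptor that may carry an offset
 * in its upper bits (unaligned chunk mode).
 */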
static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_fq = 0;
	int ret;

	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
	if (!rcvd) {
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = xsk_umem__extract_addr(addr);
		char *pkt;

		addr = xsk_umem__add_offset_to_addr(addr);
		pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		hex_dump(pkt, len, addr);
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
	xsk->rx_npkts += rcvd;
}

static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i], fds);
	}
}

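/* txonly path: carve BATCH_SIZE descriptors out of the tx ring, point
 * them at successive umem frames (*frame_nb is this socket's cursor into
 * the NUM_FRAMES pre-generated packets), submit, then reap completions.
 */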
static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb)
{
	u32 idx;

	if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) == BATCH_SIZE) {
		unsigned int i;

		for (i = 0; i < BATCH_SIZE; i++) {
			xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr =
				(*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
			xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len =
				sizeof(pkt_data) - 1;
		}

		xsk_ring_prod__submit(&xsk->tx, BATCH_SIZE);
		xsk->outstanding_tx += BATCH_SIZE;

		/* Advance the caller's cursor; passing frame_nb by value
		 * would lose this update and resend the same frames.
		 */
		*frame_nb += BATCH_SIZE;
		*frame_nb %= NUM_FRAMES;
	}

	complete_tx_only(xsk);
}

static void tx_only_all(void)
{
	struct pollfd fds[MAX_SOCKS];
	u32 frame_nb[MAX_SOCKS] = {};
	int i, ret;

	memset(fds, 0, sizeof(fds));
	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;

			if (!(fds[0].revents & POLLOUT))
				continue;
		}

		for (i = 0; i < num_socks; i++)
			tx_only(xsks[i], &frame_nb[i]);
	}
}

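/* l2fwd path: reclaim completed tx buffers, then for each received frame
 * swap the source and destination MACs and queue the same buffer
 * (original descriptor address, offset bits included) on the tx ring.
 */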
static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
{
	unsigned int rcvd, i;
	u32 idx_rx = 0, idx_tx = 0;
	int ret;

	complete_tx_l2fwd(xsk, fds);

	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
	if (!rcvd) {
		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
			ret = poll(fds, num_socks, opt_timeout);
		return;
	}

	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	while (ret != rcvd) {
		if (ret < 0)
			exit_with_error(-ret);
		if (xsk_ring_prod__needs_wakeup(&xsk->tx))
			kick_tx(xsk);
		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
	}

	for (i = 0; i < rcvd; i++) {
		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
		u64 orig = addr;
		char *pkt;

		addr = xsk_umem__add_offset_to_addr(addr);
		pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

		swap_mac_addresses(pkt);

		hex_dump(pkt, len, addr);
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
	}

	xsk_ring_prod__submit(&xsk->tx, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);

	xsk->rx_npkts += rcvd;
	xsk->outstanding_tx += rcvd;
}

static void l2fwd_all(void)
{
	struct pollfd fds[MAX_SOCKS];
	int i, ret;

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
		fds[i].events = POLLOUT | POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, num_socks, opt_timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			l2fwd(xsks[i], fds);
	}
}

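/* Setup order matters: raise RLIMIT_MEMLOCK first (umem pages get
 * locked), mmap the packet buffer (MAP_HUGETLB when --unaligned is set),
 * register it as a umem, create the socket, pre-generate tx frames for
 * txonly, then install signal handlers and run the chosen loop.
 */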
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct xsk_umem_info *umem;
	pthread_t pt;
	void *bufs;
	int ret;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	/* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
	bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size,
		    PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | opt_mmap_flags, -1, 0);
	if (bufs == MAP_FAILED) {
		printf("ERROR: mmap failed\n");
		exit(EXIT_FAILURE);
	}
	/* Create sockets... */
	umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
	xsks[num_socks++] = xsk_configure_socket(umem);

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES; i++)
			(void)gen_eth_frame(umem, i * opt_xsk_frame_size);
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	if (ret)
		exit_with_error(ret);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only_all();
	else
		l2fwd_all();

	return 0;
}