blob: d85c9f608564cf2d6fe0be75020fb63cf282977f [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
David Brazdil0f672f62019-12-10 10:32:29 +00002#include "debug.h"
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003#include "evlist.h"
4#include "evsel.h"
David Brazdil0f672f62019-12-10 10:32:29 +00005#include "target.h"
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006#include "thread_map.h"
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00007#include "tests.h"
David Brazdil0f672f62019-12-10 10:32:29 +00008#include "util/mmap.h"
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009
10#include <errno.h>
11#include <signal.h>
David Brazdil0f672f62019-12-10 10:32:29 +000012#include <linux/string.h>
13#include <perf/cpumap.h>
14#include <perf/evlist.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000015
/*
 * Shared between the signal handlers below and test__task_exit():
 * - exited:  set to 1 from signal context once the workload is gone
 *            (SIGCHLD) or its exec failed (SIGUSR1 path).
 * - nr_exit: count of PERF_RECORD_EXIT events read from the ring
 *            buffer; forced to -1 when the workload failed to exec.
 * NOTE(review): written from signal handlers and read in the main
 * flow — strictly these should be volatile sig_atomic_t; kept as
 * plain int to match the existing perf test convention. TODO confirm.
 */
static int exited;
static int nr_exit;
19static void sig_handler(int sig __maybe_unused)
20{
21 exited = 1;
22}
23
24/*
25 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
26 * we asked by setting its exec_error to this handler.
27 */
28static void workload_exec_failed_signal(int signo __maybe_unused,
29 siginfo_t *info __maybe_unused,
30 void *ucontext __maybe_unused)
31{
32 exited = 1;
33 nr_exit = -1;
34}
35
/*
 * This test will start a workload that does nothing then it checks
 * if the number of exit events reported by the kernel is 1 or not
 * in order to check the kernel returns the correct number of events.
 */
int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct evsel *evsel;
	struct evlist *evlist;
	struct target target = {
		.uid = UINT_MAX,
		.uses_mmap = true,
	};
	const char *argv[] = { "true", NULL };	/* the do-nothing workload */
	char sbuf[STRERR_BUFSIZE];
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	struct mmap *md;
	int retry_count = 0;

	/* SIGCHLD from the forked workload sets 'exited'. */
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = perf_cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/*
	 * Ownership of the maps moved to evlist above; NULL the locals so
	 * the out_free_maps puts below become no-ops and evlist__delete()
	 * does not double-put them.
	 */
	cpus = NULL;
	threads = NULL;

	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	evsel = evlist__first(evlist);
	evsel->core.attr.task = 1;	/* request task (fork/exit) records */
#ifdef __s390x__
	evsel->core.attr.sample_freq = 1000000;
#else
	evsel->core.attr.sample_freq = 1;
#endif
	evsel->core.attr.inherit = 0;
	evsel->core.attr.watermark = 0;
	evsel->core.attr.wakeup_events = 1;	/* wake poll() on every event */
	evsel->core.attr.exclude_kernel = 1;

	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 str_error_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	if (evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		err = -1;
		goto out_delete_evlist;
	}

	perf_evlist__start_workload(evlist);

retry:
	/* Drain the first (only) ring buffer, counting EXIT records. */
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(md) < 0)
		goto out_init;	/* ring empty right now; maybe wait below */

	while ((event = perf_mmap__read_event(md)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_mmap__consume(md);
	}
	perf_mmap__read_done(md);

out_init:
	/*
	 * Keep polling until both the SIGCHLD arrived and at least one
	 * EXIT record was read (nr_exit is -1 if the exec failed, which
	 * also ends the loop). Bound the retries so a lost event cannot
	 * hang the test forever.
	 */
	if (!exited || !nr_exit) {
		evlist__poll(evlist, -1);

		if (retry_count++ > 1000) {
			pr_debug("Failed after retrying 1000 times\n");
			err = -1;
			goto out_free_maps;
		}

		goto retry;
	}

	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

out_free_maps:
	/* Both puts are no-ops when the maps were handed to evlist (NULL). */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
out_delete_evlist:
	evlist__delete(evlist);
	return err;
}