Linux v4.19.13 snapshot of tools/perf/scripts/python (the perf script Python support scripts), shown as the diff that adds each file.
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
new file mode 100644
index 0000000..aefc15c
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -0,0 +1,3 @@
+libperf-y += Context.o
+
+CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
new file mode 100644
index 0000000..1a0d277
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -0,0 +1,116 @@
+/*
+ * Context.c.  Python interfaces for perf script.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <Python.h>
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
+
+#if PY_MAJOR_VERSION < 3
+#define _PyCapsule_GetPointer(arg1, arg2) \
+  PyCObject_AsVoidPtr(arg1)
+
+PyMODINIT_FUNC initperf_trace_context(void);
+#else
+#define _PyCapsule_GetPointer(arg1, arg2) \
+  PyCapsule_GetPointer((arg1), (arg2))
+
+PyMODINIT_FUNC PyInit_perf_trace_context(void);
+#endif
+
+static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+{
+	static struct scripting_context *scripting_context;
+	PyObject *context;
+	int retval;
+
+	if (!PyArg_ParseTuple(args, "O", &context))
+		return NULL;
+
+	scripting_context = _PyCapsule_GetPointer(context, NULL);
+	retval = common_pc(scripting_context);
+
+	return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_flags(PyObject *obj,
+						 PyObject *args)
+{
+	static struct scripting_context *scripting_context;
+	PyObject *context;
+	int retval;
+
+	if (!PyArg_ParseTuple(args, "O", &context))
+		return NULL;
+
+	scripting_context = _PyCapsule_GetPointer(context, NULL);
+	retval = common_flags(scripting_context);
+
+	return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
+						      PyObject *args)
+{
+	static struct scripting_context *scripting_context;
+	PyObject *context;
+	int retval;
+
+	if (!PyArg_ParseTuple(args, "O", &context))
+		return NULL;
+
+	scripting_context = _PyCapsule_GetPointer(context, NULL);
+	retval = common_lock_depth(scripting_context);
+
+	return Py_BuildValue("i", retval);
+}
+
+static PyMethodDef ContextMethods[] = {
+	{ "common_pc", perf_trace_context_common_pc, METH_VARARGS,
+	  "Get the common preempt count event field value."},
+	{ "common_flags", perf_trace_context_common_flags, METH_VARARGS,
+	  "Get the common flags event field value."},
+	{ "common_lock_depth", perf_trace_context_common_lock_depth,
+	  METH_VARARGS,	"Get the common lock depth event field value."},
+	{ NULL, NULL, 0, NULL}
+};
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initperf_trace_context(void)
+{
+	(void) Py_InitModule("perf_trace_context", ContextMethods);
+}
+#else
+PyMODINIT_FUNC PyInit_perf_trace_context(void)
+{
+	static struct PyModuleDef moduledef = {
+		PyModuleDef_HEAD_INIT,
+		"perf_trace_context",	/* m_name */
+		"",			/* m_doc */
+		-1,			/* m_size */
+		ContextMethods,		/* m_methods */
+		NULL,			/* m_reload */
+		NULL,			/* m_traverse */
+		NULL,			/* m_clear */
+		NULL,			/* m_free */
+	};
+	return PyModule_Create(&moduledef);
+}
+#endif
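
A minimal usage sketch (not part of the patch; the handler and its event are only an example): a perf script event handler receives an opaque context object from the perf binary and hands it back to this module to read the common fields that are not passed as handler arguments:

    from perf_trace_context import common_pc, common_flags, common_lock_depth

    def irq__softirq_entry(event_name, context, common_cpu, common_secs,
                           common_nsecs, common_pid, common_comm,
                           common_callchain, vec):
        # 'context' is the capsule (PyCObject on Python 2, PyCapsule on
        # Python 3) created by perf; these calls fetch common_* fields
        # that are not in the argument list.
        print("pc=%d flags=%#x lock_depth=%d" %
              (common_pc(context), common_flags(context),
               common_lock_depth(context)))
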
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
new file mode 100644
index 0000000..54ace2f
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -0,0 +1,116 @@
+# Core.py - Python extension for perf script, core functions
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+from collections import defaultdict
+
+def autodict():
+    return defaultdict(autodict)
+
+flag_fields = autodict()
+symbolic_fields = autodict()
+
+def define_flag_field(event_name, field_name, delim):
+    flag_fields[event_name][field_name]['delim'] = delim
+
+def define_flag_value(event_name, field_name, value, field_str):
+    flag_fields[event_name][field_name]['values'][value] = field_str
+
+def define_symbolic_field(event_name, field_name):
+    # nothing to do, really
+    pass
+
+def define_symbolic_value(event_name, field_name, value, field_str):
+    symbolic_fields[event_name][field_name]['values'][value] = field_str
+
+def flag_str(event_name, field_name, value):
+    string = ""
+
+    if flag_fields[event_name][field_name]:
+        print_delim = 0
+        for idx in sorted(flag_fields[event_name][field_name]['values']):
+            if not value and not idx:
+                string += flag_fields[event_name][field_name]['values'][idx]
+                break
+            if idx and (value & idx) == idx:
+                if print_delim and flag_fields[event_name][field_name]['delim']:
+                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
+                string += flag_fields[event_name][field_name]['values'][idx]
+                print_delim = 1
+                value &= ~idx
+
+    return string
+
+def symbol_str(event_name, field_name, value):
+    string = ""
+
+    if symbolic_fields[event_name][field_name]:
+        for idx in sorted(symbolic_fields[event_name][field_name]['values']):
+            if not value and not idx:
+                string = symbolic_fields[event_name][field_name]['values'][idx]
+                break
+            if (value == idx):
+                string = symbolic_fields[event_name][field_name]['values'][idx]
+                break
+
+    return string
+
+trace_flags = { 0x00: "NONE", \
+                    0x01: "IRQS_OFF", \
+                    0x02: "IRQS_NOSUPPORT", \
+                    0x04: "NEED_RESCHED", \
+                    0x08: "HARDIRQ", \
+                    0x10: "SOFTIRQ" }
+
+def trace_flag_str(value):
+    string = ""
+    print_delim = 0
+
+    for idx in trace_flags:
+        if not value and not idx:
+            string += "NONE"
+            break
+
+        if idx and (value & idx) == idx:
+            if print_delim:
+                string += " | ";
+            string += trace_flags[idx]
+            print_delim = 1
+            value &= ~idx
+
+    return string
+
+
+def taskState(state):
+	states = {
+		0 : "R",
+		1 : "S",
+		2 : "D",
+		64: "DEAD"
+	}
+
+	if state not in states:
+		return "Unknown"
+
+	return states[state]
+
+
+class EventHeaders:
+	def __init__(self, common_cpu, common_secs, common_nsecs,
+		     common_pid, common_comm, common_callchain):
+		self.cpu = common_cpu
+		self.secs = common_secs
+		self.nsecs = common_nsecs
+		self.pid = common_pid
+		self.comm = common_comm
+		self.callchain = common_callchain
+
+	def ts(self):
+		return (self.secs * (10 ** 9)) + self.nsecs
+
+	def ts_format(self):
+		return "%d.%d" % (self.secs, int(self.nsecs / 1000))
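
A rough illustration of how the helpers above fit together (the flag value names are made up; in practice the define_* calls are emitted by the perf-generated trace metadata code):

    from Core import define_flag_field, define_flag_value, flag_str, trace_flag_str

    define_flag_field("kmem__kmalloc", "gfp_flags", "|")
    define_flag_value("kmem__kmalloc", "gfp_flags", 1, "GFP_A")
    define_flag_value("kmem__kmalloc", "gfp_flags", 2, "GFP_B")

    flag_str("kmem__kmalloc", "gfp_flags", 3)   # -> "GFP_A | GFP_B"
    trace_flag_str(0x05)                        # -> e.g. "IRQS_OFF | NEED_RESCHED"
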
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
new file mode 100755
index 0000000..21a7a12
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
@@ -0,0 +1,97 @@
+# EventClass.py
+# SPDX-License-Identifier: GPL-2.0
+#
+# This is a library defining some event type classes, which can be
+# used by other scripts to analyze perf samples.
+#
+# Currently there are just a few classes defined as examples:
+# PerfEvent is the base class for all perf event samples, PebsEvent
+# is a hardware-based Intel x86 PEBS event, and users can add more
+# SW/HW event classes as required.
+from __future__ import print_function
+
+import struct
+
+# Event types, user could add more here
+EVTYPE_GENERIC  = 0
+EVTYPE_PEBS     = 1     # Basic PEBS event
+EVTYPE_PEBS_LL  = 2     # PEBS event with load latency info
+EVTYPE_IBS      = 3
+
+#
+# Currently we don't have a good way to tell the event type other than by
+# the size of the raw buffer: a raw PEBS event with load latency data is
+# 176 bytes, while a plain PEBS event is 144 bytes.
+#
+def create_event(name, comm, dso, symbol, raw_buf):
+        if (len(raw_buf) == 144):
+                event = PebsEvent(name, comm, dso, symbol, raw_buf)
+        elif (len(raw_buf) == 176):
+                event = PebsNHM(name, comm, dso, symbol, raw_buf)
+        else:
+                event = PerfEvent(name, comm, dso, symbol, raw_buf)
+
+        return event
+
+class PerfEvent(object):
+        event_num = 0
+        def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
+                self.name       = name
+                self.comm       = comm
+                self.dso        = dso
+                self.symbol     = symbol
+                self.raw_buf    = raw_buf
+                self.ev_type    = ev_type
+                PerfEvent.event_num += 1
+
+        def show(self):
+                print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+                      (self.name, self.symbol, self.comm, self.dso))
+
+#
+# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
+# contains the context info when that event happened: the EFLAGS and
+# linear IP info, as well as all the registers.
+#
+class PebsEvent(PerfEvent):
+        pebs_num = 0
+        def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
+                tmp_buf=raw_buf[0:80]
+                flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
+                self.flags = flags
+                self.ip    = ip
+                self.ax    = ax
+                self.bx    = bx
+                self.cx    = cx
+                self.dx    = dx
+                self.si    = si
+                self.di    = di
+                self.bp    = bp
+                self.sp    = sp
+
+                PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+                PebsEvent.pebs_num += 1
+                del tmp_buf
+
+#
+# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
+# in the four 64-bit words written after the PEBS data:
+#       Status: records the IA32_PERF_GLOBAL_STATUS register value
+#       DLA:    Data Linear Address (EIP)
+#       DSE:    Data Source Encoding, where the latency happens, hit or miss
+#               in L1/L2/L3 or IO operations
+#       LAT:    the actual latency in cycles
+#
+class PebsNHM(PebsEvent):
+        pebs_nhm_num = 0
+        def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
+                tmp_buf=raw_buf[144:176]
+                status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
+                self.status = status
+                self.dla = dla
+                self.dse = dse
+                self.lat = lat
+
+                PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+                PebsNHM.pebs_nhm_num += 1
+                del tmp_buf
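
A sketch of how a consumer script might use this module; the IbsEvent subclass is hypothetical, and the param_dict keys are assumed to match the ones event_analyzing_sample.py reads:

    from EventClass import PerfEvent, create_event, EVTYPE_IBS

    # Hypothetical user-defined event class, as suggested in the header comment.
    class IbsEvent(PerfEvent):
        def __init__(self, name, comm, dso, symbol, raw_buf):
            PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, EVTYPE_IBS)

    def process_event(param_dict):
        # create_event() picks PebsEvent/PebsNHM/PerfEvent by raw-buffer size.
        event = create_event(param_dict["ev_name"], param_dict["comm"],
                             param_dict["dso"], param_dict["symbol"],
                             param_dict["raw_buf"])
        event.show()
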
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
new file mode 100644
index 0000000..cac7b25
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
@@ -0,0 +1,184 @@
+# SchedGui.py - Python extension for perf script, basic GUI code for
+#		trace drawing and overview.
+#
+# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+
+try:
+	import wx
+except ImportError:
+	raise ImportError("You need to install the wxpython lib for this script")
+
+
+class RootFrame(wx.Frame):
+	Y_OFFSET = 100
+	RECT_HEIGHT = 100
+	RECT_SPACE = 50
+	EVENT_MARKING_WIDTH = 5
+
+	def __init__(self, sched_tracer, title, parent = None, id = -1):
+		wx.Frame.__init__(self, parent, id, title)
+
+		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
+		self.screen_width -= 10
+		self.screen_height -= 10
+		self.zoom = 0.5
+		self.scroll_scale = 20
+		self.sched_tracer = sched_tracer
+		self.sched_tracer.set_root_win(self)
+		(self.ts_start, self.ts_end) = sched_tracer.interval()
+		self.update_width_virtual()
+		self.nr_rects = sched_tracer.nr_rectangles() + 1
+		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+
+		# whole window panel
+		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
+
+		# scrollable container
+		self.scroll = wx.ScrolledWindow(self.panel)
+		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
+		self.scroll.EnableScrolling(True, True)
+		self.scroll.SetFocus()
+
+		# scrollable drawing area
+		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
+		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
+		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
+		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+
+		self.scroll.Fit()
+		self.Fit()
+
+		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
+
+		self.txt = None
+
+		self.Show(True)
+
+	def us_to_px(self, val):
+		return val / (10 ** 3) * self.zoom
+
+	def px_to_us(self, val):
+		return (val / self.zoom) * (10 ** 3)
+
+	def scroll_start(self):
+		(x, y) = self.scroll.GetViewStart()
+		return (x * self.scroll_scale, y * self.scroll_scale)
+
+	def scroll_start_us(self):
+		(x, y) = self.scroll_start()
+		return self.px_to_us(x)
+
+	def paint_rectangle_zone(self, nr, color, top_color, start, end):
+		offset_px = self.us_to_px(start - self.ts_start)
+		width_px = self.us_to_px(end - self.ts_start)
+
+		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+		width_py = RootFrame.RECT_HEIGHT
+
+		dc = self.dc
+
+		if top_color is not None:
+			(r, g, b) = top_color
+			top_color = wx.Colour(r, g, b)
+			brush = wx.Brush(top_color, wx.SOLID)
+			dc.SetBrush(brush)
+			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
+			width_py -= RootFrame.EVENT_MARKING_WIDTH
+			offset_py += RootFrame.EVENT_MARKING_WIDTH
+
+		(r ,g, b) = color
+		color = wx.Colour(r, g, b)
+		brush = wx.Brush(color, wx.SOLID)
+		dc.SetBrush(brush)
+		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
+
+	def update_rectangles(self, dc, start, end):
+		start += self.ts_start
+		end += self.ts_start
+		self.sched_tracer.fill_zone(start, end)
+
+	def on_paint(self, event):
+		dc = wx.PaintDC(self.scroll_panel)
+		self.dc = dc
+
+		width = min(self.width_virtual, self.screen_width)
+		(x, y) = self.scroll_start()
+		start = self.px_to_us(x)
+		end = self.px_to_us(x + width)
+		self.update_rectangles(dc, start, end)
+
+	def rect_from_ypixel(self, y):
+		y -= RootFrame.Y_OFFSET
+		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+
+		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
+			return -1
+
+		return rect
+
+	def update_summary(self, txt):
+		if self.txt:
+			self.txt.Destroy()
+		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
+
+
+	def on_mouse_down(self, event):
+		(x, y) = event.GetPositionTuple()
+		rect = self.rect_from_ypixel(y)
+		if rect == -1:
+			return
+
+		t = self.px_to_us(x) + self.ts_start
+
+		self.sched_tracer.mouse_down(rect, t)
+
+
+	def update_width_virtual(self):
+		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
+
+	def __zoom(self, x):
+		self.update_width_virtual()
+		(xpos, ypos) = self.scroll.GetViewStart()
+		xpos = self.us_to_px(x) / self.scroll_scale
+		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
+		self.Refresh()
+
+	def zoom_in(self):
+		x = self.scroll_start_us()
+		self.zoom *= 2
+		self.__zoom(x)
+
+	def zoom_out(self):
+		x = self.scroll_start_us()
+		self.zoom /= 2
+		self.__zoom(x)
+
+
+	def on_key_press(self, event):
+		key = event.GetRawKeyCode()
+		if key == ord("+"):
+			self.zoom_in()
+			return
+		if key == ord("-"):
+			self.zoom_out()
+			return
+
+		key = event.GetKeyCode()
+		(x, y) = self.scroll.GetViewStart()
+		if key == wx.WXK_RIGHT:
+			self.scroll.Scroll(x + 1, y)
+		elif key == wx.WXK_LEFT:
+			self.scroll.Scroll(x - 1, y)
+		elif key == wx.WXK_DOWN:
+			self.scroll.Scroll(x, y + 1)
+		elif key == wx.WXK_UP:
+			self.scroll.Scroll(x, y - 1)
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
new file mode 100644
index 0000000..7384dcb
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -0,0 +1,91 @@
+# Util.py - Python extension for perf script, miscellaneous utility code
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+from __future__ import print_function
+
+import errno, os
+
+FUTEX_WAIT = 0
+FUTEX_WAKE = 1
+FUTEX_PRIVATE_FLAG = 128
+FUTEX_CLOCK_REALTIME = 256
+FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
+NSECS_PER_SEC    = 1000000000
+
+def avg(total, n):
+    return total / n
+
+def nsecs(secs, nsecs):
+    return secs * NSECS_PER_SEC + nsecs
+
+def nsecs_secs(nsecs):
+    return nsecs / NSECS_PER_SEC
+
+def nsecs_nsecs(nsecs):
+    return nsecs % NSECS_PER_SEC
+
+def nsecs_str(nsecs):
+    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
+    return str
+
+def add_stats(dict, key, value):
+	if key not in dict:
+		dict[key] = (value, value, value, 1)
+	else:
+		min, max, avg, count = dict[key]
+		if value < min:
+			min = value
+		if value > max:
+			max = value
+		avg = (avg + value) / 2
+		dict[key] = (min, max, avg, count + 1)
+
+def clear_term():
+    print("\x1b[H\x1b[2J")
+
+audit_package_warned = False
+
+try:
+	import audit
+	machine_to_id = {
+		'x86_64': audit.MACH_86_64,
+		'alpha'	: audit.MACH_ALPHA,
+		'ia64'	: audit.MACH_IA64,
+		'ppc'	: audit.MACH_PPC,
+		'ppc64'	: audit.MACH_PPC64,
+		'ppc64le' : audit.MACH_PPC64LE,
+		's390'	: audit.MACH_S390,
+		's390x'	: audit.MACH_S390X,
+		'i386'	: audit.MACH_X86,
+		'i586'	: audit.MACH_X86,
+		'i686'	: audit.MACH_X86,
+	}
+	try:
+		machine_to_id['armeb'] = audit.MACH_ARMEB
+	except:
+		pass
+	machine_id = machine_to_id[os.uname()[4]]
+except:
+	if not audit_package_warned:
+		audit_package_warned = True
+		print("Install the audit-libs-python package to get syscall names.\n"
+                    "For example:\n  # apt-get install python-audit (Ubuntu)"
+                    "\n  # yum install audit-libs-python (Fedora)"
+                    "\n  etc.\n")
+
+def syscall_name(id):
+	try:
+		return audit.audit_syscall_to_name(id, machine_id)
+	except:
+		return str(id)
+
+def strerror(nr):
+	try:
+		return errno.errorcode[abs(nr)]
+	except:
+		return "Unknown %d errno" % nr
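
A hedged sketch of how the helpers above are typically used from a raw_syscalls:sys_exit handler (the handler signature is assumed here, not defined in this file):

    from Util import syscall_name, strerror

    def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs,
                               common_nsecs, common_pid, common_comm,
                               common_callchain, id, ret):
        if ret < 0:
            # e.g. "openat failed: ENOENT"
            print("%s failed: %s" % (syscall_name(id), strerror(ret)))
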
diff --git a/tools/perf/scripts/python/bin/compaction-times-record b/tools/perf/scripts/python/bin/compaction-times-record
new file mode 100644
index 0000000..6edcd40
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e compaction:mm_compaction_begin -e compaction:mm_compaction_end -e compaction:mm_compaction_migratepages -e compaction:mm_compaction_isolate_migratepages -e compaction:mm_compaction_isolate_freepages $@
diff --git a/tools/perf/scripts/python/bin/compaction-times-report b/tools/perf/scripts/python/bin/compaction-times-report
new file mode 100644
index 0000000..3dc1389
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+#description: display time taken by mm compaction
+#args: [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]
+perf script -s "$PERF_EXEC_PATH"/scripts/python/compaction-times.py $@
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-record b/tools/perf/scripts/python/bin/event_analyzing_sample-record
new file mode 100644
index 0000000..5ce652d
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# event_analyzing_sample.py can cover all types of perf samples, including
+# tracepoints, so there are no special record requirements; just record what
+# you want to analyze.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-report b/tools/perf/scripts/python/bin/event_analyzing_sample-report
new file mode 100644
index 0000000..0941fc9
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: analyze all perf samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
diff --git a/tools/perf/scripts/python/bin/export-to-postgresql-record b/tools/perf/scripts/python/bin/export-to-postgresql-record
new file mode 100644
index 0000000..221d66e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-postgresql-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# export perf data to a postgresql database. Can cover
+# perf ip samples (excluding the tracepoints). No special
+# record requirements, just record what you want to export.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/export-to-postgresql-report b/tools/perf/scripts/python/bin/export-to-postgresql-report
new file mode 100644
index 0000000..cd335b6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-postgresql-report
@@ -0,0 +1,29 @@
+#!/bin/bash
+# description: export perf data to a postgresql database
+# args: [database name] [columns] [calls]
+n_args=0
+for i in "$@"
+do
+    if expr match "$i" "-" > /dev/null ; then
+	break
+    fi
+    n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 3 ] ; then
+    echo "usage: export-to-postgresql-report [database name] [columns] [calls]"
+    exit
+fi
+if [ "$n_args" -gt 2 ] ; then
+    dbname=$1
+    columns=$2
+    calls=$3
+    shift 3
+elif [ "$n_args" -gt 1 ] ; then
+    dbname=$1
+    columns=$2
+    shift 2
+elif [ "$n_args" -gt 0 ] ; then
+    dbname=$1
+    shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-postgresql.py $dbname $columns $calls
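
An illustrative invocation matching the [database name] [columns] [calls] arguments parsed above; the database name pt_example echoes the call-graph-from-sql.py example later in this patch, and the column/call options are just one plausible choice:

    perf script record export-to-postgresql -- sleep 1
    perf script report export-to-postgresql pt_example branches calls
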
diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-record b/tools/perf/scripts/python/bin/export-to-sqlite-record
new file mode 100644
index 0000000..070204f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-sqlite-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# export perf data to a sqlite3 database. Can cover
+# perf ip samples (excluding the tracepoints). No special
+# record requirements, just record what you want to export.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-report b/tools/perf/scripts/python/bin/export-to-sqlite-report
new file mode 100644
index 0000000..5ff6033
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-sqlite-report
@@ -0,0 +1,29 @@
+#!/bin/bash
+# description: export perf data to a sqlite3 database
+# args: [database name] [columns] [calls]
+n_args=0
+for i in "$@"
+do
+    if expr match "$i" "-" > /dev/null ; then
+	break
+    fi
+    n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 3 ] ; then
+    echo "usage: export-to-sqlite-report [database name] [columns] [calls]"
+    exit
+fi
+if [ "$n_args" -gt 2 ] ; then
+    dbname=$1
+    columns=$2
+    calls=$3
+    shift 3
+elif [ "$n_args" -gt 1 ] ; then
+    dbname=$1
+    columns=$2
+    shift 2
+elif [ "$n_args" -gt 0 ] ; then
+    dbname=$1
+    shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-sqlite.py $dbname $columns $calls
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
new file mode 100644
index 0000000..74685f3
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
new file mode 100644
index 0000000..fda5096
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide failed syscalls, by pid
+# args: [comm]
+if [ $# -gt 0 ] ; then
+    if ! expr match "$1" "-" > /dev/null ; then
+	comm=$1
+	shift
+    fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
new file mode 100644
index 0000000..b1495c9
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-report b/tools/perf/scripts/python/bin/futex-contention-report
new file mode 100644
index 0000000..6c44271
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: futex contention measurement
+
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-record b/tools/perf/scripts/python/bin/intel-pt-events-record
new file mode 100644
index 0000000..10fe2b6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-record
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+#
+# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
+# to be specified with appropriate config terms.
+#
+if ! echo "$@" | grep -q intel_pt ; then
+	echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
+	echo "and for power events it probably needs to be system wide i.e. -a option"
+	echo "For example: -a -e intel_pt/pwr_evt,branch=0/ sleep 1"
+	exit 1
+fi
+perf record $@
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-report b/tools/perf/scripts/python/bin/intel-pt-events-report
new file mode 100644
index 0000000..9a9c92f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: print Intel PT Power Events and PTWRITE
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py
\ No newline at end of file
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-record b/tools/perf/scripts/python/bin/mem-phys-addr-record
new file mode 100644
index 0000000..5a87512
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-record
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+#
+# Profile physical memory using the retired load instructions/uops event
+# MEM_INST_RETIRED.ALL_LOADS or MEM_UOPS_RETIRED.ALL_LOADS
+#
+
+load=`perf list | grep mem_inst_retired.all_loads`
+if [ -z "$load" ]; then
+	load=`perf list | grep mem_uops_retired.all_loads`
+fi
+if [ -z "$load" ]; then
+	echo "There is no event to count all retired load instructions/uops."
+	exit 1
+fi
+
+arg=$(echo $load | tr -d ' ')
+arg="$arg:P"
+perf record --phys-data -e $arg $@
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-report b/tools/perf/scripts/python/bin/mem-phys-addr-report
new file mode 100644
index 0000000..3f2b847
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: resolve physical address samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/mem-phys-addr.py
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-record b/tools/perf/scripts/python/bin/net_dropmonitor-record
new file mode 100755
index 0000000..423fb81
--- /dev/null
+++ b/tools/perf/scripts/python/bin/net_dropmonitor-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e skb:kfree_skb $@
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-report b/tools/perf/scripts/python/bin/net_dropmonitor-report
new file mode 100755
index 0000000..8d698f5
--- /dev/null
+++ b/tools/perf/scripts/python/bin/net_dropmonitor-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: display a table of dropped frames
+
+perf script -s "$PERF_EXEC_PATH"/scripts/python/net_dropmonitor.py $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
new file mode 100644
index 0000000..558754b
--- /dev/null
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+perf record -e net:net_dev_xmit -e net:net_dev_queue		\
+		-e net:netif_receive_skb -e net:netif_rx		\
+		-e skb:consume_skb -e skb:kfree_skb			\
+		-e skb:skb_copy_datagram_iovec -e napi:napi_poll	\
+		-e irq:irq_handler_entry -e irq:irq_handler_exit	\
+		-e irq:softirq_entry -e irq:softirq_exit		\
+		-e irq:softirq_raise $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report
new file mode 100644
index 0000000..8f75929
--- /dev/null
+++ b/tools/perf/scripts/python/bin/netdev-times-report
@@ -0,0 +1,5 @@
+#!/bin/bash
+# description: display the flow of packets and their processing time
+# args: [tx] [rx] [dev=] [debug]
+
+perf script -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-record b/tools/perf/scripts/python/bin/powerpc-hcalls-record
new file mode 100644
index 0000000..b7402aa
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e "{powerpc:hcall_entry,powerpc:hcall_exit}" $@
diff --git a/tools/perf/scripts/python/bin/powerpc-hcalls-report b/tools/perf/scripts/python/bin/powerpc-hcalls-report
new file mode 100644
index 0000000..dd32ad7
--- /dev/null
+++ b/tools/perf/scripts/python/bin/powerpc-hcalls-report
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/powerpc-hcalls.py
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
new file mode 100644
index 0000000..7493fdd
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sched-migration-report b/tools/perf/scripts/python/bin/sched-migration-report
new file mode 100644
index 0000000..68b037a
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: sched migration overview
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
new file mode 100644
index 0000000..d694084
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/sctop-report b/tools/perf/scripts/python/bin/sctop-report
new file mode 100644
index 0000000..c32db29
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-report
@@ -0,0 +1,24 @@
+#!/bin/bash
+# description: syscall top
+# args: [comm] [interval]
+n_args=0
+for i in "$@"
+do
+    if expr match "$i" "-" > /dev/null ; then
+	break
+    fi
+    n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 2 ] ; then
+    echo "usage: sctop-report [comm] [interval]"
+    exit
+fi
+if [ "$n_args" -gt 1 ] ; then
+    comm=$1
+    interval=$2
+    shift 2
+elif [ "$n_args" -gt 0 ] ; then
+    interval=$1
+    shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval
diff --git a/tools/perf/scripts/python/bin/stackcollapse-record b/tools/perf/scripts/python/bin/stackcollapse-record
new file mode 100755
index 0000000..9d8f9f0
--- /dev/null
+++ b/tools/perf/scripts/python/bin/stackcollapse-record
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+#
+# stackcollapse.py can cover all types of perf samples, including
+# tracepoints, so there are no special record requirements; just record what
+# you want to analyze.
+#
+perf record "$@"
diff --git a/tools/perf/scripts/python/bin/stackcollapse-report b/tools/perf/scripts/python/bin/stackcollapse-report
new file mode 100755
index 0000000..356b965
--- /dev/null
+++ b/tools/perf/scripts/python/bin/stackcollapse-report
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: produce callgraphs in short form for scripting use
+perf script -s "$PERF_EXEC_PATH"/scripts/python/stackcollapse.py -- "$@"
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
new file mode 100644
index 0000000..d694084
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
new file mode 100644
index 0000000..16eb8d6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide syscall counts, by pid
+# args: [comm]
+if [ $# -gt 0 ] ; then
+    if ! expr match "$1" "-" > /dev/null ; then
+	comm=$1
+	shift
+    fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
new file mode 100644
index 0000000..d694084
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
new file mode 100644
index 0000000..0f0e9d4
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide syscall counts
+# args: [comm]
+if [ $# -gt 0 ] ; then
+    if ! expr match "$1" "-" > /dev/null ; then
+	comm=$1
+	shift
+    fi
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
diff --git a/tools/perf/scripts/python/call-graph-from-sql.py b/tools/perf/scripts/python/call-graph-from-sql.py
new file mode 100644
index 0000000..b494a67
--- /dev/null
+++ b/tools/perf/scripts/python/call-graph-from-sql.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python2
+# call-graph-from-sql.py: create call-graph from sql database
+# Copyright (c) 2014-2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+# To use this script you will need to have exported data using either the
+# export-to-sqlite.py or the export-to-postgresql.py script.  Refer to those
+# scripts for details.
+#
+# Following on from the example in the export scripts, a
+# call-graph can be displayed for the pt_example database like this:
+#
+#	python tools/perf/scripts/python/call-graph-from-sql.py pt_example
+#
+# Note that for PostgreSQL, this script supports connecting to remote databases
+# by setting hostname, port, username, password, and dbname e.g.
+#
+#	python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
+#
+# The result is a GUI window with a tree representing a context-sensitive
+# call-graph.  Expanding a couple of levels of the tree and adjusting column
+# widths to suit will display something like:
+#
+#                                         Call Graph: pt_example
+# Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
+# v- ls
+#     v- 2638:2638
+#         v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
+#           |- unknown               unknown       1        13198     0.1              1              0.0
+#           >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
+#           >- _d_linit_internal     ld-2.19.so    1       448152     4.4          11094              5.3
+#           v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
+#              >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
+#              >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
+#              >- __libc_csu_init    ls            1        10354     0.1             10              0.0
+#              |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
+#              v- main               ls            1      8182043    99.6         180254             99.9
+#
+# Points to note:
+#	The top level is a command name (comm)
+#	The next level is a thread (pid:tid)
+#	Subsequent levels are functions
+#	'Count' is the number of calls
+#	'Time' is the elapsed time until the function returns
+#	Percentages are relative to the level above
+#	'Branch Count' is the total number of branches for that function and all
+#       functions that it calls
+
+import sys
+from PySide.QtCore import *
+from PySide.QtGui import *
+from PySide.QtSql import *
+from decimal import *
+
+class TreeItem():
+
+	def __init__(self, db, row, parent_item):
+		self.db = db
+		self.row = row
+		self.parent_item = parent_item
+		self.query_done = False;
+		self.child_count = 0
+		self.child_items = []
+		self.data = ["", "", "", "", "", "", ""]
+		self.comm_id = 0
+		self.thread_id = 0
+		self.call_path_id = 1
+		self.branch_count = 0
+		self.time = 0
+		if not parent_item:
+			self.setUpRoot()
+
+	def setUpRoot(self):
+		self.query_done = True
+		query = QSqlQuery(self.db)
+		ret = query.exec_('SELECT id, comm FROM comms')
+		if not ret:
+			raise Exception("Query failed: " + query.lastError().text())
+		while query.next():
+			if not query.value(0):
+				continue
+			child_item = TreeItem(self.db, self.child_count, self)
+			self.child_items.append(child_item)
+			self.child_count += 1
+			child_item.setUpLevel1(query.value(0), query.value(1))
+
+	def setUpLevel1(self, comm_id, comm):
+		self.query_done = True;
+		self.comm_id = comm_id
+		self.data[0] = comm
+		self.child_items = []
+		self.child_count = 0
+		query = QSqlQuery(self.db)
+		ret = query.exec_('SELECT thread_id, ( SELECT pid FROM threads WHERE id = thread_id ), ( SELECT tid FROM threads WHERE id = thread_id ) FROM comm_threads WHERE comm_id = ' + str(comm_id))
+		if not ret:
+			raise Exception("Query failed: " + query.lastError().text())
+		while query.next():
+			child_item = TreeItem(self.db, self.child_count, self)
+			self.child_items.append(child_item)
+			self.child_count += 1
+			child_item.setUpLevel2(comm_id, query.value(0), query.value(1), query.value(2))
+
+	def setUpLevel2(self, comm_id, thread_id, pid, tid):
+		self.comm_id = comm_id
+		self.thread_id = thread_id
+		self.data[0] = str(pid) + ":" + str(tid)
+
+	def getChildItem(self, row):
+		return self.child_items[row]
+
+	def getParentItem(self):
+		return self.parent_item
+
+	def getRow(self):
+		return self.row
+
+	def timePercent(self, b):
+		if not self.time:
+			return "0.0"
+		x = (b * Decimal(100)) / self.time
+		return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))
+
+	def branchPercent(self, b):
+		if not self.branch_count:
+			return "0.0"
+		x = (b * Decimal(100)) / self.branch_count
+		return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))
+
+	def addChild(self, call_path_id, name, dso, count, time, branch_count):
+		child_item = TreeItem(self.db, self.child_count, self)
+		child_item.comm_id = self.comm_id
+		child_item.thread_id = self.thread_id
+		child_item.call_path_id = call_path_id
+		child_item.branch_count = branch_count
+		child_item.time = time
+		child_item.data[0] = name
+		if dso == "[kernel.kallsyms]":
+			dso = "[kernel]"
+		child_item.data[1] = dso
+		child_item.data[2] = str(count)
+		child_item.data[3] = str(time)
+		child_item.data[4] = self.timePercent(time)
+		child_item.data[5] = str(branch_count)
+		child_item.data[6] = self.branchPercent(branch_count)
+		self.child_items.append(child_item)
+		self.child_count += 1
+
+	def selectCalls(self):
+		self.query_done = True;
+		query = QSqlQuery(self.db)
+		ret = query.exec_('SELECT id, call_path_id, branch_count, call_time, return_time, '
+				  '( SELECT name FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ), '
+				  '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), '
+				  '( SELECT ip FROM call_paths where id = call_path_id ) '
+				  'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) +
+				  ' ORDER BY call_path_id')
+		if not ret:
+			raise Exception("Query failed: " + query.lastError().text())
+		last_call_path_id = 0
+		name = ""
+		dso = ""
+		count = 0
+		branch_count = 0
+		total_branch_count = 0
+		time = 0
+		total_time = 0
+		while query.next():
+			if query.value(1) == last_call_path_id:
+				count += 1
+				branch_count += query.value(2)
+				time += query.value(4) - query.value(3)
+			else:
+				if count:
+					self.addChild(last_call_path_id, name, dso, count, time, branch_count)
+				last_call_path_id = query.value(1)
+				name = query.value(5)
+				dso = query.value(6)
+				count = 1
+				total_branch_count += branch_count
+				total_time += time
+				branch_count = query.value(2)
+				time = query.value(4) - query.value(3)
+		if count:
+			self.addChild(last_call_path_id, name, dso, count, time, branch_count)
+		total_branch_count += branch_count
+		total_time += time
+		# Top level does not have time or branch count, so fix that here
+		if total_branch_count > self.branch_count:
+			self.branch_count = total_branch_count
+			if self.branch_count:
+				for child_item in self.child_items:
+					child_item.data[6] = self.branchPercent(child_item.branch_count)
+		if total_time > self.time:
+			self.time = total_time
+			if self.time:
+				for child_item in self.child_items:
+					child_item.data[4] = self.timePercent(child_item.time)
+
+	def childCount(self):
+		if not self.query_done:
+			self.selectCalls()
+		return self.child_count
+
+	def columnCount(self):
+		return 7
+
+	def columnHeader(self, column):
+		headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+		return headers[column]
+
+	def getData(self, column):
+		return self.data[column]
+
+class TreeModel(QAbstractItemModel):
+
+	def __init__(self, db, parent=None):
+		super(TreeModel, self).__init__(parent)
+		self.db = db
+		self.root = TreeItem(db, 0, None)
+
+	def columnCount(self, parent):
+		return self.root.columnCount()
+
+	def rowCount(self, parent):
+		if parent.isValid():
+			parent_item = parent.internalPointer()
+		else:
+			parent_item = self.root
+		return parent_item.childCount()
+
+	def headerData(self, section, orientation, role):
+		if role == Qt.TextAlignmentRole:
+			if section > 1:
+				return Qt.AlignRight
+		if role != Qt.DisplayRole:
+			return None
+		if orientation != Qt.Horizontal:
+			return None
+		return self.root.columnHeader(section)
+
+	def parent(self, child):
+		child_item = child.internalPointer()
+		if child_item is self.root:
+			return QModelIndex()
+		parent_item = child_item.getParentItem()
+		return self.createIndex(parent_item.getRow(), 0, parent_item)
+
+	def index(self, row, column, parent):
+		if parent.isValid():
+			parent_item = parent.internalPointer()
+		else:
+			parent_item = self.root
+		child_item = parent_item.getChildItem(row)
+		return self.createIndex(row, column, child_item)
+
+	def data(self, index, role):
+		if role == Qt.TextAlignmentRole:
+			if index.column() > 1:
+				return Qt.AlignRight
+		if role != Qt.DisplayRole:
+			return None
+		index_item = index.internalPointer()
+		return index_item.getData(index.column())
+
+class MainWindow(QMainWindow):
+
+	def __init__(self, db, dbname, parent=None):
+		super(MainWindow, self).__init__(parent)
+
+		self.setObjectName("MainWindow")
+		self.setWindowTitle("Call Graph: " + dbname)
+		self.move(100, 100)
+		self.resize(800, 600)
+		style = self.style()
+		icon = style.standardIcon(QStyle.SP_MessageBoxInformation)
+		self.setWindowIcon(icon);
+
+		self.model = TreeModel(db)
+
+		self.view = QTreeView()
+		self.view.setModel(self.model)
+
+		self.setCentralWidget(self.view)
+
+if __name__ == '__main__':
+	if (len(sys.argv) < 2):
+		print >> sys.stderr, "Usage is: call-graph-from-sql.py <database name>"
+		raise Exception("Too few arguments")
+
+	dbname = sys.argv[1]
+
+	is_sqlite3 = False
+	try:
+		f = open(dbname)
+		if f.read(15) == "SQLite format 3":
+			is_sqlite3 = True
+		f.close()
+	except:
+		pass
+
+	if is_sqlite3:
+		db = QSqlDatabase.addDatabase('QSQLITE')
+	else:
+		db = QSqlDatabase.addDatabase('QPSQL')
+		opts = dbname.split()
+		for opt in opts:
+			if '=' in opt:
+				opt = opt.split('=')
+				if opt[0] == 'hostname':
+					db.setHostName(opt[1])
+				elif opt[0] == 'port':
+					db.setPort(int(opt[1]))
+				elif opt[0] == 'username':
+					db.setUserName(opt[1])
+				elif opt[0] == 'password':
+					db.setPassword(opt[1])
+				elif opt[0] == 'dbname':
+					dbname = opt[1]
+			else:
+				dbname = opt
+
+	db.setDatabaseName(dbname)
+	if not db.open():
+		raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+
+	app = QApplication(sys.argv)
+	window = MainWindow(db, dbname)
+	window.show()
+	err = app.exec_()
+	db.close()
+	sys.exit(err)
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 0000000..334599c
--- /dev/null
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,82 @@
+# perf script event handlers, generated by perf script -g python
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc.  Basically, if this script runs successfully and
+# displays expected results, Python scripting support should be ok.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from Core import *
+from perf_trace_context import *
+
+unhandled = autodict()
+
+def trace_begin():
+	print "trace_begin"
+	pass
+
+def trace_end():
+        print_unhandled()
+
+def irq__softirq_entry(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, vec):
+		print_header(event_name, common_cpu, common_secs, common_nsecs,
+			common_pid, common_comm)
+
+                print_uncommon(context)
+
+		print "vec=%s\n" % \
+		(symbol_str("irq__softirq_entry", "vec", vec)),
+
+def kmem__kmalloc(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+	gfp_flags):
+		print_header(event_name, common_cpu, common_secs, common_nsecs,
+			common_pid, common_comm)
+
+                print_uncommon(context)
+
+		print "call_site=%u, ptr=%u, bytes_req=%u, " \
+		"bytes_alloc=%u, gfp_flags=%s\n" % \
+		(call_site, ptr, bytes_req, bytes_alloc,
+
+		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+
+def trace_unhandled(event_name, context, event_fields_dict):
+    try:
+        unhandled[event_name] += 1
+    except TypeError:
+        unhandled[event_name] = 1
+
+def print_header(event_name, cpu, secs, nsecs, pid, comm):
+	print "%-20s %5u %05u.%09u %8u %-20s " % \
+	(event_name, cpu, secs, nsecs, pid, comm),
+
+# print trace fields not included in handler args
+def print_uncommon(context):
+    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
+        % (common_pc(context), trace_flag_str(common_flags(context)), \
+               common_lock_depth(context))
+
+def print_unhandled():
+    keys = unhandled.keys()
+    if not keys:
+        return
+
+    print "\nunhandled events:\n\n",
+
+    print "%-40s  %10s\n" % ("event", "count"),
+    print "%-40s  %10s\n" % ("----------------------------------------", \
+                                 "-----------"),
+
+    for event_name in keys:
+	print "%-40s  %10d\n" % (event_name, unhandled[event_name])
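
One way to exercise this test script (the recorded events are only suggestions; irq:softirq_entry and kmem:kmalloc have handlers above, anything else ends up in the unhandled-events table):

    perf record -e irq:softirq_entry -e kmem:kmalloc -a -- sleep 1
    perf script -s tools/perf/scripts/python/check-perf-trace.py
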
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
new file mode 100644
index 0000000..239cb05
--- /dev/null
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -0,0 +1,311 @@
+# report time spent in compaction
+# Licensed under the terms of the GNU GPL License version 2
+
+# testing:
+# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
+
+import os
+import sys
+import re
+
+import signal
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
+
+class popt:
+	DISP_DFL = 0
+	DISP_PROC = 1
+	DISP_PROC_VERBOSE=2
+
+class topt:
+	DISP_TIME = 0
+	DISP_MIG = 1
+	DISP_ISOLFREE = 2
+	DISP_ISOLMIG = 4
+	DISP_ALL = 7
+
+class comm_filter:
+	def __init__(self, re):
+		self.re = re
+
+	def filter(self, pid, comm):
+		m = self.re.search(comm)
+		return m == None or m.group() == ""
+
+class pid_filter:
+	def __init__(self, low, high):
+		self.low = (0 if low == "" else int(low))
+		self.high = (0 if high == "" else int(high))
+
+	def filter(self, pid, comm):
+		return not (pid >= self.low and (self.high == 0 or pid <= self.high))
+
+def set_type(t):
+	global opt_disp
+	opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
+
+def ns(sec, nsec):
+	return (sec * 1000000000) + nsec
+
+def time(ns):
+	return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
+
+class pair:
+	def __init__(self, aval, bval, alabel = None, blabel = None):
+		self.alabel = alabel
+		self.blabel = blabel
+		self.aval = aval
+		self.bval = bval
+
+	def __add__(self, rhs):
+		self.aval += rhs.aval
+		self.bval += rhs.bval
+		return self
+
+	def __str__(self):
+		return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
+
+class cnode:
+	def __init__(self, ns):
+		self.ns = ns
+		self.migrated = pair(0, 0, "moved", "failed")
+		self.fscan = pair(0,0, "scanned", "isolated")
+		self.mscan = pair(0,0, "scanned", "isolated")
+
+	def __add__(self, rhs):
+		self.ns += rhs.ns
+		self.migrated += rhs.migrated
+		self.fscan += rhs.fscan
+		self.mscan += rhs.mscan
+		return self
+
+	def __str__(self):
+		prev = 0
+		s = "%s " % time(self.ns)
+		if (opt_disp & topt.DISP_MIG):
+			s += "migration: %s" % self.migrated
+			prev = 1
+		if (opt_disp & topt.DISP_ISOLFREE):
+			s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
+			prev = 1
+		if (opt_disp & topt.DISP_ISOLMIG):
+			s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
+		return s
+
+	def complete(self, secs, nsecs):
+		self.ns = ns(secs, nsecs) - self.ns
+
+	def increment(self, migrated, fscan, mscan):
+		if (migrated != None):
+			self.migrated += migrated
+		if (fscan != None):
+			self.fscan += fscan
+		if (mscan != None):
+			self.mscan += mscan
+
+
+class chead:
+	heads = {}
+	val = cnode(0);
+	fobj = None
+
+	@classmethod
+	def add_filter(cls, filter):
+		cls.fobj = filter
+
+	@classmethod
+	def create_pending(cls, pid, comm, start_secs, start_nsecs):
+		filtered = 0
+		try:
+			head = cls.heads[pid]
+			filtered = head.is_filtered()
+		except KeyError:
+			if cls.fobj != None:
+				filtered = cls.fobj.filter(pid, comm)
+			head = cls.heads[pid] = chead(comm, pid, filtered)
+
+		if not filtered:
+			head.mark_pending(start_secs, start_nsecs)
+
+	@classmethod
+	def increment_pending(cls, pid, migrated, fscan, mscan):
+		head = cls.heads[pid]
+		if not head.is_filtered():
+			if head.is_pending():
+				head.do_increment(migrated, fscan, mscan)
+			else:
+				sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+	@classmethod
+	def complete_pending(cls, pid, secs, nsecs):
+		head = cls.heads[pid]
+		if not head.is_filtered():
+			if head.is_pending():
+				head.make_complete(secs, nsecs)
+			else:
+				sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+	@classmethod
+	def gen(cls):
+		if opt_proc != popt.DISP_DFL:
+			for i in cls.heads:
+				yield cls.heads[i]
+
+	@classmethod
+	def str(cls):
+		return cls.val
+
+	def __init__(self, comm, pid, filtered):
+		self.comm = comm
+		self.pid = pid
+		self.val = cnode(0)
+		self.pending = None
+		self.filtered = filtered
+		self.list = []
+
+	def __add__(self, rhs):
+		self.ns += rhs.ns
+		self.val += rhs.val
+		return self
+
+	def mark_pending(self, secs, nsecs):
+		self.pending = cnode(ns(secs, nsecs))
+
+	def do_increment(self, migrated, fscan, mscan):
+		self.pending.increment(migrated, fscan, mscan)
+
+	def make_complete(self, secs, nsecs):
+		self.pending.complete(secs, nsecs)
+		chead.val += self.pending
+
+		if opt_proc != popt.DISP_DFL:
+			self.val += self.pending
+
+			if opt_proc == popt.DISP_PROC_VERBOSE:
+				self.list.append(self.pending)
+		self.pending = None
+
+	def enumerate(self):
+		if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
+			for i, pelem in enumerate(self.list):
+				sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
+
+	def is_pending(self):
+		return self.pending != None
+
+	def is_filtered(self):
+		return self.filtered
+
+	def display(self):
+		if not self.is_filtered():
+			sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
+
+
+def trace_end():
+	sys.stdout.write("total: %s\n" % chead.str())
+	for i in chead.gen():
+		i.display(),
+		i.enumerate()
+
+def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, nr_migrated, nr_failed):
+
+	chead.increment_pending(common_pid,
+		pair(nr_migrated, nr_failed), None, None)
+
+def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
+        common_secs, common_nsecs, common_pid, common_comm,
+        common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+	chead.increment_pending(common_pid,
+		None, pair(nr_scanned, nr_taken), None)
+
+def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
+        common_secs, common_nsecs, common_pid, common_comm,
+        common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+	chead.increment_pending(common_pid,
+		None, None, pair(nr_scanned, nr_taken))
+
+def compaction__mm_compaction_end(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, zone_start, migrate_start, free_start, zone_end,
+	sync, status):
+
+	chead.complete_pending(common_pid, common_secs, common_nsecs)
+
+def compaction__mm_compaction_begin(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, zone_start, migrate_start, free_start, zone_end,
+	sync):
+
+	chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
+
+def pr_help():
+	global usage
+
+	sys.stdout.write(usage)
+	sys.stdout.write("\n")
+	sys.stdout.write("-h	display this help\n")
+	sys.stdout.write("-p	display by process\n")
+	sys.stdout.write("-pv	display by process (verbose)\n")
+	sys.stdout.write("-t	display stall times only\n")
+	sys.stdout.write("-m	display stats for migration\n")
+	sys.stdout.write("-fs	display stats for free scanner\n")
+	sys.stdout.write("-ms	display stats for migration scanner\n")
+	sys.stdout.write("-u	display results in microseconds (default nanoseconds)\n")
+
+
+comm_re = None
+pid_re = None
+pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
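+# the trailing pid argument may be either a single pid or a "<low>-<high>" range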
+
+opt_proc = popt.DISP_DFL
+opt_disp = topt.DISP_ALL
+
+opt_ns = True
+
+argc = len(sys.argv) - 1
+if argc >= 1:
+	pid_re = re.compile(pid_regex)
+
+	for i, opt in enumerate(sys.argv[1:]):
+		if opt[0] == "-":
+			if opt == "-h":
+				pr_help()
+				exit(0);
+			elif opt == "-p":
+				opt_proc = popt.DISP_PROC
+			elif opt == "-pv":
+				opt_proc = popt.DISP_PROC_VERBOSE
+			elif opt == '-u':
+				opt_ns = False
+			elif opt == "-t":
+				set_type(topt.DISP_TIME)
+			elif opt == "-m":
+				set_type(topt.DISP_MIG)
+			elif opt == "-fs":
+				set_type(topt.DISP_ISOLFREE)
+			elif opt == "-ms":
+				set_type(topt.DISP_ISOLMIG)
+			else:
+				sys.exit(usage)
+
+		elif i == argc - 1:
+			m = pid_re.search(opt)
+			if m != None and m.group() != "":
+				if m.group(3) != None:
+					f = pid_filter(m.group(3), m.group(3))
+				else:
+					f = pid_filter(m.group(1), m.group(2))
+			else:
+				try:
+					comm_re=re.compile(opt)
+				except:
+					sys.stderr.write("invalid regex '%s'\n" % opt)
+					sys.exit(usage)
+				f = comm_filter(comm_re)
+
+			chead.add_filter(f)
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
new file mode 100644
index 0000000..4e843b9
--- /dev/null
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -0,0 +1,190 @@
+# event_analyzing_sample.py: general event handler in python
+# SPDX-License-Identifier: GPL-2.0
+#
+# perf report is already very powerful, with annotation integrated, and this
+# script is not trying to be as powerful as perf report; instead it provides
+# end users and developers a flexible way to analyze events other than
+# tracepoints.
+#
+# The two database-related functions in this script just show how to gather
+# the basic information; users can modify them or write their own functions
+# according to their specific requirements.
+#
+# The first function "show_general_events" just does a basic grouping for all
+# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
+# for a x86 HW PMU event: PEBS with load latency data.
+#
+
+import os
+import sys
+import math
+import struct
+import sqlite3
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from EventClass import *
+
+#
+# If perf.data contains a large number of samples, the insert operations
+# will be very time consuming (about 10+ minutes for 10000 samples) if the
+# .db database is on disk. Moving the .db file to a RAM-based FS speeds up
+# the handling and cuts the time down to several seconds.
+#
+con = sqlite3.connect("/dev/shm/perf.db")
+con.isolation_level = None
+
+def trace_begin():
+	print "In trace_begin:\n"
+
+        #
+        # Will create several tables at the start, pebs_ll is for PEBS data with
+        # load latency info, while gen_events is for general event.
+        #
+        con.execute("""
+                create table if not exists gen_events (
+                        name text,
+                        symbol text,
+                        comm text,
+                        dso text
+                );""")
+        con.execute("""
+                create table if not exists pebs_ll (
+                        name text,
+                        symbol text,
+                        comm text,
+                        dso text,
+                        flags integer,
+                        ip integer,
+                        status integer,
+                        dse integer,
+                        dla integer,
+                        lat integer
+                );""")
+
+#
+# Create and insert event object to a database so that user could
+# do more analysis with simple database commands.
+#
+def process_event(param_dict):
+        event_attr = param_dict["attr"]
+        sample     = param_dict["sample"]
+        raw_buf    = param_dict["raw_buf"]
+        comm       = param_dict["comm"]
+        name       = param_dict["ev_name"]
+
+        # Symbol and dso info are not always resolved
+        if (param_dict.has_key("dso")):
+                dso = param_dict["dso"]
+        else:
+                dso = "Unknown_dso"
+
+        if (param_dict.has_key("symbol")):
+                symbol = param_dict["symbol"]
+        else:
+                symbol = "Unknown_symbol"
+
+        # Create the event object and insert it to the right table in database
+        event = create_event(name, comm, dso, symbol, raw_buf)
+        insert_db(event)
+
+def insert_db(event):
+        if event.ev_type == EVTYPE_GENERIC:
+                con.execute("insert into gen_events values(?, ?, ?, ?)",
+                                (event.name, event.symbol, event.comm, event.dso))
+        elif event.ev_type == EVTYPE_PEBS_LL:
+                event.ip &= 0x7fffffffffffffff
+                event.dla &= 0x7fffffffffffffff
+                con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+                        (event.name, event.symbol, event.comm, event.dso, event.flags,
+                                event.ip, event.status, event.dse, event.dla, event.lat))
+
+def trace_end():
+	print "In trace_end:\n"
+        # We show the basic info for the 2 type of event classes
+        show_general_events()
+        show_pebs_ll()
+        con.close()
+
+#
+# As the event counts may be very large, the histogram is not scaled
+# linearly; instead the bar length grows with log2 of the count.
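+# For example, num2sym(1) gives "#" and num2sym(1024) gives eleven '#'
+# characters, so each doubling of the count adds one more '#' to the bar.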
+#
+
+def num2sym(num):
+        # Each number will have at least one '#'
+        snum = '#' * (int)(math.log(num, 2) + 1)
+        return snum
+
+def show_general_events():
+
+        # Check the total record number in the table
+        count = con.execute("select count(*) from gen_events")
+        for t in count:
+                print "There is %d records in gen_events table" % t[0]
+                if t[0] == 0:
+                        return
+
+        print "Statistics about the general events grouped by thread/symbol/dso: \n"
+
+         # Group by thread
+        commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
+        print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+        for row in commq:
+             print "%16s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+        # Group by symbol
+        print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+        symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
+        for row in symbolq:
+             print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+        # Group by dso
+        print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+        dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
+        for row in dsoq:
+             print "%40s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+#
+# This function just shows the basic info, and we could do more with the
+# data in the tables, like checking the function parameters when some
+# big latency events happen.
+#
+def show_pebs_ll():
+
+        count = con.execute("select count(*) from pebs_ll")
+        for t in count:
+                print "There is %d records in pebs_ll table" % t[0]
+                if t[0] == 0:
+                        return
+
+        print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+
+        # Group by thread
+        commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
+        print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+        for row in commq:
+             print "%16s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+        # Group by symbol
+        print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+        symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
+        for row in symbolq:
+             print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+        # Group by dse
+        dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
+        print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+        for row in dseq:
+             print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+        # Group by latency
+        latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
+        print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+        for row in latq:
+             print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
+
+def trace_unhandled(event_name, context, event_fields_dict):
+		print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
new file mode 100644
index 0000000..e46f51b
--- /dev/null
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -0,0 +1,734 @@
+# export-to-postgresql.py: export perf data to a postgresql database
+# Copyright (c) 2014, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+import os
+import sys
+import struct
+import datetime
+
+# To use this script you will need to have installed the python-pyside package,
+# which provides LGPL-licensed Python bindings for Qt.  You will also need the
+# libqt4-sql-psql package for Qt PostgreSQL support.
+#
+# The script assumes postgresql is running on the local machine and that the
+# user has postgresql permissions to create databases. Examples of installing
+# postgresql and adding such a user are:
+#
+# fedora:
+#
+#	$ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
+#	$ sudo su - postgres -c initdb
+#	$ sudo service postgresql start
+#	$ sudo su - postgres
+#	$ createuser <your user id here>
+#	Shall the new role be a superuser? (y/n) y
+#
+# ubuntu:
+#
+#	$ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
+#	$ sudo su - postgres
+#	$ createuser -s <your user id here>
+#
+# An example of using this script with Intel PT:
+#
+#	$ perf record -e intel_pt//u ls
+#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
+#	2015-05-29 12:49:23.464364 Creating database...
+#	2015-05-29 12:49:26.281717 Writing to intermediate files...
+#	2015-05-29 12:49:27.190383 Copying to database...
+#	2015-05-29 12:49:28.140451 Removing intermediate files...
+#	2015-05-29 12:49:28.147451 Adding primary keys
+#	2015-05-29 12:49:28.655683 Adding foreign keys
+#	2015-05-29 12:49:29.365350 Done
+#
+# To browse the database, psql can be used e.g.
+#
+#	$ psql pt_example
+#	pt_example=# select * from samples_view where id < 100;
+#	pt_example=# \d+
+#	pt_example=# \d+ samples_view
+#	pt_example=# \q
+#
+# An example of using the database is provided by the script
+# call-graph-from-sql.py.  Refer to that script for details.
+#
+# Tables:
+#
+#	The tables largely correspond to perf tools' data structures and are mostly self-explanatory.
+#
+#	samples
+#
+#		'samples' is the main table. It represents what instruction was executing at a point in time
+#		when something (a selected event) happened.  The memory address is the instruction pointer or 'ip'.
+#
+#	calls
+#
+#		'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
+#		'calls' is only created when the 'calls' option to this script is specified.
+#
+#	call_paths
+#
+#		'call_paths' represents all the call stacks.  Each 'call' has an associated record in 'call_paths'.
+#		'call_paths' is only created when the 'calls' option to this script is specified.
+#
+#	branch_types
+#
+#		'branch_types' provides descriptions for each type of branch.
+#
+#	comm_threads
+#
+#		'comm_threads' shows how 'comms' relates to 'threads'.
+#
+#	comms
+#
+#		'comms' contains a record for each 'comm' - the name given to the executable that is running.
+#
+#	dsos
+#
+#		'dsos' contains a record for each executable file or library.
+#
+#	machines
+#
+#		'machines' can be used to distinguish virtual machines if virtualization is supported.
+#
+#	selected_events
+#
+#		'selected_events' contains a record for each kind of event that has been sampled.
+#
+#	symbols
+#
+#		'symbols' contains a record for each symbol.  Only symbols that have samples are present.
+#
+#	threads
+#
+#		'threads' contains a record for each thread.
+#
+# Views:
+#
+#	Most of the tables have views for more friendly display.  The views are:
+#
+#		calls_view
+#		call_paths_view
+#		comm_threads_view
+#		dsos_view
+#		machines_view
+#		samples_view
+#		symbols_view
+#		threads_view
+#
+# More examples of browsing the database with psql:
+#   Note that some of the examples are not the most efficient SQL queries.
+#   Note that call information is only available if the script's 'calls' option has been used.
+#
+#	Top 10 function calls (not aggregated by symbol):
+#
+#		SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
+#
+#	Top 10 function calls (aggregated by symbol):
+#
+#		SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
+#			SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
+#			FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
+#
+#		Note that the branch count gives a rough estimation of cpu usage, so functions
+#		that took a long time but have a relatively low branch count must have spent time
+#		waiting.
+#
+#	Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
+#
+#		SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
+#
+#	Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
+#
+#		SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
+#
+#	Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
+#
+#		SELECT * FROM calls_view WHERE parent_call_path_id = 254;
+#
+#	Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
+#
+#		SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
+#
+#	Show transactions:
+#
+#		SELECT * FROM samples_view WHERE event = 'transactions';
+#
+#		Note that a transaction start has 'in_tx' true, whereas a transaction end has 'in_tx' false.
+#		Transaction aborts have branch_type_name 'transaction abort'.
+#
+#	Show transaction aborts:
+#
+#		SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
+#
+# To print a call stack requires walking the call_paths table.  For example this python script:
+#   #!/usr/bin/python2
+#
+#   import sys
+#   from PySide.QtSql import *
+#
+#   if __name__ == '__main__':
+#           if (len(sys.argv) < 3):
+#                   print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
+#                   raise Exception("Too few arguments")
+#           dbname = sys.argv[1]
+#           call_path_id = sys.argv[2]
+#           db = QSqlDatabase.addDatabase('QPSQL')
+#           db.setDatabaseName(dbname)
+#           if not db.open():
+#                   raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+#           query = QSqlQuery(db)
+#           print "    id          ip  symbol_id  symbol                          dso_id  dso_short_name"
+#           while call_path_id != 0 and call_path_id != 1:
+#                   ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
+#                   if not ret:
+#                           raise Exception("Query failed: " + query.lastError().text())
+#                   if not query.next():
+#                           raise Exception("Query failed")
+#                   print "{0:>6}  {1:>10}  {2:>9}  {3:<30}  {4:>6}  {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
+#                   call_path_id = query.value(6)
+
+from PySide.QtSql import *
+
+# Need to access PostgreSQL C library directly to use COPY FROM STDIN
+from ctypes import *
+libpq = CDLL("libpq.so.5")
+PQconnectdb = libpq.PQconnectdb
+PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
+PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
+PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
+PQexec = libpq.PQexec
+PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
+PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
+PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
+PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
+PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
+PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
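+# (restype/argtypes are declared explicitly because ctypes defaults return
+# values to int, which would truncate the 64-bit PGconn/PGresult pointers)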
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = False
+
+
+def usage():
+	print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
+	print >> sys.stderr, "where:	columns		'all' or 'branches'"
+	print >> sys.stderr, "		calls		'calls' => create calls and call_paths table"
+	print >> sys.stderr, "		callchains	'callchains' => create call_paths table"
+	raise Exception("Too few arguments")
+
+if (len(sys.argv) < 2):
+	usage()
+
+dbname = sys.argv[1]
+
+if (len(sys.argv) >= 3):
+	columns = sys.argv[2]
+else:
+	columns = "all"
+
+if columns not in ("all", "branches"):
+	usage()
+
+branches = (columns == "branches")
+
+for i in range(3,len(sys.argv)):
+	if (sys.argv[i] == "calls"):
+		perf_db_export_calls = True
+	elif (sys.argv[i] == "callchains"):
+		perf_db_export_callchains = True
+	else:
+		usage()
+
+output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
+os.mkdir(output_dir_name)
+
+def do_query(q, s):
+	if (q.exec_(s)):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+print datetime.datetime.today(), "Creating database..."
+
+db = QSqlDatabase.addDatabase('QPSQL')
+query = QSqlQuery(db)
+db.setDatabaseName('postgres')
+db.open()
+try:
+	do_query(query, 'CREATE DATABASE ' + dbname)
+except:
+	os.rmdir(output_dir_name)
+	raise
+query.finish()
+query.clear()
+db.close()
+
+db.setDatabaseName(dbname)
+db.open()
+
+query = QSqlQuery(db)
+do_query(query, 'SET client_min_messages TO WARNING')
+
+do_query(query, 'CREATE TABLE selected_events ('
+		'id		bigint		NOT NULL,'
+		'name		varchar(80))')
+do_query(query, 'CREATE TABLE machines ('
+		'id		bigint		NOT NULL,'
+		'pid		integer,'
+		'root_dir 	varchar(4096))')
+do_query(query, 'CREATE TABLE threads ('
+		'id		bigint		NOT NULL,'
+		'machine_id	bigint,'
+		'process_id	bigint,'
+		'pid		integer,'
+		'tid		integer)')
+do_query(query, 'CREATE TABLE comms ('
+		'id		bigint		NOT NULL,'
+		'comm		varchar(16))')
+do_query(query, 'CREATE TABLE comm_threads ('
+		'id		bigint		NOT NULL,'
+		'comm_id	bigint,'
+		'thread_id	bigint)')
+do_query(query, 'CREATE TABLE dsos ('
+		'id		bigint		NOT NULL,'
+		'machine_id	bigint,'
+		'short_name	varchar(256),'
+		'long_name	varchar(4096),'
+		'build_id	varchar(64))')
+do_query(query, 'CREATE TABLE symbols ('
+		'id		bigint		NOT NULL,'
+		'dso_id		bigint,'
+		'sym_start	bigint,'
+		'sym_end	bigint,'
+		'binding	integer,'
+		'name		varchar(2048))')
+do_query(query, 'CREATE TABLE branch_types ('
+		'id		integer		NOT NULL,'
+		'name		varchar(80))')
+
+if branches:
+	do_query(query, 'CREATE TABLE samples ('
+		'id		bigint		NOT NULL,'
+		'evsel_id	bigint,'
+		'machine_id	bigint,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'dso_id		bigint,'
+		'symbol_id	bigint,'
+		'sym_offset	bigint,'
+		'ip		bigint,'
+		'time		bigint,'
+		'cpu		integer,'
+		'to_dso_id	bigint,'
+		'to_symbol_id	bigint,'
+		'to_sym_offset	bigint,'
+		'to_ip		bigint,'
+		'branch_type	integer,'
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
+else:
+	do_query(query, 'CREATE TABLE samples ('
+		'id		bigint		NOT NULL,'
+		'evsel_id	bigint,'
+		'machine_id	bigint,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'dso_id		bigint,'
+		'symbol_id	bigint,'
+		'sym_offset	bigint,'
+		'ip		bigint,'
+		'time		bigint,'
+		'cpu		integer,'
+		'to_dso_id	bigint,'
+		'to_symbol_id	bigint,'
+		'to_sym_offset	bigint,'
+		'to_ip		bigint,'
+		'period		bigint,'
+		'weight		bigint,'
+		'transaction	bigint,'
+		'data_src	bigint,'
+		'branch_type	integer,'
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE TABLE call_paths ('
+		'id		bigint		NOT NULL,'
+		'parent_id	bigint,'
+		'symbol_id	bigint,'
+		'ip		bigint)')
+if perf_db_export_calls:
+	do_query(query, 'CREATE TABLE calls ('
+		'id		bigint		NOT NULL,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'call_path_id	bigint,'
+		'call_time	bigint,'
+		'return_time	bigint,'
+		'branch_count	bigint,'
+		'call_id	bigint,'
+		'return_id	bigint,'
+		'parent_call_path_id	bigint,'
+		'flags		integer)')
+
+do_query(query, 'CREATE VIEW machines_view AS '
+	'SELECT '
+		'id,'
+		'pid,'
+		'root_dir,'
+		'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+	' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'short_name,'
+		'long_name,'
+		'build_id'
+	' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+	'SELECT '
+		'id,'
+		'name,'
+		'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+		'dso_id,'
+		'sym_start,'
+		'sym_end,'
+		'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+	' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'process_id,'
+		'pid,'
+		'tid'
+	' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+	'SELECT '
+		'comm_id,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'thread_id,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+	' FROM comm_threads')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE VIEW call_paths_view AS '
+		'SELECT '
+			'c.id,'
+			'to_hex(c.ip) AS ip,'
+			'c.symbol_id,'
+			'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
+			'c.parent_id,'
+			'to_hex(p.ip) AS parent_ip,'
+			'p.symbol_id AS parent_symbol_id,'
+			'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
+		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
+	do_query(query, 'CREATE VIEW calls_view AS '
+		'SELECT '
+			'calls.id,'
+			'thread_id,'
+			'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+			'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+			'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+			'call_path_id,'
+			'to_hex(ip) AS ip,'
+			'symbol_id,'
+			'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+			'call_time,'
+			'return_time,'
+			'return_time - call_time AS elapsed_time,'
+			'branch_count,'
+			'call_id,'
+			'return_id,'
+			'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+			'parent_call_path_id'
+		' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
+do_query(query, 'CREATE VIEW samples_view AS '
+	'SELECT '
+		'id,'
+		'time,'
+		'cpu,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+		'to_hex(ip) AS ip_hex,'
+		'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+		'sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+		'to_hex(to_ip) AS to_ip_hex,'
+		'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
+		'to_sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
+		'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
+		'in_tx'
+	' FROM samples')
+
+
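+# PostgreSQL binary COPY format: each file starts with an 11-byte signature, a
+# 32-bit flags field and a 32-bit header extension length (both zero here), and
+# ends with a 16-bit word containing -1 (0xffff) as the trailer.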
+file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = "\377\377"
+
+def open_output_file(file_name):
+	path_name = output_dir_name + "/" + file_name
+	file = open(path_name, "w+")
+	file.write(file_header)
+	return file
+
+def close_output_file(file):
+	file.write(file_trailer)
+	file.close()
+
+def copy_output_file_direct(file, table_name):
+	close_output_file(file)
+	sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
+	do_query(query, sql)
+
+# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
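+# (a PQresultStatus() of 4 below is PGRES_COPY_IN, i.e. the server is ready to
+# receive the copy data)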
+def copy_output_file(file, table_name):
+	conn = PQconnectdb("dbname = " + dbname)
+	if (PQstatus(conn)):
+		raise Exception("COPY FROM STDIN PQconnectdb failed")
+	file.write(file_trailer)
+	file.seek(0)
+	sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
+	res = PQexec(conn, sql)
+	if (PQresultStatus(res) != 4):
+		raise Exception("COPY FROM STDIN PQexec failed")
+	data = file.read(65536)
+	while (len(data)):
+		ret = PQputCopyData(conn, data, len(data))
+		if (ret != 1):
+			raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
+		data = file.read(65536)
+	ret = PQputCopyEnd(conn, None)
+	if (ret != 1):
+		raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
+	PQfinish(conn)
+
+def remove_output_file(file):
+	name = file.name
+	file.close()
+	os.unlink(name)
+
+evsel_file		= open_output_file("evsel_table.bin")
+machine_file		= open_output_file("machine_table.bin")
+thread_file		= open_output_file("thread_table.bin")
+comm_file		= open_output_file("comm_table.bin")
+comm_thread_file	= open_output_file("comm_thread_table.bin")
+dso_file		= open_output_file("dso_table.bin")
+symbol_file		= open_output_file("symbol_table.bin")
+branch_type_file	= open_output_file("branch_type_table.bin")
+sample_file		= open_output_file("sample_table.bin")
+if perf_db_export_calls or perf_db_export_callchains:
+	call_path_file		= open_output_file("call_path_table.bin")
+if perf_db_export_calls:
+	call_file		= open_output_file("call_table.bin")
+
+def trace_begin():
+	print datetime.datetime.today(), "Writing to intermediate files..."
+	# id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
+	evsel_table(0, "unknown")
+	machine_table(0, 0, "unknown")
+	thread_table(0, 0, 0, -1, -1)
+	comm_table(0, "unknown")
+	dso_table(0, 0, "unknown", "unknown", "")
+	symbol_table(0, 0, 0, 0, 0, "unknown")
+	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	if perf_db_export_calls or perf_db_export_callchains:
+		call_path_table(0, 0, 0, 0)
+
+unhandled_count = 0
+
+def trace_end():
+	print datetime.datetime.today(), "Copying to database..."
+	copy_output_file(evsel_file,		"selected_events")
+	copy_output_file(machine_file,		"machines")
+	copy_output_file(thread_file,		"threads")
+	copy_output_file(comm_file,		"comms")
+	copy_output_file(comm_thread_file,	"comm_threads")
+	copy_output_file(dso_file,		"dsos")
+	copy_output_file(symbol_file,		"symbols")
+	copy_output_file(branch_type_file,	"branch_types")
+	copy_output_file(sample_file,		"samples")
+	if perf_db_export_calls or perf_db_export_callchains:
+		copy_output_file(call_path_file,	"call_paths")
+	if perf_db_export_calls:
+		copy_output_file(call_file,		"calls")
+
+	print datetime.datetime.today(), "Removing intermediate files..."
+	remove_output_file(evsel_file)
+	remove_output_file(machine_file)
+	remove_output_file(thread_file)
+	remove_output_file(comm_file)
+	remove_output_file(comm_thread_file)
+	remove_output_file(dso_file)
+	remove_output_file(symbol_file)
+	remove_output_file(branch_type_file)
+	remove_output_file(sample_file)
+	if perf_db_export_calls or perf_db_export_callchains:
+		remove_output_file(call_path_file)
+	if perf_db_export_calls:
+		remove_output_file(call_file)
+	os.rmdir(output_dir_name)
+	print datetime.datetime.today(), "Adding primary keys"
+	do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE machines        ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE threads         ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE comms           ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE comm_threads    ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE dsos            ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE symbols         ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE branch_types    ADD PRIMARY KEY (id)')
+	do_query(query, 'ALTER TABLE samples         ADD PRIMARY KEY (id)')
+	if perf_db_export_calls or perf_db_export_callchains:
+		do_query(query, 'ALTER TABLE call_paths      ADD PRIMARY KEY (id)')
+	if perf_db_export_calls:
+		do_query(query, 'ALTER TABLE calls           ADD PRIMARY KEY (id)')
+
+	print datetime.datetime.today(), "Adding foreign keys"
+	do_query(query, 'ALTER TABLE threads '
+					'ADD CONSTRAINT machinefk  FOREIGN KEY (machine_id)   REFERENCES machines   (id),'
+					'ADD CONSTRAINT processfk  FOREIGN KEY (process_id)   REFERENCES threads    (id)')
+	do_query(query, 'ALTER TABLE comm_threads '
+					'ADD CONSTRAINT commfk     FOREIGN KEY (comm_id)      REFERENCES comms      (id),'
+					'ADD CONSTRAINT threadfk   FOREIGN KEY (thread_id)    REFERENCES threads    (id)')
+	do_query(query, 'ALTER TABLE dsos '
+					'ADD CONSTRAINT machinefk  FOREIGN KEY (machine_id)   REFERENCES machines   (id)')
+	do_query(query, 'ALTER TABLE symbols '
+					'ADD CONSTRAINT dsofk      FOREIGN KEY (dso_id)       REFERENCES dsos       (id)')
+	do_query(query, 'ALTER TABLE samples '
+					'ADD CONSTRAINT evselfk    FOREIGN KEY (evsel_id)     REFERENCES selected_events (id),'
+					'ADD CONSTRAINT machinefk  FOREIGN KEY (machine_id)   REFERENCES machines   (id),'
+					'ADD CONSTRAINT threadfk   FOREIGN KEY (thread_id)    REFERENCES threads    (id),'
+					'ADD CONSTRAINT commfk     FOREIGN KEY (comm_id)      REFERENCES comms      (id),'
+					'ADD CONSTRAINT dsofk      FOREIGN KEY (dso_id)       REFERENCES dsos       (id),'
+					'ADD CONSTRAINT symbolfk   FOREIGN KEY (symbol_id)    REFERENCES symbols    (id),'
+					'ADD CONSTRAINT todsofk    FOREIGN KEY (to_dso_id)    REFERENCES dsos       (id),'
+					'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols    (id)')
+	if perf_db_export_calls or perf_db_export_callchains:
+		do_query(query, 'ALTER TABLE call_paths '
+					'ADD CONSTRAINT parentfk    FOREIGN KEY (parent_id)    REFERENCES call_paths (id),'
+					'ADD CONSTRAINT symbolfk    FOREIGN KEY (symbol_id)    REFERENCES symbols    (id)')
+	if perf_db_export_calls:
+		do_query(query, 'ALTER TABLE calls '
+					'ADD CONSTRAINT threadfk    FOREIGN KEY (thread_id)    REFERENCES threads    (id),'
+					'ADD CONSTRAINT commfk      FOREIGN KEY (comm_id)      REFERENCES comms      (id),'
+					'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
+					'ADD CONSTRAINT callfk      FOREIGN KEY (call_id)      REFERENCES samples    (id),'
+					'ADD CONSTRAINT returnfk    FOREIGN KEY (return_id)    REFERENCES samples    (id),'
+					'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
+		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+
+	if (unhandled_count):
+		print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
+	print datetime.datetime.today(), "Done"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	global unhandled_count
+	unhandled_count += 1
+
+def sched__sched_switch(*x):
+	pass
+
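+# Each row written to the intermediate files follows the PostgreSQL binary COPY
+# tuple layout: a 16-bit field count, then for every field a 32-bit byte length
+# followed by the field data (e.g. the "!hiqi..s" format below packs two
+# fields: an 8-byte id and a variable-length string).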
+def evsel_table(evsel_id, evsel_name, *x):
+	n = len(evsel_name)
+	fmt = "!hiqi" + str(n) + "s"
+	value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
+	evsel_file.write(value)
+
+def machine_table(machine_id, pid, root_dir, *x):
+	n = len(root_dir)
+	fmt = "!hiqiii" + str(n) + "s"
+	value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
+	machine_file.write(value)
+
+def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
+	value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
+	thread_file.write(value)
+
+def comm_table(comm_id, comm_str, *x):
+	n = len(comm_str)
+	fmt = "!hiqi" + str(n) + "s"
+	value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
+	comm_file.write(value)
+
+def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
+	fmt = "!hiqiqiq"
+	value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
+	comm_thread_file.write(value)
+
+def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+	n1 = len(short_name)
+	n2 = len(long_name)
+	n3 = len(build_id)
+	fmt = "!hiqiqi" + str(n1) + "si"  + str(n2) + "si" + str(n3) + "s"
+	value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
+	dso_file.write(value)
+
+def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+	n = len(symbol_name)
+	fmt = "!hiqiqiqiqiii" + str(n) + "s"
+	value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
+	symbol_file.write(value)
+
+def branch_type_table(branch_type, name, *x):
+	n = len(name)
+	fmt = "!hiii" + str(n) + "s"
+	value = struct.pack(fmt, 2, 4, branch_type, n, name)
+	branch_type_file.write(value)
+
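+# The leading 18 or 22 below is the tuple's field count and must match the
+# number of columns in the corresponding 'samples' table created above.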
+def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
+	if branches:
+		value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
+	else:
+		value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
+	sample_file.write(value)
+
+def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
+	fmt = "!hiqiqiqiq"
+	value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
+	call_path_file.write(value)
+
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
+	fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
+	value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
+	call_file.write(value)
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
new file mode 100644
index 0000000..e4bb82c
--- /dev/null
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -0,0 +1,455 @@
+# export-to-sqlite.py: export perf data to a sqlite3 database
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+import os
+import sys
+import struct
+import datetime
+
+# To use this script you will need to have installed the python-pyside package,
+# which provides LGPL-licensed Python bindings for Qt.  You will also need the
+# libqt4-sql-sqlite package for Qt sqlite3 support.
+#
+# An example of using this script with Intel PT:
+#
+#	$ perf record -e intel_pt//u ls
+#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
+#	2017-07-31 14:26:07.326913 Creating database...
+#	2017-07-31 14:26:07.538097 Writing records...
+#	2017-07-31 14:26:09.889292 Adding indexes
+#	2017-07-31 14:26:09.958746 Done
+#
+# To browse the database, sqlite3 can be used e.g.
+#
+#	$ sqlite3 pt_example
+#	sqlite> .header on
+#	sqlite> select * from samples_view where id < 10;
+#	sqlite> .mode column
+#	sqlite> select * from samples_view where id < 10;
+#	sqlite> .tables
+#	sqlite> .schema samples_view
+#	sqlite> .quit
+#
+# An example of using the database is provided by the script
+# call-graph-from-sql.py.  Refer to that script for details.
+#
+# The database structure is practically the same as created by the script
+# export-to-postgresql.py. Refer to that script for details.  A notable
+# difference is that the 'transaction' column of the 'samples' table is
+# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
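+#
+# For example (an illustrative query, assuming the default 'all' columns):
+#
+#	sqlite> select id, time, transaction_ from samples where transaction_ != 0;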
+
+from PySide.QtSql import *
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = False
+
+def usage():
+	print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
+	print >> sys.stderr, "where:	columns		'all' or 'branches'"
+	print >> sys.stderr, "		calls		'calls' => create calls and call_paths table"
+	print >> sys.stderr, "		callchains	'callchains' => create call_paths table"
+	raise Exception("Too few arguments")
+
+if (len(sys.argv) < 2):
+	usage()
+
+dbname = sys.argv[1]
+
+if (len(sys.argv) >= 3):
+	columns = sys.argv[2]
+else:
+	columns = "all"
+
+if columns not in ("all", "branches"):
+	usage()
+
+branches = (columns == "branches")
+
+for i in range(3,len(sys.argv)):
+	if (sys.argv[i] == "calls"):
+		perf_db_export_calls = True
+	elif (sys.argv[i] == "callchains"):
+		perf_db_export_callchains = True
+	else:
+		usage()
+
+def do_query(q, s):
+	if (q.exec_(s)):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+def do_query_(q):
+	if (q.exec_()):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+print datetime.datetime.today(), "Creating database..."
+
+db_exists = False
+try:
+	f = open(dbname)
+	f.close()
+	db_exists = True
+except:
+	pass
+
+if db_exists:
+	raise Exception(dbname + " already exists")
+
+db = QSqlDatabase.addDatabase('QSQLITE')
+db.setDatabaseName(dbname)
+db.open()
+
+query = QSqlQuery(db)
+
+do_query(query, 'PRAGMA journal_mode = OFF')
+do_query(query, 'BEGIN TRANSACTION')
+
+do_query(query, 'CREATE TABLE selected_events ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'name		varchar(80))')
+do_query(query, 'CREATE TABLE machines ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'pid		integer,'
+		'root_dir 	varchar(4096))')
+do_query(query, 'CREATE TABLE threads ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'machine_id	bigint,'
+		'process_id	bigint,'
+		'pid		integer,'
+		'tid		integer)')
+do_query(query, 'CREATE TABLE comms ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'comm		varchar(16))')
+do_query(query, 'CREATE TABLE comm_threads ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'comm_id	bigint,'
+		'thread_id	bigint)')
+do_query(query, 'CREATE TABLE dsos ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'machine_id	bigint,'
+		'short_name	varchar(256),'
+		'long_name	varchar(4096),'
+		'build_id	varchar(64))')
+do_query(query, 'CREATE TABLE symbols ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'dso_id		bigint,'
+		'sym_start	bigint,'
+		'sym_end	bigint,'
+		'binding	integer,'
+		'name		varchar(2048))')
+do_query(query, 'CREATE TABLE branch_types ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'name		varchar(80))')
+
+if branches:
+	do_query(query, 'CREATE TABLE samples ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'evsel_id	bigint,'
+		'machine_id	bigint,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'dso_id		bigint,'
+		'symbol_id	bigint,'
+		'sym_offset	bigint,'
+		'ip		bigint,'
+		'time		bigint,'
+		'cpu		integer,'
+		'to_dso_id	bigint,'
+		'to_symbol_id	bigint,'
+		'to_sym_offset	bigint,'
+		'to_ip		bigint,'
+		'branch_type	integer,'
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
+else:
+	do_query(query, 'CREATE TABLE samples ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'evsel_id	bigint,'
+		'machine_id	bigint,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'dso_id		bigint,'
+		'symbol_id	bigint,'
+		'sym_offset	bigint,'
+		'ip		bigint,'
+		'time		bigint,'
+		'cpu		integer,'
+		'to_dso_id	bigint,'
+		'to_symbol_id	bigint,'
+		'to_sym_offset	bigint,'
+		'to_ip		bigint,'
+		'period		bigint,'
+		'weight		bigint,'
+		'transaction_	bigint,'
+		'data_src	bigint,'
+		'branch_type	integer,'
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE TABLE call_paths ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'parent_id	bigint,'
+		'symbol_id	bigint,'
+		'ip		bigint)')
+if perf_db_export_calls:
+	do_query(query, 'CREATE TABLE calls ('
+		'id		integer		NOT NULL	PRIMARY KEY,'
+		'thread_id	bigint,'
+		'comm_id	bigint,'
+		'call_path_id	bigint,'
+		'call_time	bigint,'
+		'return_time	bigint,'
+		'branch_count	bigint,'
+		'call_id	bigint,'
+		'return_id	bigint,'
+		'parent_call_path_id	bigint,'
+		'flags		integer)')
+
+# printf was added to sqlite in version 3.8.3
+sqlite_has_printf = False
+try:
+	do_query(query, 'SELECT printf("") FROM machines')
+	sqlite_has_printf = True
+except:
+	pass
+
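+# emit_to_hex('ip') expands to printf("%x", ip) when printf is supported,
+# otherwise the views below simply show the address as a plain decimal column.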
+def emit_to_hex(x):
+	if sqlite_has_printf:
+		return 'printf("%x", ' + x + ')'
+	else:
+		return x
+
+do_query(query, 'CREATE VIEW machines_view AS '
+	'SELECT '
+		'id,'
+		'pid,'
+		'root_dir,'
+		'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+	' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'short_name,'
+		'long_name,'
+		'build_id'
+	' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+	'SELECT '
+		'id,'
+		'name,'
+		'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+		'dso_id,'
+		'sym_start,'
+		'sym_end,'
+		'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+	' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'process_id,'
+		'pid,'
+		'tid'
+	' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+	'SELECT '
+		'comm_id,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'thread_id,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+	' FROM comm_threads')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE VIEW call_paths_view AS '
+		'SELECT '
+			'c.id,'
+			+ emit_to_hex('c.ip') + ' AS ip,'
+			'c.symbol_id,'
+			'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
+			'c.parent_id,'
+			+ emit_to_hex('p.ip') + ' AS parent_ip,'
+			'p.symbol_id AS parent_symbol_id,'
+			'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
+		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
+	do_query(query, 'CREATE VIEW calls_view AS '
+		'SELECT '
+			'calls.id,'
+			'thread_id,'
+			'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+			'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+			'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+			'call_path_id,'
+			+ emit_to_hex('ip') + ' AS ip,'
+			'symbol_id,'
+			'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+			'call_time,'
+			'return_time,'
+			'return_time - call_time AS elapsed_time,'
+			'branch_count,'
+			'call_id,'
+			'return_id,'
+			'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+			'parent_call_path_id'
+		' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
+do_query(query, 'CREATE VIEW samples_view AS '
+	'SELECT '
+		'id,'
+		'time,'
+		'cpu,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+		+ emit_to_hex('ip') + ' AS ip_hex,'
+		'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+		'sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+		+ emit_to_hex('to_ip') + ' AS to_ip_hex,'
+		'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
+		'to_sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
+		'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
+		'in_tx'
+	' FROM samples')
+
+do_query(query, 'END TRANSACTION')
+
+evsel_query = QSqlQuery(db)
+evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
+machine_query = QSqlQuery(db)
+machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
+thread_query = QSqlQuery(db)
+thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
+comm_query = QSqlQuery(db)
+comm_query.prepare("INSERT INTO comms VALUES (?, ?)")
+comm_thread_query = QSqlQuery(db)
+comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
+dso_query = QSqlQuery(db)
+dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
+symbol_query = QSqlQuery(db)
+symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
+branch_type_query = QSqlQuery(db)
+branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
+sample_query = QSqlQuery(db)
+if branches:
+	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+else:
+	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+if perf_db_export_calls or perf_db_export_callchains:
+	call_path_query = QSqlQuery(db)
+	call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
+if perf_db_export_calls:
+	call_query = QSqlQuery(db)
+	call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+def trace_begin():
+	print datetime.datetime.today(), "Writing records..."
+	do_query(query, 'BEGIN TRANSACTION')
+	# id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
+	evsel_table(0, "unknown")
+	machine_table(0, 0, "unknown")
+	thread_table(0, 0, 0, -1, -1)
+	comm_table(0, "unknown")
+	dso_table(0, 0, "unknown", "unknown", "")
+	symbol_table(0, 0, 0, 0, 0, "unknown")
+	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	if perf_db_export_calls or perf_db_export_callchains:
+		call_path_table(0, 0, 0, 0)
+
+unhandled_count = 0
+
+def trace_end():
+	do_query(query, 'END TRANSACTION')
+
+	print datetime.datetime.today(), "Adding indexes"
+	if perf_db_export_calls:
+		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+
+	if (unhandled_count):
+		print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
+	print datetime.datetime.today(), "Done"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	global unhandled_count
+	unhandled_count += 1
+
+def sched__sched_switch(*x):
+	pass
+
+def bind_exec(q, n, x):
+	for xx in x[0:n]:
+		q.addBindValue(str(xx))
+	do_query_(q)
+
+def evsel_table(*x):
+	bind_exec(evsel_query, 2, x)
+
+def machine_table(*x):
+	bind_exec(machine_query, 3, x)
+
+def thread_table(*x):
+	bind_exec(thread_query, 5, x)
+
+def comm_table(*x):
+	bind_exec(comm_query, 2, x)
+
+def comm_thread_table(*x):
+	bind_exec(comm_thread_query, 3, x)
+
+def dso_table(*x):
+	bind_exec(dso_query, 5, x)
+
+def symbol_table(*x):
+	bind_exec(symbol_query, 6, x)
+
+def branch_type_table(*x):
+	bind_exec(branch_type_query, 2, x)
+
+def sample_table(*x):
+	if branches:
+		for xx in x[0:15]:
+			sample_query.addBindValue(str(xx))
+		for xx in x[19:22]:
+			sample_query.addBindValue(str(xx))
+		do_query_(sample_query)
+	else:
+		bind_exec(sample_query, 22, x)
+
+def call_path_table(*x):
+	bind_exec(call_path_query, 4, x)
+
+def call_return_table(*x):
+	bind_exec(call_query, 11, x)
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
new file mode 100644
index 0000000..cafeff3
--- /dev/null
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -0,0 +1,78 @@
+# failed system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals, broken down by pid.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
+
+for_comm = None
+for_pid = None
+
+if len(sys.argv) > 2:
+	sys.exit(usage)
+
+if len(sys.argv) > 1:
+	try:
+		for_pid = int(sys.argv[1])
+	except:
+		for_comm = sys.argv[1]
+
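+# autodict creates nested levels on demand, so the first increment of a new
+# (comm, pid, id, ret) combination raises TypeError and is handled below by
+# initializing the count to 1.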
+syscalls = autodict()
+
+def trace_begin():
+	print "Press control+C to stop and show the summary"
+
+def trace_end():
+	print_error_totals()
+
+def raw_syscalls__sys_exit(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, id, ret):
+	if (for_comm and common_comm != for_comm) or \
+	   (for_pid  and common_pid  != for_pid ):
+		return
+
+	if ret < 0:
+		try:
+			syscalls[common_comm][common_pid][id][ret] += 1
+		except TypeError:
+			syscalls[common_comm][common_pid][id][ret] = 1
+
+def syscalls__sys_exit(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, ret):
+	raw_syscalls__sys_exit(**locals())
+
+def print_error_totals():
+    if for_comm is not None:
+	    print "\nsyscall errors for %s:\n\n" % (for_comm),
+    else:
+	    print "\nsyscall errors:\n\n",
+
+    print "%-30s  %10s\n" % ("comm [pid]", "count"),
+    print "%-30s  %10s\n" % ("------------------------------", \
+                                 "----------"),
+
+    comm_keys = syscalls.keys()
+    for comm in comm_keys:
+	    pid_keys = syscalls[comm].keys()
+	    for pid in pid_keys:
+		    print "\n%s [%d]\n" % (comm, pid),
+		    id_keys = syscalls[comm][pid].keys()
+		    for id in id_keys:
+			    print "  syscall: %-16s\n" % syscall_name(id),
+			    ret_keys = syscalls[comm][pid][id].keys()
+			    for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
+				    print "    err = %-20s  %10d\n" % (strerror(ret), val),
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
new file mode 100644
index 0000000..0f5cf43
--- /dev/null
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -0,0 +1,50 @@
+# futex contention
+# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Translation of:
+#
+# http://sourceware.org/systemtap/wiki/WSFutexContention
+#
+# to perf python scripting.
+#
+# Measures futex contention
+
+import os, sys
+sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+from Util import *
+
+process_names = {} # long-lived pid-to-execname mapping
+thread_thislock = {}
+thread_blocktime = {}
+
+lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
+
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
+			      nr, uaddr, op, val, utime, uaddr2, val3):
+	cmd = op & FUTEX_CMD_MASK
+	if cmd != FUTEX_WAIT:
+		return # we don't care about originators of WAKE events
+
+	process_names[tid] = comm
+	thread_thislock[tid] = uaddr
+	thread_blocktime[tid] = nsecs(s, ns)
+
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
+			     nr, ret):
+	if thread_blocktime.has_key(tid):
+		elapsed = nsecs(s, ns) - thread_blocktime[tid]
+		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
+		del thread_blocktime[tid]
+		del thread_thislock[tid]
+
+def trace_begin():
+	print "Press control+C to stop and show the summary"
+
+def trace_end():
+	for (tid, lock) in lock_waits:
+		min, max, avg, count = lock_waits[tid, lock]
+		print "%s[%d] lock %x contended %d times, %d avg ns" % \
+		      (process_names[tid], tid, lock, count, avg)
+
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
new file mode 100644
index 0000000..b19172d
--- /dev/null
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -0,0 +1,128 @@
+# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
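+#
+# One plausible workflow (hypothetical; check the intel_pt event config terms
+# and the perf script --itrace options on your system for the exact syntax):
+#
+#   perf record -e intel_pt/pwr_evt,ptw/u -- ./workload
+#   perf script --itrace=ewp -s intel-pt-events.py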
+
+import os
+import sys
+import struct
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+def trace_begin():
+	print "Intel PT Power Events and PTWRITE"
+
+def trace_end():
+	print "End"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	print ' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())])
+
+def print_ptwrite(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	flags = data[0]
+	payload = data[1]
+	exact_ip = flags & 1
+	print "IP: %u payload: %#x" % (exact_ip, payload),
+
+def print_cbr(raw_buf):
+	data = struct.unpack_from("<BBBBII", raw_buf)
+	cbr = data[0]
+	f = (data[4] + 500) / 1000
+	p = ((cbr * 1000 / data[2]) + 5) / 10
+	print "%3u  freq: %4u MHz  (%3u%%)" % (cbr, f, p),
+
+def print_mwait(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	hints = payload & 0xff
+	extensions = (payload >> 32) & 0x3
+	print "hints: %#x extensions: %#x" % (hints, extensions),
+
+def print_pwre(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	hw = (payload >> 7) & 1
+	cstate = (payload >> 12) & 0xf
+	subcstate = (payload >> 8) & 0xf
+	print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+
+def print_exstop(raw_buf):
+	data = struct.unpack_from("<I", raw_buf)
+	flags = data[0]
+	exact_ip = flags & 1
+	print "IP: %u" % (exact_ip),
+
+def print_pwrx(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	deepest_cstate = payload & 0xf
+	last_cstate = (payload >> 4) & 0xf
+	wake_reason = (payload >> 8) & 0xf
+	print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+
+def print_common_start(comm, sample, name):
+	ts = sample["time"]
+	cpu = sample["cpu"]
+	pid = sample["pid"]
+	tid = sample["tid"]
+	print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+
+def print_common_ip(sample, symbol, dso):
+	ip = sample["ip"]
+	print "%16x %s (%s)" % (ip, symbol, dso)
+
+def process_event(param_dict):
+	event_attr = param_dict["attr"]
+	sample     = param_dict["sample"]
+	raw_buf    = param_dict["raw_buf"]
+	comm       = param_dict["comm"]
+	name       = param_dict["ev_name"]
+
+	# Symbol and dso info are not always resolved
+	if (param_dict.has_key("dso")):
+		dso = param_dict["dso"]
+	else:
+		dso = "[unknown]"
+
+	if (param_dict.has_key("symbol")):
+		symbol = param_dict["symbol"]
+	else:
+		symbol = "[unknown]"
+
+	if name == "ptwrite":
+		print_common_start(comm, sample, name)
+		print_ptwrite(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "cbr":
+		print_common_start(comm, sample, name)
+		print_cbr(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "mwait":
+		print_common_start(comm, sample, name)
+		print_mwait(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "pwre":
+		print_common_start(comm, sample, name)
+		print_pwre(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "exstop":
+		print_common_start(comm, sample, name)
+		print_exstop(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "pwrx":
+		print_common_start(comm, sample, name)
+		print_pwrx(raw_buf)
+		print_common_ip(sample, symbol, dso)
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
new file mode 100644
index 0000000..ebee2c5
--- /dev/null
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -0,0 +1,95 @@
+# mem-phys-addr.py: Resolve physical address samples
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2018, Intel Corporation.
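+#
+# A plausible workflow (hypothetical; requires an event that samples data
+# physical addresses, e.g. recorded with --phys-data):
+#
+#   perf mem record --phys-data -- ./workload
+#   perf script -s mem-phys-addr.py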
+
+from __future__ import division
+import os
+import sys
+import struct
+import re
+import bisect
+import collections
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+#physical address ranges for System RAM
+system_ram = []
+#physical address ranges for Persistent Memory
+pmem = []
+#file object for proc iomem
+f = None
+#Count for each type of memory
+load_mem_type_cnt = collections.Counter()
+#perf event name
+event_name = None
+
+def parse_iomem():
+	global f
+	f = open('/proc/iomem', 'r')
+	for i, j in enumerate(f):
+		m = re.split('-|:',j,2)
+		if m[2].strip() == 'System RAM':
+			system_ram.append(long(m[0], 16))
+			system_ram.append(long(m[1], 16))
+		if m[2].strip() == 'Persistent Memory':
+			pmem.append(long(m[0], 16))
+			pmem.append(long(m[1], 16))
+
+def print_memory_type():
+	print "Event: %s" % (event_name)
+	print "%-40s  %10s  %10s\n" % ("Memory type", "count", "percentage"),
+	print "%-40s  %10s  %10s\n" % ("----------------------------------------", \
+					"-----------", "-----------"),
+	total = sum(load_mem_type_cnt.values())
+	for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
+					key = lambda(k, v): (v, k), reverse = True):
+		print "%-40s  %10d  %10.1f%%\n" % (mem_type, count, 100 * count / total),
+
+def trace_begin():
+	parse_iomem()
+
+def trace_end():
+	print_memory_type()
+	f.close()
+
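+# system_ram and pmem are flat, sorted lists of the form
+# [start0, end0, start1, end1, ...].  bisect.bisect() returns an odd
+# insertion point exactly when phys_addr falls inside one of the
+# [start, end] ranges, which is what the parity tests below rely on.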
+def is_system_ram(phys_addr):
+	#/proc/iomem is sorted
+	position = bisect.bisect(system_ram, phys_addr)
+	if position % 2 == 0:
+		return False
+	return True
+
+def is_persistent_mem(phys_addr):
+	position = bisect.bisect(pmem, phys_addr)
+	if position % 2 == 0:
+		return False
+	return True
+
+def find_memory_type(phys_addr):
+	if phys_addr == 0:
+		return "N/A"
+	if is_system_ram(phys_addr):
+		return "System RAM"
+
+	if is_persistent_mem(phys_addr):
+		return "Persistent Memory"
+
+	#slow path, search all
+	f.seek(0, 0)
+	for j in f:
+		m = re.split('-|:',j,2)
+		if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+			return m[2]
+	return "N/A"
+
+def process_event(param_dict):
+	name       = param_dict["ev_name"]
+	sample     = param_dict["sample"]
+	phys_addr  = sample["phys_addr"]
+
+	global event_name
+	if event_name == None:
+		event_name = name
+	load_mem_type_cnt[find_memory_type(phys_addr)] += 1
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
new file mode 100755
index 0000000..a150164
--- /dev/null
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -0,0 +1,76 @@
+# Monitor the system for dropped packets and produce a report of drop locations and counts
+# SPDX-License-Identifier: GPL-2.0
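+#
+# Example (hypothetical invocation; the bundled net_dropmonitor-record/-report
+# wrappers, if present, may differ):
+#
+#   perf record -e skb:kfree_skb -a -- sleep 30
+#   perf script -s net_dropmonitor.py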
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+		'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+drop_log = {}
+kallsyms = []
+
+def get_kallsyms_table():
+	global kallsyms
+
+	try:
+		f = open("/proc/kallsyms", "r")
+	except:
+		return
+
+	for line in f:
+		loc = int(line.split()[0], 16)
+		name = line.split()[2]
+		kallsyms.append((loc, name))
+	kallsyms.sort()
+
+def get_sym(sloc):
+	loc = int(sloc)
+
+	# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
+	#            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
+	start, end = -1, len(kallsyms)
+	while end != start + 1:
+		pivot = (start + end) // 2
+		if loc < kallsyms[pivot][0]:
+			end = pivot
+		else:
+			start = pivot
+
+	# Now (start == -1 or kallsyms[start][0] <= loc)
+	# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
+	if start >= 0:
+		symloc, name = kallsyms[start]
+		return (name, loc - symloc)
+	else:
+		return (None, 0)
+
+def print_drop_table():
+	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
+	for i in drop_log.keys():
+		(sym, off) = get_sym(i)
+		if sym == None:
+			sym = i
+		print "%25s %25s %25s" % (sym, off, drop_log[i])
+
+
+def trace_begin():
+	print "Starting trace (Ctrl-C to dump results)"
+
+def trace_end():
+	print "Gathering kallsyms data"
+	get_kallsyms_table()
+	print_drop_table()
+
+# called from perf, when it finds a corresponding event
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
+		   skbaddr, location, protocol):
+	slocation = str(location)
+	try:
+		drop_log[slocation] = drop_log[slocation] + 1
+	except:
+		drop_log[slocation] = 1
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
new file mode 100644
index 0000000..9b2050f
--- /dev/null
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -0,0 +1,468 @@
+# Display the processing flow and timing of packets.
+# SPDX-License-Identifier: GPL-2.0
+# It helps to investigate networking or network device behavior.
+#
+# options
+# tx: show only tx chart
+# rx: show only rx chart
+# dev=: show only events related to the specified device
+# debug: enable debug mode, which shows buffer status.
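+#
+# Typically used through the bundled wrappers (hypothetical example; the exact
+# event list is defined by the -record wrapper):
+#
+#   perf script record netdev-times
+#   perf script report netdev-times dev=eth0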
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+all_event_list = []; # insert all tracepoint event related with this script
+irq_dic = {}; # key is cpu and value is a list which stacks irqs
+              # which raise NET_RX softirq
+net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
+		 # and a list which stacks receive
+receive_hunk_list = []; # a list which include a sequence of receive events
+rx_skb_list = []; # received packet list for matching
+		       # skb_copy_datagram_iovec
+
+buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
+		       # tx_xmit_list
+of_count_rx_skb_list = 0; # overflow count
+
+tx_queue_list = []; # list of packets which pass through dev_queue_xmit
+of_count_tx_queue_list = 0; # overflow count
+
+tx_xmit_list = [];  # list of packets which pass through dev_hard_start_xmit
+of_count_tx_xmit_list = 0; # overflow count
+
+tx_free_list = [];  # list of packets which is freed
+
+# options
+show_tx = 0;
+show_rx = 0;
+dev = 0; # store a name of device specified by option "dev="
+debug = 0;
+
+# indices of event_info tuple
+EINFO_IDX_NAME=   0
+EINFO_IDX_CONTEXT=1
+EINFO_IDX_CPU=    2
+EINFO_IDX_TIME=   3
+EINFO_IDX_PID=    4
+EINFO_IDX_COMM=   5
+
+# Calculate a time interval(msec) from src(nsec) to dst(nsec)
+def diff_msec(src, dst):
+	return (dst - src) / 1000000.0
+
+# Display the process of transmitting a packet
+def print_transmit(hunk):
+	if dev != 0 and hunk['dev'].find(dev) < 0:
+		return
+	print "%7s %5d %6d.%06dsec %12.3fmsec      %12.3fmsec" % \
+		(hunk['dev'], hunk['len'],
+		nsecs_secs(hunk['queue_t']),
+		nsecs_nsecs(hunk['queue_t'])/1000,
+		diff_msec(hunk['queue_t'], hunk['xmit_t']),
+		diff_msec(hunk['xmit_t'], hunk['free_t']))
+
+# Format for displaying rx packet processing
+PF_IRQ_ENTRY= "  irq_entry(+%.3fmsec irq=%d:%s)"
+PF_SOFT_ENTRY="  softirq_entry(+%.3fmsec)"
+PF_NAPI_POLL= "  napi_poll_exit(+%.3fmsec %s)"
+PF_JOINT=     "         |"
+PF_WJOINT=    "         |            |"
+PF_NET_RECV=  "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
+PF_NET_RX=    "         |---netif_rx(+%.3fmsec skb=%x)"
+PF_CPY_DGRAM= "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
+PF_KFREE_SKB= "         |      kfree_skb(+%.3fmsec location=%x)"
+PF_CONS_SKB=  "         |      consume_skb(+%.3fmsec)"
+
+# Display the processing of received packets and the interrupts associated with
+# a NET_RX softirq
+def print_receive(hunk):
+	show_hunk = 0
+	irq_list = hunk['irq_list']
+	cpu = irq_list[0]['cpu']
+	base_t = irq_list[0]['irq_ent_t']
+	# check if this hunk should be shown
+	if dev != 0:
+		for i in range(len(irq_list)):
+			if irq_list[i]['name'].find(dev) >= 0:
+				show_hunk = 1
+				break
+	else:
+		show_hunk = 1
+	if show_hunk == 0:
+		return
+
+	print "%d.%06dsec cpu=%d" % \
+		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+	for i in range(len(irq_list)):
+		print PF_IRQ_ENTRY % \
+			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
+			irq_list[i]['irq'], irq_list[i]['name'])
+		print PF_JOINT
+		irq_event_list = irq_list[i]['event_list']
+		for j in range(len(irq_event_list)):
+			irq_event = irq_event_list[j]
+			if irq_event['event'] == 'netif_rx':
+				print PF_NET_RX % \
+					(diff_msec(base_t, irq_event['time']),
+					irq_event['skbaddr'])
+				print PF_JOINT
+	print PF_SOFT_ENTRY % \
+		diff_msec(base_t, hunk['sirq_ent_t'])
+	print PF_JOINT
+	event_list = hunk['event_list']
+	for i in range(len(event_list)):
+		event = event_list[i]
+		if event['event_name'] == 'napi_poll':
+			print PF_NAPI_POLL % \
+			    (diff_msec(base_t, event['event_t']), event['dev'])
+			if i == len(event_list) - 1:
+				print ""
+			else:
+				print PF_JOINT
+		else:
+			print PF_NET_RECV % \
+			    (diff_msec(base_t, event['event_t']), event['skbaddr'],
+				event['len'])
+			if 'comm' in event.keys():
+				print PF_WJOINT
+				print PF_CPY_DGRAM % \
+					(diff_msec(base_t, event['comm_t']),
+					event['pid'], event['comm'])
+			elif 'handle' in event.keys():
+				print PF_WJOINT
+				if event['handle'] == "kfree_skb":
+					print PF_KFREE_SKB % \
+						(diff_msec(base_t,
+						event['comm_t']),
+						event['location'])
+				elif event['handle'] == "consume_skb":
+					print PF_CONS_SKB % \
+						diff_msec(base_t,
+							event['comm_t'])
+			print PF_JOINT
+
+def trace_begin():
+	global show_tx
+	global show_rx
+	global dev
+	global debug
+
+	for i in range(len(sys.argv)):
+		if i == 0:
+			continue
+		arg = sys.argv[i]
+		if arg == 'tx':
+			show_tx = 1
+		elif arg =='rx':
+			show_rx = 1
+		elif arg.find('dev=',0, 4) >= 0:
+			dev = arg[4:]
+		elif arg == 'debug':
+			debug = 1
+	if show_tx == 0  and show_rx == 0:
+		show_tx = 1
+		show_rx = 1
+
+def trace_end():
+	# order all events in time
+	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
+					    b[EINFO_IDX_TIME]))
+	# process all events
+	for i in range(len(all_event_list)):
+		event_info = all_event_list[i]
+		name = event_info[EINFO_IDX_NAME]
+		if name == 'irq__softirq_exit':
+			handle_irq_softirq_exit(event_info)
+		elif name == 'irq__softirq_entry':
+			handle_irq_softirq_entry(event_info)
+		elif name == 'irq__softirq_raise':
+			handle_irq_softirq_raise(event_info)
+		elif name == 'irq__irq_handler_entry':
+			handle_irq_handler_entry(event_info)
+		elif name == 'irq__irq_handler_exit':
+			handle_irq_handler_exit(event_info)
+		elif name == 'napi__napi_poll':
+			handle_napi_poll(event_info)
+		elif name == 'net__netif_receive_skb':
+			handle_netif_receive_skb(event_info)
+		elif name == 'net__netif_rx':
+			handle_netif_rx(event_info)
+		elif name == 'skb__skb_copy_datagram_iovec':
+			handle_skb_copy_datagram_iovec(event_info)
+		elif name == 'net__net_dev_queue':
+			handle_net_dev_queue(event_info)
+		elif name == 'net__net_dev_xmit':
+			handle_net_dev_xmit(event_info)
+		elif name == 'skb__kfree_skb':
+			handle_kfree_skb(event_info)
+		elif name == 'skb__consume_skb':
+			handle_consume_skb(event_info)
+	# display receive hunks
+	if show_rx:
+		for i in range(len(receive_hunk_list)):
+			print_receive(receive_hunk_list[i])
+	# display transmit hunks
+	if show_tx:
+		print "   dev    len      Qdisc        " \
+			"       netdevice             free"
+		for i in range(len(tx_free_list)):
+			print_transmit(tx_free_list[i])
+	if debug:
+		print "debug buffer status"
+		print "----------------------------"
+		print "xmit Qdisc:remain:%d overflow:%d" % \
+			(len(tx_queue_list), of_count_tx_queue_list)
+		print "xmit netdevice:remain:%d overflow:%d" % \
+			(len(tx_xmit_list), of_count_tx_xmit_list)
+		print "receive:remain:%d overflow:%d" % \
+			(len(rx_skb_list), of_count_rx_skb_list)
+
+# called from perf, when it finds a corresponding event
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
+	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+		return
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+	all_event_list.append(event_info)
+
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
+	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+		return
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+	all_event_list.append(event_info)
+
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
+	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+		return
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+	all_event_list.append(event_info)
+
+def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
+			callchain, irq, irq_name):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			irq, irq_name)
+	all_event_list.append(event_info)
+
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
+	all_event_list.append(event_info)
+
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
+                    dev_name, work=None, budget=None):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			napi, dev_name, work, budget)
+	all_event_list.append(event_info)
+
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
+			skblen, dev_name):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr, skblen, dev_name)
+	all_event_list.append(event_info)
+
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
+			skblen, dev_name):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr, skblen, dev_name)
+	all_event_list.append(event_info)
+
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
+			skbaddr, skblen, dev_name):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr, skblen, dev_name)
+	all_event_list.append(event_info)
+
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
+			skbaddr, skblen, rc, dev_name):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr, skblen, rc ,dev_name)
+	all_event_list.append(event_info)
+
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
+			skbaddr, protocol, location):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr, protocol, location)
+	all_event_list.append(event_info)
+
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr)
+	all_event_list.append(event_info)
+
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
+	skbaddr, skblen):
+	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+			skbaddr, skblen)
+	all_event_list.append(event_info)
+
+def handle_irq_handler_entry(event_info):
+	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
+	if cpu not in irq_dic.keys():
+		irq_dic[cpu] = []
+	irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
+	irq_dic[cpu].append(irq_record)
+
+def handle_irq_handler_exit(event_info):
+	(name, context, cpu, time, pid, comm, irq, ret) = event_info
+	if cpu not in irq_dic.keys():
+		return
+	irq_record = irq_dic[cpu].pop()
+	if irq != irq_record['irq']:
+		return
+	irq_record.update({'irq_ext_t':time})
+	# if an irq doesn't include NET_RX softirq, drop.
+	if 'event_list' in irq_record.keys():
+		irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_raise(event_info):
+	(name, context, cpu, time, pid, comm, vec) = event_info
+	if cpu not in irq_dic.keys() \
+	or len(irq_dic[cpu]) == 0:
+		return
+	irq_record = irq_dic[cpu].pop()
+	if 'event_list' in irq_record.keys():
+		irq_event_list = irq_record['event_list']
+	else:
+		irq_event_list = []
+	irq_event_list.append({'time':time, 'event':'sirq_raise'})
+	irq_record.update({'event_list':irq_event_list})
+	irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_entry(event_info):
+	(name, context, cpu, time, pid, comm, vec) = event_info
+	net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
+
+def handle_irq_softirq_exit(event_info):
+	(name, context, cpu, time, pid, comm, vec) = event_info
+	irq_list = []
+	event_list = 0
+	if cpu in irq_dic.keys():
+		irq_list = irq_dic[cpu]
+		del irq_dic[cpu]
+	if cpu in net_rx_dic.keys():
+		sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
+		event_list = net_rx_dic[cpu]['event_list']
+		del net_rx_dic[cpu]
+	if irq_list == [] or event_list == 0:
+		return
+	rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
+		    'irq_list':irq_list, 'event_list':event_list}
+	# merge information related to a NET_RX softirq
+	receive_hunk_list.append(rec_data)
+
+def handle_napi_poll(event_info):
+	(name, context, cpu, time, pid, comm, napi, dev_name,
+		work, budget) = event_info
+	if cpu in net_rx_dic.keys():
+		event_list = net_rx_dic[cpu]['event_list']
+		rec_data = {'event_name':'napi_poll',
+				'dev':dev_name, 'event_t':time,
+				'work':work, 'budget':budget}
+		event_list.append(rec_data)
+
+def handle_netif_rx(event_info):
+	(name, context, cpu, time, pid, comm,
+		skbaddr, skblen, dev_name) = event_info
+	if cpu not in irq_dic.keys() \
+	or len(irq_dic[cpu]) == 0:
+		return
+	irq_record = irq_dic[cpu].pop()
+	if 'event_list' in irq_record.keys():
+		irq_event_list = irq_record['event_list']
+	else:
+		irq_event_list = []
+	irq_event_list.append({'time':time, 'event':'netif_rx',
+		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
+	irq_record.update({'event_list':irq_event_list})
+	irq_dic[cpu].append(irq_record)
+
+def handle_netif_receive_skb(event_info):
+	global of_count_rx_skb_list
+
+	(name, context, cpu, time, pid, comm,
+		skbaddr, skblen, dev_name) = event_info
+	if cpu in net_rx_dic.keys():
+		rec_data = {'event_name':'netif_receive_skb',
+			    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+		event_list = net_rx_dic[cpu]['event_list']
+		event_list.append(rec_data)
+		rx_skb_list.insert(0, rec_data)
+		if len(rx_skb_list) > buffer_budget:
+			rx_skb_list.pop()
+			of_count_rx_skb_list += 1
+
+def handle_net_dev_queue(event_info):
+	global of_count_tx_queue_list
+
+	(name, context, cpu, time, pid, comm,
+		skbaddr, skblen, dev_name) = event_info
+	skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
+	tx_queue_list.insert(0, skb)
+	if len(tx_queue_list) > buffer_budget:
+		tx_queue_list.pop()
+		of_count_tx_queue_list += 1
+
+def handle_net_dev_xmit(event_info):
+	global of_count_tx_xmit_list
+
+	(name, context, cpu, time, pid, comm,
+		skbaddr, skblen, rc, dev_name) = event_info
+	if rc == 0: # NETDEV_TX_OK
+		for i in range(len(tx_queue_list)):
+			skb = tx_queue_list[i]
+			if skb['skbaddr'] == skbaddr:
+				skb['xmit_t'] = time
+				tx_xmit_list.insert(0, skb)
+				del tx_queue_list[i]
+				if len(tx_xmit_list) > buffer_budget:
+					tx_xmit_list.pop()
+					of_count_tx_xmit_list += 1
+				return
+
+def handle_kfree_skb(event_info):
+	(name, context, cpu, time, pid, comm,
+		skbaddr, protocol, location) = event_info
+	for i in range(len(tx_queue_list)):
+		skb = tx_queue_list[i]
+		if skb['skbaddr'] == skbaddr:
+			del tx_queue_list[i]
+			return
+	for i in range(len(tx_xmit_list)):
+		skb = tx_xmit_list[i]
+		if skb['skbaddr'] == skbaddr:
+			skb['free_t'] = time
+			tx_free_list.append(skb)
+			del tx_xmit_list[i]
+			return
+	for i in range(len(rx_skb_list)):
+		rec_data = rx_skb_list[i]
+		if rec_data['skbaddr'] == skbaddr:
+			rec_data.update({'handle':"kfree_skb",
+					'comm':comm, 'pid':pid, 'comm_t':time})
+			del rx_skb_list[i]
+			return
+
+def handle_consume_skb(event_info):
+	(name, context, cpu, time, pid, comm, skbaddr) = event_info
+	for i in range(len(tx_xmit_list)):
+		skb = tx_xmit_list[i]
+		if skb['skbaddr'] == skbaddr:
+			skb['free_t'] = time
+			tx_free_list.append(skb)
+			del tx_xmit_list[i]
+			return
+
+def handle_skb_copy_datagram_iovec(event_info):
+	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
+	for i in range(len(rx_skb_list)):
+		rec_data = rx_skb_list[i]
+		if skbaddr == rec_data['skbaddr']:
+			rec_data.update({'handle':"skb_copy_datagram_iovec",
+					'comm':comm, 'pid':pid, 'comm_t':time})
+			del rx_skb_list[i]
+			return
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
new file mode 100644
index 0000000..00e0e74
--- /dev/null
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Ravi Bangoria, IBM Corporation
+#
+# Hypervisor call statistics
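+#
+# Example (hypothetical invocation; requires the powerpc hcall tracepoints):
+#
+#   perf record -e powerpc:hcall_entry -e powerpc:hcall_exit -a -- sleep 30
+#   perf script -s powerpc-hcalls.py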
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+# output: {
+#	opcode: {
+#		'min': minimum time nsec
+#		'max': maximum time nsec
+#		'time': average time nsec
+#		'cnt': counter
+#	} ...
+# }
+output = {}
+
+# d_enter: {
+#	cpu: {
+#		opcode: nsec
+#	} ...
+# }
+d_enter = {}
+
+hcall_table = {
+	4: 'H_REMOVE',
+	8: 'H_ENTER',
+	12: 'H_READ',
+	16: 'H_CLEAR_MOD',
+	20: 'H_CLEAR_REF',
+	24: 'H_PROTECT',
+	28: 'H_GET_TCE',
+	32: 'H_PUT_TCE',
+	36: 'H_SET_SPRG0',
+	40: 'H_SET_DABR',
+	44: 'H_PAGE_INIT',
+	48: 'H_SET_ASR',
+	52: 'H_ASR_ON',
+	56: 'H_ASR_OFF',
+	60: 'H_LOGICAL_CI_LOAD',
+	64: 'H_LOGICAL_CI_STORE',
+	68: 'H_LOGICAL_CACHE_LOAD',
+	72: 'H_LOGICAL_CACHE_STORE',
+	76: 'H_LOGICAL_ICBI',
+	80: 'H_LOGICAL_DCBF',
+	84: 'H_GET_TERM_CHAR',
+	88: 'H_PUT_TERM_CHAR',
+	92: 'H_REAL_TO_LOGICAL',
+	96: 'H_HYPERVISOR_DATA',
+	100: 'H_EOI',
+	104: 'H_CPPR',
+	108: 'H_IPI',
+	112: 'H_IPOLL',
+	116: 'H_XIRR',
+	120: 'H_MIGRATE_DMA',
+	124: 'H_PERFMON',
+	220: 'H_REGISTER_VPA',
+	224: 'H_CEDE',
+	228: 'H_CONFER',
+	232: 'H_PROD',
+	236: 'H_GET_PPP',
+	240: 'H_SET_PPP',
+	244: 'H_PURR',
+	248: 'H_PIC',
+	252: 'H_REG_CRQ',
+	256: 'H_FREE_CRQ',
+	260: 'H_VIO_SIGNAL',
+	264: 'H_SEND_CRQ',
+	272: 'H_COPY_RDMA',
+	276: 'H_REGISTER_LOGICAL_LAN',
+	280: 'H_FREE_LOGICAL_LAN',
+	284: 'H_ADD_LOGICAL_LAN_BUFFER',
+	288: 'H_SEND_LOGICAL_LAN',
+	292: 'H_BULK_REMOVE',
+	304: 'H_MULTICAST_CTRL',
+	308: 'H_SET_XDABR',
+	312: 'H_STUFF_TCE',
+	316: 'H_PUT_TCE_INDIRECT',
+	332: 'H_CHANGE_LOGICAL_LAN_MAC',
+	336: 'H_VTERM_PARTNER_INFO',
+	340: 'H_REGISTER_VTERM',
+	344: 'H_FREE_VTERM',
+	348: 'H_RESET_EVENTS',
+	352: 'H_ALLOC_RESOURCE',
+	356: 'H_FREE_RESOURCE',
+	360: 'H_MODIFY_QP',
+	364: 'H_QUERY_QP',
+	368: 'H_REREGISTER_PMR',
+	372: 'H_REGISTER_SMR',
+	376: 'H_QUERY_MR',
+	380: 'H_QUERY_MW',
+	384: 'H_QUERY_HCA',
+	388: 'H_QUERY_PORT',
+	392: 'H_MODIFY_PORT',
+	396: 'H_DEFINE_AQP1',
+	400: 'H_GET_TRACE_BUFFER',
+	404: 'H_DEFINE_AQP0',
+	408: 'H_RESIZE_MR',
+	412: 'H_ATTACH_MCQP',
+	416: 'H_DETACH_MCQP',
+	420: 'H_CREATE_RPT',
+	424: 'H_REMOVE_RPT',
+	428: 'H_REGISTER_RPAGES',
+	432: 'H_DISABLE_AND_GETC',
+	436: 'H_ERROR_DATA',
+	440: 'H_GET_HCA_INFO',
+	444: 'H_GET_PERF_COUNT',
+	448: 'H_MANAGE_TRACE',
+	468: 'H_FREE_LOGICAL_LAN_BUFFER',
+	472: 'H_POLL_PENDING',
+	484: 'H_QUERY_INT_STATE',
+	580: 'H_ILLAN_ATTRIBUTES',
+	592: 'H_MODIFY_HEA_QP',
+	596: 'H_QUERY_HEA_QP',
+	600: 'H_QUERY_HEA',
+	604: 'H_QUERY_HEA_PORT',
+	608: 'H_MODIFY_HEA_PORT',
+	612: 'H_REG_BCMC',
+	616: 'H_DEREG_BCMC',
+	620: 'H_REGISTER_HEA_RPAGES',
+	624: 'H_DISABLE_AND_GET_HEA',
+	628: 'H_GET_HEA_INFO',
+	632: 'H_ALLOC_HEA_RESOURCE',
+	644: 'H_ADD_CONN',
+	648: 'H_DEL_CONN',
+	664: 'H_JOIN',
+	676: 'H_VASI_STATE',
+	688: 'H_ENABLE_CRQ',
+	696: 'H_GET_EM_PARMS',
+	720: 'H_SET_MPP',
+	724: 'H_GET_MPP',
+	748: 'H_HOME_NODE_ASSOCIATIVITY',
+	756: 'H_BEST_ENERGY',
+	764: 'H_XIRR_X',
+	768: 'H_RANDOM',
+	772: 'H_COP',
+	788: 'H_GET_MPP_X',
+	796: 'H_SET_MODE',
+	61440: 'H_RTAS',
+}
+
+def hcall_table_lookup(opcode):
+	if (hcall_table.has_key(opcode)):
+		return hcall_table[opcode]
+	else:
+		return opcode
+
+print_ptrn = '%-28s%10s%10s%10s%10s'
+
+def trace_end():
+	print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
+	print '-' * 68
+	for opcode in output:
+		h_name = hcall_table_lookup(opcode)
+		time = output[opcode]['time']
+		cnt = output[opcode]['cnt']
+		min_t = output[opcode]['min']
+		max_t = output[opcode]['max']
+
+		print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
+
+def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
+			opcode, retval):
+	if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
+		diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
+
+		if (output.has_key(opcode)):
+			output[opcode]['time'] += diff
+			output[opcode]['cnt'] += 1
+			if (output[opcode]['min'] > diff):
+				output[opcode]['min'] = diff
+			if (output[opcode]['max'] < diff):
+				output[opcode]['max'] = diff
+		else:
+			output[opcode] = {
+				'time': diff,
+				'cnt': 1,
+				'min': diff,
+				'max': diff,
+			}
+
+		del d_enter[cpu][opcode]
+#	else:
+#		print "Can't find matching hcall_enter event. Ignoring sample"
+
+def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
+			 callchain, opcode):
+		if (d_enter.has_key(cpu)):
+			d_enter[cpu][opcode] = nsecs(sec, nsec)
+		else:
+			d_enter[cpu] = {opcode: nsecs(sec, nsec)}
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
new file mode 100644
index 0000000..3473e7f
--- /dev/null
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -0,0 +1,464 @@
+#!/usr/bin/python
+#
+# Cpu task migration overview toy
+#
+# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
+#
+# perf script event handlers have been generated by perf script -g python
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
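+#
+# Example (hypothetical invocation; the GUI provided by SchedGui needs
+# wxPython to be installed):
+#
+#   perf script record sched-migration
+#   perf script report sched-migration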
+from __future__ import print_function
+
+import os
+import sys
+
+from collections import defaultdict
+try:
+    from UserList import UserList
+except ImportError:
+    # Python 3: UserList moved to the collections package
+    from collections import UserList
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from SchedGui import *
+
+
+threads = { 0 : "idle"}
+
+def thread_name(pid):
+	return "%s:%d" % (threads[pid], pid)
+
+class RunqueueEventUnknown:
+	@staticmethod
+	def color():
+		return None
+
+	def __repr__(self):
+		return "unknown"
+
+class RunqueueEventSleep:
+	@staticmethod
+	def color():
+		return (0, 0, 0xff)
+
+	def __init__(self, sleeper):
+		self.sleeper = sleeper
+
+	def __repr__(self):
+		return "%s gone to sleep" % thread_name(self.sleeper)
+
+class RunqueueEventWakeup:
+	@staticmethod
+	def color():
+		return (0xff, 0xff, 0)
+
+	def __init__(self, wakee):
+		self.wakee = wakee
+
+	def __repr__(self):
+		return "%s woke up" % thread_name(self.wakee)
+
+class RunqueueEventFork:
+	@staticmethod
+	def color():
+		return (0, 0xff, 0)
+
+	def __init__(self, child):
+		self.child = child
+
+	def __repr__(self):
+		return "new forked task %s" % thread_name(self.child)
+
+class RunqueueMigrateIn:
+	@staticmethod
+	def color():
+		return (0, 0xf0, 0xff)
+
+	def __init__(self, new):
+		self.new = new
+
+	def __repr__(self):
+		return "task migrated in %s" % thread_name(self.new)
+
+class RunqueueMigrateOut:
+	@staticmethod
+	def color():
+		return (0xff, 0, 0xff)
+
+	def __init__(self, old):
+		self.old = old
+
+	def __repr__(self):
+		return "task migrated out %s" % thread_name(self.old)
+
+class RunqueueSnapshot:
+	def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
+		self.tasks = tuple(tasks)
+		self.event = event
+
+	def sched_switch(self, prev, prev_state, next):
+		event = RunqueueEventUnknown()
+
+		if taskState(prev_state) == "R" and next in self.tasks \
+			and prev in self.tasks:
+			return self
+
+		if taskState(prev_state) != "R":
+			event = RunqueueEventSleep(prev)
+
+		next_tasks = list(self.tasks[:])
+		if prev in self.tasks:
+			if taskState(prev_state) != "R":
+				next_tasks.remove(prev)
+		elif taskState(prev_state) == "R":
+			next_tasks.append(prev)
+
+		if next not in next_tasks:
+			next_tasks.append(next)
+
+		return RunqueueSnapshot(next_tasks, event)
+
+	def migrate_out(self, old):
+		if old not in self.tasks:
+			return self
+		next_tasks = [task for task in self.tasks if task != old]
+
+		return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
+
+	def __migrate_in(self, new, event):
+		if new in self.tasks:
+			self.event = event
+			return self
+		next_tasks = self.tasks[:] + tuple([new])
+
+		return RunqueueSnapshot(next_tasks, event)
+
+	def migrate_in(self, new):
+		return self.__migrate_in(new, RunqueueMigrateIn(new))
+
+	def wake_up(self, new):
+		return self.__migrate_in(new, RunqueueEventWakeup(new))
+
+	def wake_up_new(self, new):
+		return self.__migrate_in(new, RunqueueEventFork(new))
+
+	def load(self):
+		""" Provide the number of tasks on the runqueue.
+		    Don't count idle"""
+		return len(self.tasks) - 1
+
+	def __repr__(self):
+		ret = self.tasks.__repr__()
+		ret += self.origin_tostring()
+
+		return ret
+
+class TimeSlice:
+	def __init__(self, start, prev):
+		self.start = start
+		self.prev = prev
+		self.end = start
+		# cpus that triggered the event
+		self.event_cpus = []
+		if prev is not None:
+			self.total_load = prev.total_load
+			self.rqs = prev.rqs.copy()
+		else:
+			self.rqs = defaultdict(RunqueueSnapshot)
+			self.total_load = 0
+
+	def __update_total_load(self, old_rq, new_rq):
+		diff = new_rq.load() - old_rq.load()
+		self.total_load += diff
+
+	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
+		old_rq = self.prev.rqs[cpu]
+		new_rq = old_rq.sched_switch(prev, prev_state, next)
+
+		if old_rq is new_rq:
+			return
+
+		self.rqs[cpu] = new_rq
+		self.__update_total_load(old_rq, new_rq)
+		ts_list.append(self)
+		self.event_cpus = [cpu]
+
+	def migrate(self, ts_list, new, old_cpu, new_cpu):
+		if old_cpu == new_cpu:
+			return
+		old_rq = self.prev.rqs[old_cpu]
+		out_rq = old_rq.migrate_out(new)
+		self.rqs[old_cpu] = out_rq
+		self.__update_total_load(old_rq, out_rq)
+
+		new_rq = self.prev.rqs[new_cpu]
+		in_rq = new_rq.migrate_in(new)
+		self.rqs[new_cpu] = in_rq
+		self.__update_total_load(new_rq, in_rq)
+
+		ts_list.append(self)
+
+		if old_rq is not out_rq:
+			self.event_cpus.append(old_cpu)
+		self.event_cpus.append(new_cpu)
+
+	def wake_up(self, ts_list, pid, cpu, fork):
+		old_rq = self.prev.rqs[cpu]
+		if fork:
+			new_rq = old_rq.wake_up_new(pid)
+		else:
+			new_rq = old_rq.wake_up(pid)
+
+		if new_rq is old_rq:
+			return
+		self.rqs[cpu] = new_rq
+		self.__update_total_load(old_rq, new_rq)
+		ts_list.append(self)
+		self.event_cpus = [cpu]
+
+	def next(self, t):
+		self.end = t
+		return TimeSlice(t, self)
+
+class TimeSliceList(UserList):
+	def __init__(self, arg = []):
+		self.data = arg
+
+	def get_time_slice(self, ts):
+		if len(self.data) == 0:
+			slice = TimeSlice(ts, TimeSlice(-1, None))
+		else:
+			slice = self.data[-1].next(ts)
+		return slice
+
+	def find_time_slice(self, ts):
+		start = 0
+		end = len(self.data)
+		found = -1
+		searching = True
+		while searching:
+			if start == end or start == end - 1:
+				searching = False
+
+			i = (end + start) // 2
+			if self.data[i].start <= ts and self.data[i].end >= ts:
+				found = i
+				end = i
+				continue
+
+			if self.data[i].end < ts:
+				start = i
+
+			elif self.data[i].start > ts:
+				end = i
+
+		return found
+
+	def set_root_win(self, win):
+		self.root_win = win
+
+	def mouse_down(self, cpu, t):
+		idx = self.find_time_slice(t)
+		if idx == -1:
+			return
+
+		ts = self[idx]
+		rq = ts.rqs[cpu]
+		raw = "CPU: %d\n" % cpu
+		raw += "Last event : %s\n" % rq.event.__repr__()
+		raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
+		raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
+		raw += "Load = %d\n" % rq.load()
+		for t in rq.tasks:
+			raw += "%s \n" % thread_name(t)
+
+		self.root_win.update_summary(raw)
+
+	def update_rectangle_cpu(self, slice, cpu):
+		rq = slice.rqs[cpu]
+
+		if slice.total_load != 0:
+			load_rate = rq.load() / float(slice.total_load)
+		else:
+			load_rate = 0
+
+		red_power = int(0xff - (0xff * load_rate))
+		color = (0xff, red_power, red_power)
+
+		top_color = None
+
+		if cpu in slice.event_cpus:
+			top_color = rq.event.color()
+
+		self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
+
+	def fill_zone(self, start, end):
+		i = self.find_time_slice(start)
+		if i == -1:
+			return
+
+		for i in range(i, len(self.data)):
+			timeslice = self.data[i]
+			if timeslice.start > end:
+				return
+
+			for cpu in timeslice.rqs:
+				self.update_rectangle_cpu(timeslice, cpu)
+
+	def interval(self):
+		if len(self.data) == 0:
+			return (0, 0)
+
+		return (self.data[0].start, self.data[-1].end)
+
+	def nr_rectangles(self):
+		last_ts = self.data[-1]
+		max_cpu = 0
+		for cpu in last_ts.rqs:
+			if cpu > max_cpu:
+				max_cpu = cpu
+		return max_cpu
+
+
+class SchedEventProxy:
+	def __init__(self):
+		self.current_tsk = defaultdict(lambda : -1)
+		self.timeslices = TimeSliceList()
+
+	def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
+			 next_comm, next_pid, next_prio):
+		""" Ensure the task we schedule out of this cpu is really the one
+		    we logged. Otherwise we may have missed traces """
+
+		on_cpu_task = self.current_tsk[headers.cpu]
+
+		if on_cpu_task != -1 and on_cpu_task != prev_pid:
+			print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+				(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))
+
+		threads[prev_pid] = prev_comm
+		threads[next_pid] = next_comm
+		self.current_tsk[headers.cpu] = next_pid
+
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
+
+	def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
+
+	def wake_up(self, headers, comm, pid, success, target_cpu, fork):
+		if success == 0:
+			return
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.wake_up(self.timeslices, pid, target_cpu, fork)
+
+
+def trace_begin():
+	global parser
+	parser = SchedEventProxy()
+
+def trace_end():
+	app = wx.App(False)
+	timeslices = parser.timeslices
+	frame = RootFrame(timeslices, "Migration")
+	app.MainLoop()
+
+def sched__sched_stat_runtime(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, runtime, vruntime):
+	pass
+
+def sched__sched_stat_iowait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, delay):
+	pass
+
+def sched__sched_stat_sleep(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, delay):
+	pass
+
+def sched__sched_stat_wait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, delay):
+	pass
+
+def sched__sched_process_fork(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, parent_comm, parent_pid, child_comm, child_pid):
+	pass
+
+def sched__sched_process_wait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_process_exit(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_process_free(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_migrate_task(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio, orig_cpu,
+	dest_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
+
+def sched__sched_switch(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm, common_callchain,
+	prev_comm, prev_pid, prev_prio, prev_state,
+	next_comm, next_pid, next_prio):
+
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
+			 next_comm, next_pid, next_prio)
+
+def sched__sched_wakeup_new(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio, success,
+	target_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	parser.wake_up(headers, comm, pid, success, target_cpu, 1)
+
+def sched__sched_wakeup(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio, success,
+	target_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	parser.wake_up(headers, comm, pid, success, target_cpu, 0)
+
+def sched__sched_wait_task(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, ret):
+	pass
+
+def sched__sched_kthread_stop(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid):
+	pass
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	pass
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
new file mode 100644
index 0000000..61621b9
--- /dev/null
+++ b/tools/perf/scripts/python/sctop.py
@@ -0,0 +1,80 @@
+# system call top
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Periodically displays system-wide system call totals, broken down by
+# syscall.  If a [comm] arg is specified, only syscalls called by
+# [comm] are displayed. If an [interval] arg is specified, the display
+# will be refreshed every [interval] seconds.  The default interval is
+# 3 seconds.
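+#
+# Example (hypothetical invocation; raw_syscalls:sys_enter must be recorded,
+# e.g. via the sctop-record wrapper):
+#
+#   perf script record sctop -- sleep 60
+#   perf script report sctop 5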
+
+import os, sys, thread, time
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+usage = "perf script -s sctop.py [comm] [interval]\n";
+
+for_comm = None
+default_interval = 3
+interval = default_interval
+
+if len(sys.argv) > 3:
+	sys.exit(usage)
+
+if len(sys.argv) > 2:
+	for_comm = sys.argv[1]
+	interval = int(sys.argv[2])
+elif len(sys.argv) > 1:
+	try:
+		interval = int(sys.argv[1])
+	except ValueError:
+		for_comm = sys.argv[1]
+		interval = default_interval
+
+syscalls = autodict()
+
+def trace_begin():
+	thread.start_new_thread(print_syscall_totals, (interval,))
+	pass
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, id, args):
+	if for_comm is not None:
+		if common_comm != for_comm:
+			return
+	try:
+		syscalls[id] += 1
+	except TypeError:
+		syscalls[id] = 1
+
+def syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, args):
+	raw_syscalls__sys_enter(**locals())
+
+def print_syscall_totals(interval):
+	while 1:
+		clear_term()
+		if for_comm is not None:
+			print "\nsyscall events for %s:\n\n" % (for_comm),
+		else:
+			print "\nsyscall events:\n\n",
+
+		print "%-40s  %10s\n" % ("event", "count"),
+		print "%-40s  %10s\n" % ("----------------------------------------", \
+						 "----------"),
+
+		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+					      reverse = True):
+			try:
+				print "%-40s  %10d\n" % (syscall_name(id), val),
+			except TypeError:
+				pass
+		syscalls.clear()
+		time.sleep(interval)
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
new file mode 100755
index 0000000..1697b5e
--- /dev/null
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -0,0 +1,126 @@
+# stackcollapse.py - format perf samples with one line per distinct call stack
+# SPDX-License-Identifier: GPL-2.0
+#
+# This script's output has two space-separated fields.  The first is a semicolon
+# separated stack including the program name (from the "comm" field) and the
+# function names from the call stack.  The second is a count:
+#
+#  swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2
+#
+# The file is sorted according to the first field.
+#
+# Input may be created and processed using:
+#
+#  perf record -a -g -F 99 sleep 60
+#  perf script report stackcollapse > out.stacks-folded
+#
+# (perf script record stackcollapse works too).
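+#
+# The folded output is the format expected by flame graph tools; for example
+# (a hypothetical follow-up step using Brendan Gregg's FlameGraph scripts):
+#
+#  flamegraph.pl out.stacks-folded > stacks.svg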
+#
+# Written by Paolo Bonzini <pbonzini@redhat.com>
+# Based on Brendan Gregg's stackcollapse-perf.pl script.
+
+import os
+import sys
+from collections import defaultdict
+from optparse import OptionParser, make_option
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from EventClass import *
+
+# command line parsing
+
+option_list = [
+    # formatting options for the bottom entry of the stack
+    make_option("--include-tid", dest="include_tid",
+                 action="store_true", default=False,
+                 help="include thread id in stack"),
+    make_option("--include-pid", dest="include_pid",
+                 action="store_true", default=False,
+                 help="include process id in stack"),
+    make_option("--no-comm", dest="include_comm",
+                 action="store_false", default=True,
+                 help="do not separate stacks according to comm"),
+    make_option("--tidy-java", dest="tidy_java",
+                 action="store_true", default=False,
+                 help="beautify Java signatures"),
+    make_option("--kernel", dest="annotate_kernel",
+                 action="store_true", default=False,
+                 help="annotate kernel functions with _[k]")
+]
+
+parser = OptionParser(option_list=option_list)
+(opts, args) = parser.parse_args()
+
+if len(args) != 0:
+    parser.error("unexpected command line argument")
+if opts.include_tid and not opts.include_comm:
+    parser.error("requesting tid but not comm is invalid")
+if opts.include_pid and not opts.include_comm:
+    parser.error("requesting pid but not comm is invalid")
+
+# event handlers
+
+lines = defaultdict(lambda: 0)
+
+def process_event(param_dict):
+    def tidy_function_name(sym, dso):
+        if sym is None:
+            sym = '[unknown]'
+
+        sym = sym.replace(';', ':')
+        if opts.tidy_java:
+            # the original stackcollapse-perf.pl script gives the
+            # example of converting this:
+            #    Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V
+            # to this:
+            #    org/mozilla/javascript/MemberBox:.init
+            sym = sym.replace('<', '')
+            sym = sym.replace('>', '')
+            if sym[0] == 'L' and sym.find('/'):
+                sym = sym[1:]
+            try:
+                sym = sym[:sym.index('(')]
+            except ValueError:
+                pass
+
+        if opts.annotate_kernel and dso == '[kernel.kallsyms]':
+            return sym + '_[k]'
+        else:
+            return sym
+
+    stack = list()
+    if 'callchain' in param_dict:
+        for entry in param_dict['callchain']:
+            entry.setdefault('sym', dict())
+            entry['sym'].setdefault('name', None)
+            entry.setdefault('dso', None)
+            stack.append(tidy_function_name(entry['sym']['name'],
+                                            entry['dso']))
+    else:
+        param_dict.setdefault('symbol', None)
+        param_dict.setdefault('dso', None)
+        stack.append(tidy_function_name(param_dict['symbol'],
+                                        param_dict['dso']))
+
+    if opts.include_comm:
+        comm = param_dict["comm"].replace(' ', '_')
+        sep = "-"
+        if opts.include_pid:
+            comm = comm + sep + str(param_dict['sample']['pid'])
+            sep = "/"
+        if opts.include_tid:
+            comm = comm + sep + str(param_dict['sample']['tid'])
+        stack.append(comm)
+
+    stack_string = ';'.join(reversed(stack))
+    lines[stack_string] = lines[stack_string] + 1
+
+def trace_end():
+    list = lines.keys()
+    list.sort()
+    for stack in list:
+        print "%s %d" % (stack, lines[stack])
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
new file mode 100644
index 0000000..8410672
--- /dev/null
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
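+#
+# Computes cycles per instruction (CPI) per cpu/thread for each stat interval.
+# A plausible workflow (hypothetical; the exact invocation may differ):
+#
+#   perf stat record -e cycles,instructions -a -I 1000 sleep 3
+#   perf script -s stat-cpi.py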
+
+data    = {}
+times   = []
+threads = []
+cpus    = []
+
+def get_key(time, event, cpu, thread):
+    return "%d-%s-%d-%d" % (time, event, cpu, thread)
+
+def store_key(time, cpu, thread):
+    if (time not in times):
+        times.append(time)
+
+    if (cpu not in cpus):
+        cpus.append(cpu)
+
+    if (thread not in threads):
+        threads.append(thread)
+
+def store(time, event, cpu, thread, val, ena, run):
+    #print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
+    #      (event, cpu, thread, time, val, ena, run)
+
+    store_key(time, cpu, thread)
+    key = get_key(time, event, cpu, thread)
+    data[key] = [ val, ena, run]
+
+def get(time, event, cpu, thread):
+    key = get_key(time, event, cpu, thread)
+    return data[key][0]
+
+def stat__cycles_k(cpu, thread, time, val, ena, run):
+    store(time, "cycles", cpu, thread, val, ena, run);
+
+def stat__instructions_k(cpu, thread, time, val, ena, run):
+    store(time, "instructions", cpu, thread, val, ena, run);
+
+def stat__cycles_u(cpu, thread, time, val, ena, run):
+    store(time, "cycles", cpu, thread, val, ena, run);
+
+def stat__instructions_u(cpu, thread, time, val, ena, run):
+    store(time, "instructions", cpu, thread, val, ena, run);
+
+def stat__cycles(cpu, thread, time, val, ena, run):
+    store(time, "cycles", cpu, thread, val, ena, run);
+
+def stat__instructions(cpu, thread, time, val, ena, run):
+    store(time, "instructions", cpu, thread, val, ena, run);
+
+def stat__interval(time):
+    for cpu in cpus:
+        for thread in threads:
+            cyc = get(time, "cycles", cpu, thread)
+            ins = get(time, "instructions", cpu, thread)
+            cpi = 0
+
+            if ins != 0:
+                cpi = cyc/float(ins)
+
+            print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
+
+def trace_end():
+    pass
+# XXX trace_end callback could be used as an alternative place
+#     to compute same values as in the script above:
+#
+#    for time in times:
+#        for cpu in cpus:
+#            for thread in threads:
+#                cyc = get(time, "cycles", cpu, thread)
+#                ins = get(time, "instructions", cpu, thread)
+#
+#                if ins != 0:
+#                    cpi = cyc/float(ins)
+#
+#                print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
new file mode 100644
index 0000000..daf314c
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -0,0 +1,74 @@
+# system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by comm/pid and syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
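+#
+# Example (hypothetical invocation; raw_syscalls:sys_enter must be recorded):
+#
+#   perf record -e raw_syscalls:sys_enter -a -- sleep 10
+#   perf script -s syscall-counts-by-pid.py firefox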
+
+import os, sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import syscall_name
+
+usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
+
+for_comm = None
+for_pid = None
+
+if len(sys.argv) > 2:
+	sys.exit(usage)
+
+if len(sys.argv) > 1:
+	try:
+		for_pid = int(sys.argv[1])
+	except:
+		for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+	print "Press control+C to stop and show the summary"
+
+def trace_end():
+	print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, id, args):
+
+	if (for_comm and common_comm != for_comm) or \
+	   (for_pid  and common_pid  != for_pid ):
+		return
+	try:
+		syscalls[common_comm][common_pid][id] += 1
+	except TypeError:
+		syscalls[common_comm][common_pid][id] = 1
+
+def syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, args):
+	raw_syscalls__sys_enter(**locals())
+
+def print_syscall_totals():
+    if for_comm is not None:
+	    print "\nsyscall events for %s:\n\n" % (for_comm),
+    else:
+	    print "\nsyscall events by comm/pid:\n\n",
+
+    print "%-40s  %10s\n" % ("comm [pid]/syscalls", "count"),
+    print "%-40s  %10s\n" % ("----------------------------------------", \
+                                 "----------"),
+
+    comm_keys = syscalls.keys()
+    for comm in comm_keys:
+	    pid_keys = syscalls[comm].keys()
+	    for pid in pid_keys:
+		    print "\n%s [%d]\n" % (comm, pid),
+		    id_keys = syscalls[comm][pid].keys()
+		    for id, val in sorted(syscalls[comm][pid].iteritems(), \
+				  key = lambda(k, v): (v, k),  reverse = True):
+			    print "  %-38s  %10d\n" % (syscall_name(id), val),
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
new file mode 100644
index 0000000..e66a773
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -0,0 +1,64 @@
+# system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
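+#
+# Example (hypothetical invocation, counting all syscalls system-wide):
+#
+#   perf record -e raw_syscalls:sys_enter -a -- sleep 10
+#   perf script -s syscall-counts.py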
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import syscall_name
+
+usage = "perf script -s syscall-counts.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+	sys.exit(usage)
+
+if len(sys.argv) > 1:
+	for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+	print "Press control+C to stop and show the summary"
+
+def trace_end():
+	print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, id, args):
+	if for_comm is not None:
+		if common_comm != for_comm:
+			return
+	try:
+		syscalls[id] += 1
+	except TypeError:
+		syscalls[id] = 1
+
+def syscalls__sys_enter(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	id, args):
+	raw_syscalls__sys_enter(**locals())
+
+def print_syscall_totals():
+    if for_comm is not None:
+	    print "\nsyscall events for %s:\n\n" % (for_comm),
+    else:
+	    print "\nsyscall events:\n\n",
+
+    print "%-40s  %10s\n" % ("event", "count"),
+    print "%-40s  %10s\n" % ("----------------------------------------", \
+                                 "-----------"),
+
+    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+				  reverse = True):
+	    print "%-40s  %10d\n" % (syscall_name(id), val),