Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
index ca0527d..4b63593 100644
--- a/drivers/hwtracing/intel_th/Kconfig
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config INTEL_TH
tristate "Intel(R) Trace Hub controller"
depends on HAS_DMA && HAS_IOMEM
diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile
index d9252fa..b63eb8f 100644
--- a/drivers/hwtracing/intel_th/Makefile
+++ b/drivers/hwtracing/intel_th/Makefile
@@ -20,3 +20,6 @@
obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o
intel_th_pti-y := pti.o
+
+obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu_sink.o
+intel_th_msu_sink-y := msu-sink.o
diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c
index 87bc374..87f9024 100644
--- a/drivers/hwtracing/intel_th/acpi.c
+++ b/drivers/hwtracing/intel_th/acpi.c
@@ -37,15 +37,21 @@
static int intel_th_acpi_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct resource resource[TH_MMIO_END];
const struct acpi_device_id *id;
struct intel_th *th;
+ int i, r;
id = acpi_match_device(intel_th_acpi_ids, &pdev->dev);
if (!id)
return -ENODEV;
- th = intel_th_alloc(&pdev->dev, (void *)id->driver_data,
- pdev->resource, pdev->num_resources, -1);
+ for (i = 0, r = 0; i < pdev->num_resources && r < TH_MMIO_END; i++)
+ if (pdev->resource[i].flags &
+ (IORESOURCE_IRQ | IORESOURCE_MEM))
+ resource[r++] = pdev->resource[i];
+
+ th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, resource, r);
if (IS_ERR(th))
return PTR_ERR(th);
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index fc6b7f8..d5c1821 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -422,6 +422,7 @@
unsigned nres;
unsigned type;
unsigned otype;
+ bool mknode;
unsigned scrpd;
int id;
} intel_th_subdevices[] = {
@@ -429,9 +430,9 @@
.nres = 1,
.res = {
{
- /* Handle TSCU from GTH driver */
+ /* Handle TSCU and CTS from GTH driver */
.start = REG_GTH_OFFSET,
- .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1,
+ .end = REG_CTS_OFFSET + REG_CTS_LENGTH - 1,
.flags = IORESOURCE_MEM,
},
},
@@ -456,6 +457,7 @@
.name = "msc",
.id = 0,
.type = INTEL_TH_OUTPUT,
+ .mknode = true,
.otype = GTH_MSU,
.scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
},
@@ -476,6 +478,7 @@
.name = "msc",
.id = 1,
.type = INTEL_TH_OUTPUT,
+ .mknode = true,
.otype = GTH_MSU,
.scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
},
@@ -488,7 +491,7 @@
.flags = IORESOURCE_MEM,
},
{
- .start = 1, /* use resource[1] */
+ .start = TH_MMIO_SW,
.end = 0,
.flags = IORESOURCE_MEM,
},
@@ -498,6 +501,24 @@
.type = INTEL_TH_SOURCE,
},
{
+ .nres = 2,
+ .res = {
+ {
+ .start = REG_STH_OFFSET,
+ .end = REG_STH_OFFSET + REG_STH_LENGTH - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TH_MMIO_RTIT,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+ },
+ .id = -1,
+ .name = "rtit",
+ .type = INTEL_TH_SOURCE,
+ },
+ {
.nres = 1,
.res = {
{
@@ -581,7 +602,6 @@
struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
- bool is64bit = false;
int r, err;
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -591,18 +611,12 @@
thdev->drvdata = th->drvdata;
- for (r = 0; r < th->num_resources; r++)
- if (th->resource[r].flags & IORESOURCE_MEM_64) {
- is64bit = true;
- break;
- }
-
memcpy(res, subdev->res,
sizeof(struct resource) * subdev->nres);
for (r = 0; r < subdev->nres; r++) {
struct resource *devres = th->resource;
- int bar = 0; /* cut subdevices' MMIO from resource[0] */
+ int bar = TH_MMIO_CONFIG;
/*
* Take .end == 0 to mean 'take the whole bar',
@@ -611,8 +625,9 @@
*/
if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
bar = res[r].start;
- if (is64bit)
- bar *= 2;
+ err = -ENODEV;
+ if (bar >= th->num_resources)
+ goto fail_put_device;
res[r].start = 0;
res[r].end = resource_size(&devres[bar]) - 1;
}
@@ -624,7 +639,12 @@
dev_dbg(th->dev, "%s:%d @ %pR\n",
subdev->name, r, &res[r]);
} else if (res[r].flags & IORESOURCE_IRQ) {
- res[r].start = th->irq;
+ /*
+ * Only pass on the IRQ if we have useful interrupts:
+ * the ones that can be configured via MINTCTL.
+ */
+ if (INTEL_TH_CAP(th, has_mintctl) && th->irq != -1)
+ res[r].start = th->irq;
}
}
@@ -635,7 +655,8 @@
}
if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
+ if (subdev->mknode)
+ thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
thdev->output.scratchpad = subdev->scrpd;
@@ -754,8 +775,13 @@
thdev = intel_th_subdevice_alloc(th, subdev);
/* note: caller should free subdevices from th::thdev[] */
- if (IS_ERR(thdev))
+ if (IS_ERR(thdev)) {
+ /* ENODEV for individual subdevices is allowed */
+ if (PTR_ERR(thdev) == -ENODEV)
+ continue;
+
return PTR_ERR(thdev);
+ }
th->thdev[th->num_thdevs++] = thdev;
}
@@ -763,13 +789,6 @@
return 0;
}
-static int match_devt(struct device *dev, void *data)
-{
- dev_t devt = (dev_t)(unsigned long)data;
-
- return dev->devt == devt;
-}
-
static int intel_th_output_open(struct inode *inode, struct file *file)
{
const struct file_operations *fops;
@@ -777,9 +796,7 @@
struct device *dev;
int err;
- dev = bus_find_device(&intel_th_bus, NULL,
- (void *)(unsigned long)inode->i_rdev,
- match_devt);
+ dev = bus_find_device_by_devt(&intel_th_bus, inode->i_rdev);
if (!dev || !dev->driver)
return -ENODEV;
@@ -805,26 +822,40 @@
.llseek = noop_llseek,
};
+static irqreturn_t intel_th_irq(int irq, void *data)
+{
+ struct intel_th *th = data;
+ irqreturn_t ret = IRQ_NONE;
+ struct intel_th_driver *d;
+ int i;
+
+ for (i = 0; i < th->num_thdevs; i++) {
+ if (th->thdev[i]->type != INTEL_TH_OUTPUT)
+ continue;
+
+ d = to_intel_th_driver(th->thdev[i]->dev.driver);
+ if (d && d->irq)
+ ret |= d->irq(th->thdev[i]);
+ }
+
+ if (ret == IRQ_NONE)
+ pr_warn_ratelimited("nobody cared for irq\n");
+
+ return ret;
+}
+
/**
* intel_th_alloc() - allocate a new Intel TH device and its subdevices
* @dev: parent device
- * @devres: parent's resources
- * @ndevres: number of resources
- * @irq: irq number
+ * @devres: resources indexed by th_mmio_idx
+ * @ndevres: number of entries in @devres
*/
struct intel_th *
intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
- struct resource *devres, unsigned int ndevres, int irq)
+ struct resource *devres, unsigned int ndevres)
{
+ int err, r, nr_mmios = 0;
struct intel_th *th;
- int err, r;
-
- if (irq == -1)
- for (r = 0; r < ndevres; r++)
- if (devres[r].flags & IORESOURCE_IRQ) {
- irq = devres[r].start;
- break;
- }
th = kzalloc(sizeof(*th), GFP_KERNEL);
if (!th)
@@ -842,12 +873,32 @@
err = th->major;
goto err_ida;
}
+ th->irq = -1;
th->dev = dev;
th->drvdata = drvdata;
- th->resource = devres;
- th->num_resources = ndevres;
- th->irq = irq;
+ for (r = 0; r < ndevres; r++)
+ switch (devres[r].flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_MEM:
+ th->resource[nr_mmios++] = devres[r];
+ break;
+ case IORESOURCE_IRQ:
+ err = devm_request_irq(dev, devres[r].start,
+ intel_th_irq, IRQF_SHARED,
+ dev_name(dev), th);
+ if (err)
+ goto err_chrdev;
+
+ if (th->irq == -1)
+ th->irq = devres[r].start;
+ break;
+ default:
+ dev_warn(dev, "Unknown resource type %lx\n",
+ devres[r].flags);
+ break;
+ }
+
+ th->num_resources = nr_mmios;
dev_set_drvdata(dev, th);
@@ -864,6 +915,10 @@
return th;
+err_chrdev:
+ __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
+ "intel_th/output");
+
err_ida:
ida_simple_remove(&intel_th_ida, th->id);
@@ -924,6 +979,27 @@
EXPORT_SYMBOL_GPL(intel_th_trace_enable);
/**
+ * intel_th_trace_switch() - execute a switch sequence
+ * @thdev: output device that requests tracing switch
+ */
+int intel_th_trace_switch(struct intel_th_device *thdev)
+{
+ struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
+ struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+
+ if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
+ return -EINVAL;
+
+ hubdrv->trig_switch(hub, &thdev->output);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_th_trace_switch);
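
For orientation (editorial aside, not part of the patch): intel_th_trace_switch() is the output-driver-facing half of the window-switch machinery; it forwards to the hub's new ->trig_switch() callback, which gth.c below wires to intel_th_gth_switch(). A minimal sketch of the call pattern, with example_request_switch() as a hypothetical caller standing in for msc_win_switch() later in this patch:

static int example_request_switch(struct intel_th_device *thdev)
{
	/*
	 * thdev must be an INTEL_TH_OUTPUT device whose parent is the
	 * GTH; the core validates both and then asks the hub to run
	 * its CTS trigger sequence.
	 */
	return intel_th_trace_switch(thdev);
}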
+
+/**
* intel_th_trace_disable() - disable tracing for an output device
* @thdev: output device that requests tracing be disabled
*/
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 8426b79..f72803a 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -308,6 +308,11 @@
iowrite32(0, gth->base + REG_GTH_SCR);
iowrite32(0xfc, gth->base + REG_GTH_SCR2);
+ /* setup CTS for single trigger */
+ iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
+ iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
+ CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);
+
return 0;
}
@@ -457,6 +462,68 @@
}
/**
+ * intel_th_gth_stop() - stop tracing to an output device
+ * @gth: GTH device
+ * @output: output device's descriptor
+ * @capture_done: set when no more traces will be captured
+ *
+ * This will stop tracing using the force storeEn off signal and wait for the
+ * pipelines to be empty for the corresponding output port.
+ */
+static void intel_th_gth_stop(struct gth_device *gth,
+ struct intel_th_output *output,
+ bool capture_done)
+{
+ struct intel_th_device *outdev =
+ container_of(output, struct intel_th_device, output);
+ struct intel_th_driver *outdrv =
+ to_intel_th_driver(outdev->dev.driver);
+ unsigned long count;
+ u32 reg;
+ u32 scr2 = 0xfc | (capture_done ? 1 : 0);
+
+ iowrite32(0, gth->base + REG_GTH_SCR);
+ iowrite32(scr2, gth->base + REG_GTH_SCR2);
+
+ /* wait on pipeline empty for the given port */
+ for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
+ count && !(reg & BIT(output->port)); count--) {
+ reg = ioread32(gth->base + REG_GTH_STAT);
+ cpu_relax();
+ }
+
+ if (!count)
+ dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
+ output->port);
+
+ /* wait on output pipeline empty */
+ if (outdrv->wait_empty)
+ outdrv->wait_empty(outdev);
+
+ /* clear force capture done for next captures */
+ iowrite32(0xfc, gth->base + REG_GTH_SCR2);
+}
+
+/**
+ * intel_th_gth_start() - start tracing to an output device
+ * @gth: GTH device
+ * @output: output device's descriptor
+ *
+ * This will start tracing using the force storeEn signal.
+ */
+static void intel_th_gth_start(struct gth_device *gth,
+ struct intel_th_output *output)
+{
+ u32 scr = 0xfc0000;
+
+ if (output->multiblock)
+ scr |= 0xff;
+
+ iowrite32(scr, gth->base + REG_GTH_SCR);
+ iowrite32(0, gth->base + REG_GTH_SCR2);
+}
+
+/**
* intel_th_gth_disable() - disable tracing to an output device
* @thdev: GTH device
* @output: output device's descriptor
@@ -469,7 +536,6 @@
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
- unsigned long count;
int master;
u32 reg;
@@ -482,22 +548,7 @@
}
spin_unlock(&gth->gth_lock);
- iowrite32(0, gth->base + REG_GTH_SCR);
- iowrite32(0xfd, gth->base + REG_GTH_SCR2);
-
- /* wait on pipeline empty for the given port */
- for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
- count && !(reg & BIT(output->port)); count--) {
- reg = ioread32(gth->base + REG_GTH_STAT);
- cpu_relax();
- }
-
- /* clear force capture done for next captures */
- iowrite32(0xfc, gth->base + REG_GTH_SCR2);
-
- if (!count)
- dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
- output->port);
+ intel_th_gth_stop(gth, output, true);
reg = ioread32(gth->base + REG_GTH_SCRPD0);
reg &= ~output->scratchpad;
@@ -526,8 +577,8 @@
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
struct intel_th *th = to_intel_th(thdev);
- u32 scr = 0xfc0000, scrpd;
int master;
+ u32 scrpd;
spin_lock(&gth->gth_lock);
for_each_set_bit(master, gth->output[output->port].master,
@@ -535,9 +586,6 @@
gth_master_set(gth, master, output->port);
}
- if (output->multiblock)
- scr |= 0xff;
-
output->active = true;
spin_unlock(&gth->gth_lock);
@@ -548,8 +596,41 @@
scrpd |= output->scratchpad;
iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
- iowrite32(scr, gth->base + REG_GTH_SCR);
- iowrite32(0, gth->base + REG_GTH_SCR2);
+ intel_th_gth_start(gth, output);
+}
+
+/**
+ * intel_th_gth_switch() - execute a switch sequence
+ * @thdev: GTH device
+ * @output: output device's descriptor
+ *
+ * This will execute a switch sequence that triggers a window switch
+ * when tracing to MSC in multi-block mode.
+ */
+static void intel_th_gth_switch(struct intel_th_device *thdev,
+ struct intel_th_output *output)
+{
+ struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+ unsigned long count;
+ u32 reg;
+
+ /* trigger */
+ iowrite32(0, gth->base + REG_CTS_CTL);
+ iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
+ /* wait on trigger status */
+ for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
+ count && !(reg & BIT(4)); count--) {
+ reg = ioread32(gth->base + REG_CTS_STAT);
+ cpu_relax();
+ }
+ if (!count)
+ dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
+
+ /* De-assert the trigger */
+ iowrite32(0, gth->base + REG_CTS_CTL);
+
+ intel_th_gth_stop(gth, output, false);
+ intel_th_gth_start(gth, output);
}
/**
@@ -607,6 +688,7 @@
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ int master;
if (thdev->host_mode)
return;
@@ -615,6 +697,9 @@
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
+ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+ if (gth->master[master] == port)
+ gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
}
@@ -731,6 +816,7 @@
.unassign = intel_th_gth_unassign,
.set_output = intel_th_gth_set_output,
.enable = intel_th_gth_enable,
+ .trig_switch = intel_th_gth_switch,
.disable = intel_th_gth_disable,
.driver = {
.name = "gth",
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 6f2b0b9..bfcc0fd 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -49,6 +49,12 @@
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */
REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */
+
+ /* Common Capture Sequencer (CTS) registers */
+ REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */
+ REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */
+ REG_CTS_STAT = 0x32a0, /* cts_status */
+ REG_CTS_CTL = 0x32a4, /* cts_control */
};
/* waiting for Pipeline Empty bit(s) to assert for GTH */
@@ -57,4 +63,17 @@
#define TSUCTRL_CTCRESYNC BIT(0)
#define TSCUSTAT_CTCSYNCING BIT(1)
+/* waiting for Trigger status to assert for CTS */
+#define CTS_TRIG_WAITLOOP_DEPTH 10000
+
+#define CTS_EVENT_ENABLE_IF_ANYTHING BIT(31)
+#define CTS_ACTION_CONTROL_STATE_OFF 27
+#define CTS_ACTION_CONTROL_SET_STATE(x) \
+ (((x) & 0x1f) << CTS_ACTION_CONTROL_STATE_OFF)
+#define CTS_ACTION_CONTROL_TRIGGER BIT(4)
+
+#define CTS_STATE_IDLE 0x10u
+
+#define CTS_CTL_SEQUENCER_ENABLE BIT(0)
+
#endif /* __INTEL_TH_GTH_H__ */
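
A worked example of the values these macros compose (illustration only, not part of the patch), matching the single-trigger setup written by intel_th_gth_reset() in gth.c above:

/* C0S0 event enable: the trigger condition is "anything" */
u32 en  = CTS_EVENT_ENABLE_IF_ANYTHING;			/* BIT(31)               = 0x80000000 */
/* C0S0 action: raise the trigger and move the sequencer to IDLE */
u32 act = CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
	  CTS_ACTION_CONTROL_TRIGGER;			/* (0x10 << 27) | BIT(4) = 0x80000010 */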
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 780206d..0df4800 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -8,6 +8,8 @@
#ifndef __INTEL_TH_H__
#define __INTEL_TH_H__
+#include <linux/irqreturn.h>
+
/* intel_th_device device types */
enum {
/* Devices that generate trace data */
@@ -18,6 +20,8 @@
INTEL_TH_SWITCH,
};
+struct intel_th_device;
+
/**
* struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
* @port: output port number, assigned by the switch
@@ -25,6 +29,7 @@
* @scratchpad: scratchpad bits to flag when this output is enabled
* @multiblock: true for multiblock output configuration
* @active: true when this output is enabled
+ * @wait_empty: wait for device pipeline to be empty
*
* Output port descriptor, used by switch driver to tell which output
* port this output device corresponds to. Filled in at output device's
@@ -42,10 +47,12 @@
/**
* struct intel_th_drvdata - describes hardware capabilities and quirks
* @tscu_enable: device needs SW to enable time stamping unit
+ * @has_mintctl: device has interrupt control (MINTCTL) register
* @host_mode_only: device can only operate in 'host debugger' mode
*/
struct intel_th_drvdata {
unsigned int tscu_enable : 1,
+ has_mintctl : 1,
host_mode_only : 1;
};
@@ -157,10 +164,13 @@
struct intel_th_device *othdev);
void (*enable)(struct intel_th_device *thdev,
struct intel_th_output *output);
+ void (*trig_switch)(struct intel_th_device *thdev,
+ struct intel_th_output *output);
void (*disable)(struct intel_th_device *thdev,
struct intel_th_output *output);
/* output ops */
- void (*irq)(struct intel_th_device *thdev);
+ irqreturn_t (*irq)(struct intel_th_device *thdev);
+ void (*wait_empty)(struct intel_th_device *thdev);
int (*activate)(struct intel_th_device *thdev);
void (*deactivate)(struct intel_th_device *thdev);
/* file_operations for those who want a device node */
@@ -213,21 +223,23 @@
struct intel_th *
intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
- struct resource *devres, unsigned int ndevres, int irq);
+ struct resource *devres, unsigned int ndevres);
void intel_th_free(struct intel_th *th);
int intel_th_driver_register(struct intel_th_driver *thdrv);
void intel_th_driver_unregister(struct intel_th_driver *thdrv);
int intel_th_trace_enable(struct intel_th_device *thdev);
+int intel_th_trace_switch(struct intel_th_device *thdev);
int intel_th_trace_disable(struct intel_th_device *thdev);
int intel_th_set_output(struct intel_th_device *thdev,
unsigned int master);
int intel_th_output_enable(struct intel_th *th, unsigned int otype);
-enum {
+enum th_mmio_idx {
TH_MMIO_CONFIG = 0,
- TH_MMIO_SW = 2,
+ TH_MMIO_SW = 1,
+ TH_MMIO_RTIT = 2,
TH_MMIO_END,
};
@@ -237,6 +249,9 @@
#define TH_CONFIGURABLE_MASTERS 256
#define TH_MSC_MAX 2
+/* Maximum IRQ vectors */
+#define TH_NVEC_MAX 8
+
/**
* struct intel_th - Intel TH controller
* @dev: driver core's device
@@ -244,7 +259,7 @@
* @hub: "switch" subdevice (GTH)
* @resource: resources of the entire controller
* @num_thdevs: number of devices in the @thdev array
- * @num_resources: number or resources in the @resource array
+ * @num_resources: number of resources in the @resource array
* @irq: irq number
* @id: this Intel TH controller's device ID in the system
* @major: device node major for output devices
@@ -256,7 +271,7 @@
struct intel_th_device *hub;
struct intel_th_drvdata *drvdata;
- struct resource *resource;
+ struct resource resource[TH_MMIO_END];
int (*activate)(struct intel_th *);
void (*deactivate)(struct intel_th *);
unsigned int num_thdevs;
@@ -296,6 +311,9 @@
REG_TSCU_OFFSET = 0x2000,
REG_TSCU_LENGTH = 0x1000,
+ REG_CTS_OFFSET = 0x3000,
+ REG_CTS_LENGTH = 0x1000,
+
/* Software Trace Hub (STH) [0x4000..0x4fff] */
REG_STH_OFFSET = 0x4000,
REG_STH_LENGTH = 0x2000,
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
new file mode 100644
index 0000000..2c7f511
--- /dev/null
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An example software sink buffer for Intel TH MSU.
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ */
+
+#include <linux/intel_th.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#define MAX_SGTS 16
+
+struct msu_sink_private {
+ struct device *dev;
+ struct sg_table **sgts;
+ unsigned int nr_sgts;
+};
+
+static void *msu_sink_assign(struct device *dev, int *mode)
+{
+ struct msu_sink_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ priv->sgts = kcalloc(MAX_SGTS, sizeof(void *), GFP_KERNEL);
+ if (!priv->sgts) {
+ kfree(priv);
+ return NULL;
+ }
+
+ priv->dev = dev;
+ *mode = MSC_MODE_MULTI;
+
+ return priv;
+}
+
+static void msu_sink_unassign(void *data)
+{
+ struct msu_sink_private *priv = data;
+
+ kfree(priv->sgts);
+ kfree(priv);
+}
+
+/* See also: msc.c: __msc_buffer_win_alloc() */
+static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size)
+{
+ struct msu_sink_private *priv = data;
+ unsigned int nents;
+ struct scatterlist *sg_ptr;
+ void *block;
+ int ret, i;
+
+ if (priv->nr_sgts == MAX_SGTS)
+ return -ENOMEM;
+
+ nents = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ ret = sg_alloc_table(*sgt, nents, GFP_KERNEL);
+ if (ret)
+ return -ENOMEM;
+
+ priv->sgts[priv->nr_sgts++] = *sgt;
+
+ for_each_sg((*sgt)->sgl, sg_ptr, nents, i) {
+ block = dma_alloc_coherent(priv->dev->parent->parent,
+ PAGE_SIZE, &sg_dma_address(sg_ptr),
+ GFP_KERNEL);
+ sg_set_buf(sg_ptr, block, PAGE_SIZE);
+ }
+
+ return nents;
+}
+
+/* See also: msc.c: __msc_buffer_win_free() */
+static void msu_sink_free_window(void *data, struct sg_table *sgt)
+{
+ struct msu_sink_private *priv = data;
+ struct scatterlist *sg_ptr;
+ int i;
+
+ for_each_sg(sgt->sgl, sg_ptr, sgt->nents, i) {
+ dma_free_coherent(priv->dev->parent->parent, PAGE_SIZE,
+ sg_virt(sg_ptr), sg_dma_address(sg_ptr));
+ }
+
+ sg_free_table(sgt);
+ priv->nr_sgts--;
+}
+
+static int msu_sink_ready(void *data, struct sg_table *sgt, size_t bytes)
+{
+ struct msu_sink_private *priv = data;
+
+ intel_th_msc_window_unlock(priv->dev, sgt);
+
+ return 0;
+}
+
+static const struct msu_buffer sink_mbuf = {
+ .name = "sink",
+ .assign = msu_sink_assign,
+ .unassign = msu_sink_unassign,
+ .alloc_window = msu_sink_alloc_window,
+ .free_window = msu_sink_free_window,
+ .ready = msu_sink_ready,
+};
+
+module_intel_th_msu_buffer(sink_mbuf);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index d293e55..6d240df 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -17,40 +17,63 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
+#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"
#define msc_dev(x) (&(x)->thdev->dev)
-/**
- * struct msc_block - multiblock mode block descriptor
- * @bdesc: pointer to hardware descriptor (beginning of the block)
- * @addr: physical address of the block
+/*
+ * Lockout state transitions:
+ * READY -> INUSE -+-> LOCKED -+-> READY -> etc.
+ * \-----------/
+ * WIN_READY: window can be used by HW
+ * WIN_INUSE: window is in use
+ * WIN_LOCKED: window is filled up and is being processed by the buffer
+ * handling code
+ *
+ * All state transitions happen automatically, except for the LOCKED->READY,
+ * which needs to be signalled by the buffer code by calling
+ * intel_th_msc_window_unlock().
+ *
+ * When the interrupt handler has to switch to the next window, it checks
+ * whether it's READY, and if it is, it performs the switch and tracing
+ * continues. If it's LOCKED, it stops the trace.
*/
-struct msc_block {
- struct msc_block_desc *bdesc;
- dma_addr_t addr;
+enum lockout_state {
+ WIN_READY = 0,
+ WIN_INUSE,
+ WIN_LOCKED
};
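
To tie the states to their call sites (illustration only, not part of the patch), lockout_walkthrough() below is a never-called sketch showing where each transition is made by the code added later in this file:

static void lockout_walkthrough(struct msc_window *win)
{
	/* msc_configure() and the IRQ handler take a window into use */
	msc_win_set_lockout(win, WIN_READY, WIN_INUSE);
	/* msc_disable() and the IRQ handler hand a full window to the buffer */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
	/* intel_th_msc_window_unlock() puts it back into rotation */
	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
}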
/**
* struct msc_window - multiblock mode window descriptor
* @entry: window list linkage (msc::win_list)
* @pgoff: page offset into the buffer that this window starts at
+ * @lockout: lockout state, see comment above
+ * @lo_lock: lockout state serialization
* @nr_blocks: number of blocks (pages) in this window
- * @block: array of block descriptors
+ * @nr_segs: number of segments in this window (<= @nr_blocks)
+ * @_sgt: inline storage for this window's block descriptor table
+ * @sgt: block descriptor table; points to @_sgt or to a buffer-provided table
*/
struct msc_window {
struct list_head entry;
unsigned long pgoff;
+ enum lockout_state lockout;
+ spinlock_t lo_lock;
unsigned int nr_blocks;
+ unsigned int nr_segs;
struct msc *msc;
- struct msc_block block[0];
+ struct sg_table _sgt;
+ struct sg_table *sgt;
};
/**
@@ -72,8 +95,8 @@
struct msc_window *start_win;
struct msc_window *win;
unsigned long offset;
- int start_block;
- int block;
+ struct scatterlist *start_block;
+ struct scatterlist *block;
unsigned int block_off;
unsigned int wrap_count;
unsigned int eof;
@@ -83,7 +106,11 @@
* struct msc - MSC device representation
* @reg_base: register window base address
* @thdev: intel_th_device pointer
+ * @mbuf: MSU buffer, if assigned
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
* @win_list: list of windows in multiblock mode
+ * @single_sgt: single mode buffer
+ * @cur_win: current window
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
@@ -101,14 +128,23 @@
*/
struct msc {
void __iomem *reg_base;
+ void __iomem *msu_base;
struct intel_th_device *thdev;
+ const struct msu_buffer *mbuf;
+ void *mbuf_priv;
+
+ struct work_struct work;
struct list_head win_list;
+ struct sg_table single_sgt;
+ struct msc_window *cur_win;
unsigned long nr_pages;
unsigned long single_sz;
unsigned int single_wrap : 1;
void *base;
dma_addr_t base_addr;
+ u32 orig_addr;
+ u32 orig_sz;
/* <0: no buffer, 0: no users, >0: active users */
atomic_t user_count;
@@ -120,12 +156,108 @@
/* config */
unsigned int enabled : 1,
- wrap : 1;
+ wrap : 1,
+ do_irq : 1;
unsigned int mode;
unsigned int burst_len;
unsigned int index;
};
+static LIST_HEAD(msu_buffer_list);
+static DEFINE_MUTEX(msu_buffer_mutex);
+
+/**
+ * struct msu_buffer_entry - internal MSU buffer bookkeeping
+ * @entry: link to msu_buffer_list
+ * @mbuf: MSU buffer object
+ * @owner: module that provides this MSU buffer
+ */
+struct msu_buffer_entry {
+ struct list_head entry;
+ const struct msu_buffer *mbuf;
+ struct module *owner;
+};
+
+static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
+{
+ struct msu_buffer_entry *mbe;
+
+ lockdep_assert_held(&msu_buffer_mutex);
+
+ list_for_each_entry(mbe, &msu_buffer_list, entry) {
+ if (!strcmp(mbe->mbuf->name, name))
+ return mbe;
+ }
+
+ return NULL;
+}
+
+static const struct msu_buffer *
+msu_buffer_get(const char *name)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(name);
+ if (mbe && !try_module_get(mbe->owner))
+ mbe = NULL;
+ mutex_unlock(&msu_buffer_mutex);
+
+ return mbe ? mbe->mbuf : NULL;
+}
+
+static void msu_buffer_put(const struct msu_buffer *mbuf)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(mbuf->name);
+ if (mbe)
+ module_put(mbe->owner);
+ mutex_unlock(&msu_buffer_mutex);
+}
+
+int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
+ struct module *owner)
+{
+ struct msu_buffer_entry *mbe;
+ int ret = 0;
+
+ mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
+ if (!mbe)
+ return -ENOMEM;
+
+ mutex_lock(&msu_buffer_mutex);
+ if (__msu_buffer_entry_find(mbuf->name)) {
+ ret = -EEXIST;
+ kfree(mbe);
+ goto unlock;
+ }
+
+ mbe->mbuf = mbuf;
+ mbe->owner = owner;
+ list_add_tail(&mbe->entry, &msu_buffer_list);
+unlock:
+ mutex_unlock(&msu_buffer_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
+
+void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(mbuf->name);
+ if (mbe) {
+ list_del(&mbe->entry);
+ kfree(mbe);
+ }
+ mutex_unlock(&msu_buffer_mutex);
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
+
static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
/* header hasn't been written */
@@ -139,72 +271,25 @@
return false;
}
-/**
- * msc_oldest_window() - locate the window with oldest data
- * @msc: MSC device
- *
- * This should only be used in multiblock mode. Caller should hold the
- * msc::user_count reference.
- *
- * Return: the oldest window with valid data
- */
-static struct msc_window *msc_oldest_window(struct msc *msc)
+static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
- struct msc_window *win;
- u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
- unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
- unsigned int found = 0;
-
- if (list_empty(&msc->win_list))
- return NULL;
-
- /*
- * we might need a radix tree for this, depending on how
- * many windows a typical user would allocate; ideally it's
- * something like 2, in which case we're good
- */
- list_for_each_entry(win, &msc->win_list, entry) {
- if (win->block[0].addr == win_addr)
- found++;
-
- /* skip the empty ones */
- if (msc_block_is_empty(win->block[0].bdesc))
- continue;
-
- if (found)
- return win;
- }
-
- return list_entry(msc->win_list.next, struct msc_window, entry);
+ return win->sgt->sgl;
}
-/**
- * msc_win_oldest_block() - locate the oldest block in a given window
- * @win: window to look at
- *
- * Return: index of the block with the oldest data
- */
-static unsigned int msc_win_oldest_block(struct msc_window *win)
+static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
- unsigned int blk;
- struct msc_block_desc *bdesc = win->block[0].bdesc;
+ return sg_virt(msc_win_base_sg(win));
+}
- /* without wrapping, first block is the oldest */
- if (!msc_block_wrapped(bdesc))
- return 0;
+static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
+{
+ return sg_dma_address(msc_win_base_sg(win));
+}
- /*
- * with wrapping, last written block contains both the newest and the
- * oldest data for this window.
- */
- for (blk = 0; blk < win->nr_blocks; blk++) {
- bdesc = win->block[blk].bdesc;
-
- if (msc_block_last_written(bdesc))
- return blk;
- }
-
- return 0;
+static inline unsigned long
+msc_win_base_pfn(struct msc_window *win)
+{
+ return PFN_DOWN(msc_win_base_dma(win));
}
/**
@@ -226,22 +311,126 @@
static struct msc_window *msc_next_window(struct msc_window *win)
{
if (msc_is_last_win(win))
- return list_entry(win->msc->win_list.next, struct msc_window,
- entry);
+ return list_first_entry(&win->msc->win_list, struct msc_window,
+ entry);
- return list_entry(win->entry.next, struct msc_window, entry);
+ return list_next_entry(win, entry);
+}
+
+static size_t msc_win_total_sz(struct msc_window *win)
+{
+ struct scatterlist *sg;
+ unsigned int blk;
+ size_t size = 0;
+
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
+
+ if (msc_block_wrapped(bdesc))
+ return (size_t)win->nr_blocks << PAGE_SHIFT;
+
+ size += msc_total_sz(bdesc);
+ if (msc_block_last_written(bdesc))
+ break;
+ }
+
+ return size;
+}
+
+/**
+ * msc_find_window() - find a window matching a given sg_table
+ * @msc: MSC device
+ * @sgt: SG table of the window
+ * @nonempty: skip over empty windows
+ *
+ * Return: MSC window structure pointer or NULL if the window
+ * could not be found.
+ */
+static struct msc_window *
+msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
+{
+ struct msc_window *win;
+ unsigned int found = 0;
+
+ if (list_empty(&msc->win_list))
+ return NULL;
+
+ /*
+ * we might need a radix tree for this, depending on how
+ * many windows a typical user would allocate; ideally it's
+ * something like 2, in which case we're good
+ */
+ list_for_each_entry(win, &msc->win_list, entry) {
+ if (win->sgt == sgt)
+ found++;
+
+ /* skip the empty ones */
+ if (nonempty && msc_block_is_empty(msc_win_base(win)))
+ continue;
+
+ if (found)
+ return win;
+ }
+
+ return NULL;
+}
+
+/**
+ * msc_oldest_window() - locate the window with oldest data
+ * @msc: MSC device
+ *
+ * This should only be used in multiblock mode. Caller should hold the
+ * msc::user_count reference.
+ *
+ * Return: the oldest window with valid data
+ */
+static struct msc_window *msc_oldest_window(struct msc *msc)
+{
+ struct msc_window *win;
+
+ if (list_empty(&msc->win_list))
+ return NULL;
+
+ win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
+ if (win)
+ return win;
+
+ return list_first_entry(&msc->win_list, struct msc_window, entry);
+}
+
+/**
+ * msc_win_oldest_sg() - locate the oldest block in a given window
+ * @win: window to look at
+ *
+ * Return: scatterlist for the block with the oldest data
+ */
+static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
+{
+ unsigned int blk;
+ struct scatterlist *sg;
+ struct msc_block_desc *bdesc = msc_win_base(win);
+
+ /* without wrapping, first block is the oldest */
+ if (!msc_block_wrapped(bdesc))
+ return msc_win_base_sg(win);
+
+ /*
+ * with wrapping, last written block contains both the newest and the
+ * oldest data for this window.
+ */
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
+
+ if (msc_block_last_written(bdesc))
+ return sg;
+ }
+
+ return msc_win_base_sg(win);
}
static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
- return iter->win->block[iter->block].bdesc;
-}
-
-static void msc_iter_init(struct msc_iter *iter)
-{
- memset(iter, 0, sizeof(*iter));
- iter->start_block = -1;
- iter->block = -1;
+ return sg_virt(iter->block);
}
static struct msc_iter *msc_iter_install(struct msc *msc)
@@ -266,7 +455,6 @@
goto unlock;
}
- msc_iter_init(iter);
iter->msc = msc;
list_add_tail(&iter->entry, &msc->iter_list);
@@ -287,10 +475,10 @@
static void msc_iter_block_start(struct msc_iter *iter)
{
- if (iter->start_block != -1)
+ if (iter->start_block)
return;
- iter->start_block = msc_win_oldest_block(iter->win);
+ iter->start_block = msc_win_oldest_sg(iter->win);
iter->block = iter->start_block;
iter->wrap_count = 0;
@@ -314,7 +502,7 @@
return -EINVAL;
iter->win = iter->start_win;
- iter->start_block = -1;
+ iter->start_block = NULL;
msc_iter_block_start(iter);
@@ -324,7 +512,7 @@
static int msc_iter_win_advance(struct msc_iter *iter)
{
iter->win = msc_next_window(iter->win);
- iter->start_block = -1;
+ iter->start_block = NULL;
if (iter->win == iter->start_win) {
iter->eof++;
@@ -354,8 +542,10 @@
return msc_iter_win_advance(iter);
/* block advance */
- if (++iter->block == iter->win->nr_blocks)
- iter->block = 0;
+ if (sg_is_last(iter->block))
+ iter->block = msc_win_base_sg(iter->win);
+ else
+ iter->block = sg_next(iter->block);
/* no wrapping, sanity check in case there is no last written block */
if (!iter->wrap_count && iter->block == iter->start_block)
@@ -460,20 +650,101 @@
static void msc_buffer_clear_hw_header(struct msc *msc)
{
struct msc_window *win;
+ struct scatterlist *sg;
list_for_each_entry(win, &msc->win_list, entry) {
unsigned int blk;
size_t hw_sz = sizeof(struct msc_block_desc) -
offsetof(struct msc_block_desc, hw_tag);
- for (blk = 0; blk < win->nr_blocks; blk++) {
- struct msc_block_desc *bdesc = win->block[blk].bdesc;
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
memset(&bdesc->hw_tag, 0, hw_sz);
}
}
}
+static int intel_th_msu_init(struct msc *msc)
+{
+ u32 mintctl, msusts;
+
+ if (!msc->do_irq)
+ return 0;
+
+ if (!msc->mbuf)
+ return 0;
+
+ mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+ mintctl |= msc->index ? M1BLIE : M0BLIE;
+ iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+ if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
+ dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
+ msc->do_irq = 0;
+ return 0;
+ }
+
+ msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+ iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
+
+ return 0;
+}
+
+static void intel_th_msu_deinit(struct msc *msc)
+{
+ u32 mintctl;
+
+ if (!msc->do_irq)
+ return;
+
+ mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+ mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
+ iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+}
+
+static int msc_win_set_lockout(struct msc_window *win,
+ enum lockout_state expect,
+ enum lockout_state new)
+{
+ enum lockout_state old;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!win->msc->mbuf)
+ return 0;
+
+ spin_lock_irqsave(&win->lo_lock, flags);
+ old = win->lockout;
+
+ if (old != expect) {
+ ret = -EINVAL;
+ dev_warn_ratelimited(msc_dev(win->msc),
+ "expected lockout state %d, got %d\n",
+ expect, old);
+ goto unlock;
+ }
+
+ win->lockout = new;
+
+ if (old == expect && new == WIN_LOCKED)
+ atomic_inc(&win->msc->user_count);
+ else if (old == expect && old == WIN_LOCKED)
+ atomic_dec(&win->msc->user_count);
+
+unlock:
+ spin_unlock_irqrestore(&win->lo_lock, flags);
+
+ if (ret) {
+ if (expect == WIN_READY && old == WIN_LOCKED)
+ return -EBUSY;
+
+ /* from intel_th_msc_window_unlock(), don't warn if not locked */
+ if (expect == WIN_LOCKED && old == new)
+ return 0;
+ }
+
+ return ret;
+}
/**
* msc_configure() - set up MSC hardware
* @msc: the MSC device to configure
@@ -491,8 +762,15 @@
if (msc->mode > MSC_MODE_MULTI)
return -ENOTSUPP;
- if (msc->mode == MSC_MODE_MULTI)
+ if (msc->mode == MSC_MODE_MULTI) {
+ if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
+ return -EBUSY;
+
msc_buffer_clear_hw_header(msc);
+ }
+
+ msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
+ msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);
reg = msc->base_addr >> PAGE_SHIFT;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
@@ -514,10 +792,14 @@
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+ intel_th_msu_init(msc);
+
msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
intel_th_trace_enable(msc->thdev);
msc->enabled = 1;
+ if (msc->mbuf && msc->mbuf->activate)
+ msc->mbuf->activate(msc->mbuf_priv);
return 0;
}
@@ -531,23 +813,21 @@
*/
static void msc_disable(struct msc *msc)
{
- unsigned long count;
+ struct msc_window *win = msc->cur_win;
u32 reg;
lockdep_assert_held(&msc->buf_mutex);
+ if (msc->mode == MSC_MODE_MULTI)
+ msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+ if (msc->mbuf && msc->mbuf->deactivate)
+ msc->mbuf->deactivate(msc->mbuf_priv);
+ intel_th_msu_deinit(msc);
intel_th_trace_disable(msc->thdev);
- for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
- count && !(reg & MSCSTS_PLE); count--) {
- reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
- cpu_relax();
- }
-
- if (!count)
- dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
-
if (msc->mode == MSC_MODE_SINGLE) {
+ reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
@@ -559,16 +839,25 @@
reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
reg &= ~MSC_EN;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+
+ if (msc->mbuf && msc->mbuf->ready)
+ msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+ msc_win_total_sz(win));
+
msc->enabled = 0;
- iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
- iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);
+ iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
+ iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);
dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
+
+ reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
+ reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+ iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}
static int intel_th_msc_activate(struct intel_th_device *thdev)
@@ -617,22 +906,45 @@
*/
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
+ unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned int order = get_order(size);
struct page *page;
+ int ret;
if (!size)
return 0;
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto err_out;
+
+ ret = -ENOMEM;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
if (!page)
- return -ENOMEM;
+ goto err_free_sgt;
split_page(page, order);
- msc->nr_pages = size >> PAGE_SHIFT;
+ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+ DMA_FROM_DEVICE);
+ if (ret < 0)
+ goto err_free_pages;
+
+ msc->nr_pages = nr_pages;
msc->base = page_address(page);
- msc->base_addr = page_to_phys(page);
+ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
return 0;
+
+err_free_pages:
+ __free_pages(page, order);
+
+err_free_sgt:
+ sg_free_table(&msc->single_sgt);
+
+err_out:
+ return ret;
}
/**
@@ -643,6 +955,10 @@
{
unsigned long off;
+ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+ 1, DMA_FROM_DEVICE);
+ sg_free_table(&msc->single_sgt);
+
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
struct page *page = virt_to_page(msc->base + off);
@@ -669,6 +985,69 @@
return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}
+static int __msc_buffer_win_alloc(struct msc_window *win,
+ unsigned int nr_segs)
+{
+ struct scatterlist *sg_ptr;
+ void *block;
+ int i, ret;
+
+ ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
+ if (ret)
+ return -ENOMEM;
+
+ for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
+ block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
+ PAGE_SIZE, &sg_dma_address(sg_ptr),
+ GFP_KERNEL);
+ if (!block)
+ goto err_nomem;
+
+ sg_set_buf(sg_ptr, block, PAGE_SIZE);
+ }
+
+ return nr_segs;
+
+err_nomem:
+ for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ sg_virt(sg_ptr), sg_dma_address(sg_ptr));
+
+ sg_free_table(win->sgt);
+
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_X86
+static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
+{
+ struct scatterlist *sg_ptr;
+ int i;
+
+ for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
+ /* Set the page as uncached */
+ set_memory_uc((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
+}
+
+static void msc_buffer_set_wb(struct msc_window *win)
+{
+ struct scatterlist *sg_ptr;
+ int i;
+
+ for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
+ /* Reset the page to write-back */
+ set_memory_wb((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
+}
+#else /* !X86 */
+static inline void
+msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
+static inline void msc_buffer_set_wb(struct msc_window *win) {}
+#endif /* CONFIG_X86 */
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -682,44 +1061,46 @@
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
struct msc_window *win;
- unsigned long size = PAGE_SIZE;
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM;
if (!nr_blocks)
return 0;
- win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
- GFP_KERNEL);
+ win = kzalloc(sizeof(*win), GFP_KERNEL);
if (!win)
return -ENOMEM;
+ win->msc = msc;
+ win->sgt = &win->_sgt;
+ win->lockout = WIN_READY;
+ spin_lock_init(&win->lo_lock);
+
if (!list_empty(&msc->win_list)) {
- struct msc_window *prev = list_entry(msc->win_list.prev,
- struct msc_window, entry);
+ struct msc_window *prev = list_last_entry(&msc->win_list,
+ struct msc_window,
+ entry);
win->pgoff = prev->pgoff + prev->nr_blocks;
}
- for (i = 0; i < nr_blocks; i++) {
- win->block[i].bdesc =
- dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
- &win->block[i].addr, GFP_KERNEL);
+ if (msc->mbuf && msc->mbuf->alloc_window)
+ ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
+ nr_blocks << PAGE_SHIFT);
+ else
+ ret = __msc_buffer_win_alloc(win, nr_blocks);
- if (!win->block[i].bdesc)
- goto err_nomem;
+ if (ret <= 0)
+ goto err_nomem;
-#ifdef CONFIG_X86
- /* Set the page as uncached */
- set_memory_uc((unsigned long)win->block[i].bdesc, 1);
-#endif
- }
+ msc_buffer_set_uc(win, ret);
- win->msc = msc;
+ win->nr_segs = ret;
win->nr_blocks = nr_blocks;
if (list_empty(&msc->win_list)) {
- msc->base = win->block[0].bdesc;
- msc->base_addr = win->block[0].addr;
+ msc->base = msc_win_base(win);
+ msc->base_addr = msc_win_base_dma(win);
+ msc->cur_win = win;
}
list_add_tail(&win->entry, &msc->win_list);
@@ -728,19 +1109,26 @@
return 0;
err_nomem:
- for (i--; i >= 0; i--) {
-#ifdef CONFIG_X86
- /* Reset the page to write-back before releasing */
- set_memory_wb((unsigned long)win->block[i].bdesc, 1);
-#endif
- dma_free_coherent(msc_dev(msc)->parent->parent, size,
- win->block[i].bdesc, win->block[i].addr);
- }
kfree(win);
return ret;
}
+static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
+ struct page *page = sg_page(sg);
+
+ page->mapping = NULL;
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ sg_virt(sg), sg_dma_address(sg));
+ }
+ sg_free_table(win->sgt);
+}
+
/**
* msc_buffer_win_free() - free a window from MSC's window list
* @msc: MSC device
@@ -751,8 +1139,6 @@
*/
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
- int i;
-
msc->nr_pages -= win->nr_blocks;
list_del(&win->entry);
@@ -761,17 +1147,12 @@
msc->base_addr = 0;
}
- for (i = 0; i < win->nr_blocks; i++) {
- struct page *page = virt_to_page(win->block[i].bdesc);
+ msc_buffer_set_wb(win);
- page->mapping = NULL;
-#ifdef CONFIG_X86
- /* Reset the page to write-back before releasing */
- set_memory_wb((unsigned long)win->block[i].bdesc, 1);
-#endif
- dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- win->block[i].bdesc, win->block[i].addr);
- }
+ if (msc->mbuf && msc->mbuf->free_window)
+ msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
+ else
+ __msc_buffer_win_free(msc, win);
kfree(win);
}
@@ -789,6 +1170,7 @@
/* call with msc::mutex locked */
list_for_each_entry(win, &msc->win_list, entry) {
+ struct scatterlist *sg;
unsigned int blk;
u32 sw_tag = 0;
@@ -798,35 +1180,34 @@
*/
if (msc_is_last_win(win)) {
sw_tag |= MSC_SW_TAG_LASTWIN;
- next_win = list_entry(msc->win_list.next,
- struct msc_window, entry);
+ next_win = list_first_entry(&msc->win_list,
+ struct msc_window, entry);
} else {
- next_win = list_entry(win->entry.next,
- struct msc_window, entry);
+ next_win = list_next_entry(win, entry);
}
- for (blk = 0; blk < win->nr_blocks; blk++) {
- struct msc_block_desc *bdesc = win->block[blk].bdesc;
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
memset(bdesc, 0, sizeof(*bdesc));
- bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;
+ bdesc->next_win = msc_win_base_pfn(next_win);
/*
* Similarly to last window, last block should point
* to the first one.
*/
- if (blk == win->nr_blocks - 1) {
+ if (blk == win->nr_segs - 1) {
sw_tag |= MSC_SW_TAG_LASTBLK;
- bdesc->next_blk =
- win->block[0].addr >> PAGE_SHIFT;
+ bdesc->next_blk = msc_win_base_pfn(win);
} else {
- bdesc->next_blk =
- win->block[blk + 1].addr >> PAGE_SHIFT;
+ dma_addr_t addr = sg_dma_address(sg_next(sg));
+
+ bdesc->next_blk = PFN_DOWN(addr);
}
bdesc->sw_tag = sw_tag;
- bdesc->block_sz = PAGE_SIZE / 64;
+ bdesc->block_sz = sg->length / 64;
}
}
@@ -985,6 +1366,8 @@
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
struct msc_window *win;
+ struct scatterlist *sg;
+ unsigned int blk;
if (msc->mode == MSC_MODE_SINGLE)
return msc_buffer_contig_get_page(msc, pgoff);
@@ -997,7 +1380,18 @@
found:
pgoff -= win->pgoff;
- return virt_to_page(win->block[pgoff].bdesc);
+
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct page *page = sg_page(sg);
+ size_t pgsz = PFN_DOWN(sg->length);
+
+ if (pgoff < pgsz)
+ return page + pgoff;
+
+ pgoff -= pgsz;
+ }
+
+ return NULL;
}
/**
@@ -1250,6 +1644,22 @@
.owner = THIS_MODULE,
};
+static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
+{
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ unsigned long count;
+ u32 reg;
+
+ for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
+ count && !(reg & MSCSTS_PLE); count--) {
+ reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
+ cpu_relax();
+ }
+
+ if (!count)
+ dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
+}
+
static int intel_th_msc_init(struct msc *msc)
{
atomic_set(&msc->user_count, -1);
@@ -1266,6 +1676,97 @@
return 0;
}
+static void msc_win_switch(struct msc *msc)
+{
+ struct msc_window *first;
+
+ first = list_first_entry(&msc->win_list, struct msc_window, entry);
+
+ if (msc_is_last_win(msc->cur_win))
+ msc->cur_win = first;
+ else
+ msc->cur_win = list_next_entry(msc->cur_win, entry);
+
+ msc->base = msc_win_base(msc->cur_win);
+ msc->base_addr = msc_win_base_dma(msc->cur_win);
+
+ intel_th_trace_switch(msc->thdev);
+}
+
+/**
+ * intel_th_msc_window_unlock - put the window back in rotation
+ * @dev: MSC device to which this relates
+ * @sgt: buffer's sg_table for the window, does nothing if NULL
+ */
+void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ struct msc_window *win;
+
+ if (!sgt)
+ return;
+
+ win = msc_find_window(msc, sgt, false);
+ if (!win)
+ return;
+
+ msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
+}
+EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
+
+static void msc_work(struct work_struct *work)
+{
+ struct msc *msc = container_of(work, struct msc, work);
+
+ intel_th_msc_deactivate(msc->thdev);
+}
+
+static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
+{
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+ u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+ struct msc_window *win, *next_win;
+
+ if (!msc->do_irq || !msc->mbuf)
+ return IRQ_NONE;
+
+ msusts &= mask;
+
+ if (!msusts)
+ return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
+
+ iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
+
+ if (!msc->enabled)
+ return IRQ_NONE;
+
+ /* grab the window before we do the switch */
+ win = msc->cur_win;
+ if (!win)
+ return IRQ_HANDLED;
+ next_win = msc_next_window(win);
+ if (!next_win)
+ return IRQ_HANDLED;
+
+ /* next window: if READY, proceed, if LOCKED, stop the trace */
+ if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
+ schedule_work(&msc->work);
+ return IRQ_HANDLED;
+ }
+
+ /* current window: INUSE -> LOCKED */
+ msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+ msc_win_switch(msc);
+
+ if (msc->mbuf && msc->mbuf->ready)
+ msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+ msc_win_total_sz(win));
+
+ return IRQ_HANDLED;
+}
+
static const char * const msc_mode[] = {
[MSC_MODE_SINGLE] = "single",
[MSC_MODE_MULTI] = "multi",
@@ -1300,21 +1801,43 @@
static DEVICE_ATTR_RW(wrap);
+static void msc_buffer_unassign(struct msc *msc)
+{
+ lockdep_assert_held(&msc->buf_mutex);
+
+ if (!msc->mbuf)
+ return;
+
+ msc->mbuf->unassign(msc->mbuf_priv);
+ msu_buffer_put(msc->mbuf);
+ msc->mbuf_priv = NULL;
+ msc->mbuf = NULL;
+}
+
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct msc *msc = dev_get_drvdata(dev);
+ const char *mode = msc_mode[msc->mode];
+ ssize_t ret;
- return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
+ mutex_lock(&msc->buf_mutex);
+ if (msc->mbuf)
+ mode = msc->mbuf->name;
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
+ mutex_unlock(&msc->buf_mutex);
+
+ return ret;
}
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t size)
{
+ const struct msu_buffer *mbuf = NULL;
struct msc *msc = dev_get_drvdata(dev);
size_t len = size;
- char *cp;
+ char *cp, *mode;
int i, ret;
if (!capable(CAP_SYS_RAWIO))
@@ -1324,17 +1847,64 @@
if (cp)
len = cp - buf;
- for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
- if (!strncmp(msc_mode[i], buf, len))
- goto found;
+ mode = kstrndup(buf, len, GFP_KERNEL);
+ if (!mode)
+ return -ENOMEM;
+
+ i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
+ if (i >= 0) {
+ kfree(mode);
+ goto found;
+ }
+
+ /* Buffer sinks only work with a usable IRQ */
+ if (!msc->do_irq) {
+ kfree(mode);
+ return -EINVAL;
+ }
+
+ mbuf = msu_buffer_get(mode);
+ kfree(mode);
+ if (mbuf)
+ goto found;
return -EINVAL;
found:
mutex_lock(&msc->buf_mutex);
+ ret = 0;
+
+ /* Same buffer: do nothing */
+ if (mbuf && mbuf == msc->mbuf) {
+ /* put the extra reference we just got */
+ msu_buffer_put(mbuf);
+ goto unlock;
+ }
+
ret = msc_buffer_unlocked_free_unless_used(msc);
- if (!ret)
- msc->mode = i;
+ if (ret)
+ goto unlock;
+
+ if (mbuf) {
+ void *mbuf_priv = mbuf->assign(dev, &i);
+
+ if (!mbuf_priv) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ msc_buffer_unassign(msc);
+ msc->mbuf_priv = mbuf_priv;
+ msc->mbuf = mbuf;
+ } else {
+ msc_buffer_unassign(msc);
+ }
+
+ msc->mode = i;
+
+unlock:
+ if (ret && mbuf)
+ msu_buffer_put(mbuf);
mutex_unlock(&msc->buf_mutex);
return ret ? ret : size;
@@ -1423,7 +1993,8 @@
if (!end)
break;
- len -= end - p;
+ /* consume the number and the following comma, hence +1 */
+ len -= end - p + 1;
p = end + 1;
} while (len);
@@ -1439,10 +2010,43 @@
static DEVICE_ATTR_RW(nr_pages);
+static ssize_t
+win_switch_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ mutex_lock(&msc->buf_mutex);
+ /*
+ * Window switch can only happen in the "multi" mode.
+ * If an external buffer is engaged, it has full control
+ * over window switching.
+ */
+ if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
+ ret = -ENOTSUPP;
+ else
+ msc_win_switch(msc);
+ mutex_unlock(&msc->buf_mutex);
+
+ return ret ? ret : size;
+}
+
+static DEVICE_ATTR_WO(win_switch);
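
Usage note, not part of the patch: writing 1 to the new win_switch attribute (typically /sys/bus/intel_th/devices/<instance>-msc<N>/win_switch; the exact path is an assumption) forces a window switch from user space. As the check above shows, it is honored only in multi mode and only when no external MSU buffer owns the window rotation.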
+
static struct attribute *msc_output_attrs[] = {
&dev_attr_wrap.attr,
&dev_attr_mode.attr,
&dev_attr_nr_pages.attr,
+ &dev_attr_win_switch.attr,
NULL,
};
@@ -1470,11 +2074,17 @@
if (!msc)
return -ENOMEM;
+ res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
+ if (!res)
+ msc->do_irq = 1;
+
msc->index = thdev->id;
msc->thdev = thdev;
msc->reg_base = base + msc->index * 0x100;
+ msc->msu_base = base;
+ INIT_WORK(&msc->work, msc_work);
err = intel_th_msc_init(msc);
if (err)
return err;
@@ -1503,6 +2113,8 @@
static struct intel_th_driver intel_th_msc_driver = {
.probe = intel_th_msc_probe,
.remove = intel_th_msc_remove,
+ .irq = intel_th_msc_interrupt,
+ .wait_empty = intel_th_msc_wait_empty,
.activate = intel_th_msc_activate,
.deactivate = intel_th_msc_deactivate,
.fops = &intel_th_msc_fops,
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 9cc8ace..e771f50 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
*
@@ -11,6 +11,7 @@
enum {
REG_MSU_MSUPARAMS = 0x0000,
REG_MSU_MSUSTS = 0x0008,
+ REG_MSU_MINTCTL = 0x0004, /* MSU-global interrupt control */
REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */
REG_MSU_MSC0STS = 0x0104, /* MSC0 status */
REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */
@@ -28,6 +29,8 @@
/* MSUSTS bits */
#define MSUSTS_MSU_INT BIT(0)
+#define MSUSTS_MSC0BLAST BIT(16)
+#define MSUSTS_MSC1BLAST BIT(24)
/* MSCnCTL bits */
#define MSC_EN BIT(0)
@@ -36,13 +39,10 @@
#define MSC_MODE (BIT(4) | BIT(5))
#define MSC_LEN (BIT(8) | BIT(9) | BIT(10))
-/* MSC operating modes (MSC_MODE) */
-enum {
- MSC_MODE_SINGLE = 0,
- MSC_MODE_MULTI,
- MSC_MODE_EXI,
- MSC_MODE_DEBUG,
-};
+/* MINTCTL bits */
+#define MICDE BIT(0)
+#define M0BLIE BIT(16)
+#define M1BLIE BIT(24)
/* MSCnSTS bits */
#define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */
@@ -85,9 +85,19 @@
return bdesc->valid_dw * 4 - MSC_BDESC;
}
+static inline unsigned long msc_total_sz(struct msc_block_desc *bdesc)
+{
+ return bdesc->valid_dw * 4;
+}
+
+static inline unsigned long msc_block_sz(struct msc_block_desc *bdesc)
+{
+ return bdesc->block_sz * 64 - MSC_BDESC;
+}
+
static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
{
- if (bdesc->hw_tag & MSC_HW_TAG_BLOCKWRAP)
+ if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
return true;
return false;
@@ -96,7 +106,7 @@
static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
{
if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) ||
- (msc_data_sz(bdesc) != DATA_IN_PAGE))
+ (msc_data_sz(bdesc) != msc_block_sz(bdesc)))
return true;
return false;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 1cf6290..03ca5b1 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -17,7 +17,13 @@
#define DRIVER_NAME "intel_th_pci"
-#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
+enum {
+ TH_PCI_CONFIG_BAR = 0,
+ TH_PCI_STH_SW_BAR = 2,
+ TH_PCI_RTIT_BAR = 4,
+};
+
+#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR))
#define PCI_REG_NPKDSC 0x80
#define NPKDSC_TSACT BIT(5)
@@ -66,8 +72,12 @@
const struct pci_device_id *id)
{
struct intel_th_drvdata *drvdata = (void *)id->driver_data;
+ struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
+ [TH_MMIO_CONFIG] = pdev->resource[TH_PCI_CONFIG_BAR],
+ [TH_MMIO_SW] = pdev->resource[TH_PCI_STH_SW_BAR],
+ };
+ int err, r = TH_MMIO_SW + 1, i;
struct intel_th *th;
- int err;
err = pcim_enable_device(pdev);
if (err)
@@ -77,8 +87,19 @@
if (err)
return err;
- th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
- DEVICE_COUNT_RESOURCE, pdev->irq);
+ if (pdev->resource[TH_PCI_RTIT_BAR].start) {
+ resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR];
+ r++;
+ }
+
+ err = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
+ if (err > 0)
+ for (i = 0; i < err; i++, r++) {
+ resource[r].flags = IORESOURCE_IRQ;
+ resource[r].start = pci_irq_vector(pdev, i);
+ }
+
+ th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
if (IS_ERR(th))
return PTR_ERR(th);
@@ -95,10 +116,13 @@
struct intel_th *th = pci_get_drvdata(pdev);
intel_th_free(th);
+
+ pci_free_irq_vectors(pdev);
}
static const struct intel_th_drvdata intel_th_2x = {
.tscu_enable = 1,
+ .has_mintctl = 1,
};
static const struct pci_device_id intel_th_pci_id_table[] = {
@@ -141,6 +165,11 @@
.driver_data = (kernel_ulong_t)0,
},
{
+ /* Lewisburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -165,6 +194,31 @@
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Comet Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Comet Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Ice Lake NNPI */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Tiger Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Jasper Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 5669433..0da6b78 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -272,19 +272,17 @@
const char *buf, size_t size)
{
struct pti_device *pti = dev_get_drvdata(dev);
- ssize_t ret = -EINVAL;
int i;
- for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++)
- if (sysfs_streq(buf, lpp_dest_str[i]))
- break;
+ i = sysfs_match_string(lpp_dest_str, buf);
+ if (i < 0)
+ return i;
- if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) {
- pti->lpp_dest = i;
- ret = size;
- }
+ if (!(pti->lpp_dest_mask & BIT(i)))
+ return -EINVAL;
- return ret;
+ pti->lpp_dest = i;
+ return size;
}
static DEVICE_ATTR_RW(lpp_dest);
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index e9381ba..7dfc043 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub PTI output data structures
*
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 4b7ae47..3a1f4e6 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -84,8 +84,12 @@
/* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
case STP_PACKET_GERR:
reg += 4;
+ /* fall through */
+
case STP_PACKET_XSYNC:
reg += 8;
+ /* fall through */
+
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
reg += 4;