Update Linux to v5.10.157
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.157.tar.xz
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
Change-Id: I7b30d9e98d8c465d6b44de8e7433b4a40b3289ba
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 4801966..63c5444 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -1780,7 +1780,7 @@
{
unsigned long flags;
- if (!speakup_console[vc->vc_num] || spk_parked)
+ if (!speakup_console[vc->vc_num] || spk_parked || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 6284aff..da3a245 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -88,7 +88,7 @@
}
if (!ldisc_data->buf_free)
- /* ttyio_in will tty_schedule_flip */
+ /* ttyio_in will tty_flip_buffer_push */
return 0;
/* Make sure the consumer has read buf before we have seen
@@ -334,7 +334,7 @@
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
- tty_schedule_flip(speakup_tty->port);
+ tty_flip_buffer_push(speakup_tty->port);
return rv;
}
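
The two spk_ttyio.c hunks above track the upstream removal of tty_schedule_flip() in favour of tty_flip_buffer_push(). As an illustrative aside only (the function and variable names below are hypothetical, not part of this patch), the usual producer-side pattern in a driver's receive path is:

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    static void example_push_rx(struct tty_port *port,
                                const unsigned char *data, size_t len)
    {
            /* Queue the received bytes into the port's flip buffer... */
            tty_insert_flip_string(port, data, len);
            /* ...then push them to the line discipline right away, which is
             * what tty_flip_buffer_push() does where tty_schedule_flip()
             * used to defer the work.
             */
            tty_flip_buffer_push(port);
    }
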
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 72f1fb7..e648158 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -12,6 +12,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
+#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
@@ -138,8 +139,8 @@
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
- const guid_t *fru_id = &guid_null;
- char *fru_text = "";
+ const guid_t *fru_id;
+ char *fru_text;
guid_t *sec_type;
static u32 err_seq;
@@ -160,17 +161,23 @@
/* log event via trace */
err_seq++;
- gdata = (struct acpi_hest_generic_data *)(tmp + 1);
- if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
- fru_id = (guid_t *)gdata->fru_id;
- if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
- fru_text = gdata->fru_text;
- sec_type = (guid_t *)gdata->section_type;
- if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
- struct cper_sec_mem_err *mem = (void *)(gdata + 1);
- if (gdata->error_data_length >= sizeof(*mem))
- trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
- (u8)gdata->error_severity);
+ apei_estatus_for_each_section(tmp, gdata) {
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+ fru_id = (guid_t *)gdata->fru_id;
+ else
+ fru_id = &guid_null;
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+ fru_text = gdata->fru_text;
+ else
+ fru_text = "";
+ sec_type = (guid_t *)gdata->section_type;
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+ if (gdata->error_data_length >= sizeof(*mem))
+ trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+ (u8)gdata->error_severity);
+ }
}
out:
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index be73974..6ff8102 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -401,6 +401,9 @@
if (!lpss_clk_dev)
lpt_register_clock_device();
+ if (IS_ERR(lpss_clk_dev))
+ return PTR_ERR(lpss_clk_dev);
+
clk_data = platform_get_drvdata(lpss_clk_dev);
if (!clk_data)
return -ENODEV;
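
The acpi_lpss.c hunk guards against lpt_register_clock_device() leaving lpss_clk_dev holding an ERR_PTR-encoded error. A minimal sketch of that error-pointer idiom, using a hypothetical registration call rather than the LPSS one:

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_register(struct platform_device **out)
    {
            struct platform_device *pdev;

            pdev = platform_device_register_simple("example", -1, NULL, 0);
            if (IS_ERR(pdev))
                    return PTR_ERR(pdev);   /* decode -ENOMEM, -ENODEV, ... */

            *out = pdev;                    /* valid pointer from here on */
            return 0;
    }
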
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index eb04b2f..cf6c9ff 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -498,6 +498,22 @@
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
},
},
+ {
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Satellite Z830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"),
+ },
+ },
+ {
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Portege Z830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"),
+ },
+ },
/*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index b7f3e86..901fa5c 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -169,6 +169,9 @@
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
+ if (!start_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
+ }
}
/* Null child means "get first node" */
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 19e50fc..45973aa 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -30,14 +30,25 @@
#undef pr_fmt
#define pr_fmt(fmt) "BERT: " fmt
+#define ACPI_BERT_PRINT_MAX_RECORDS 5
+#define ACPI_BERT_PRINT_MAX_LEN 1024
+
static int bert_disable;
+/*
+ * Print "all" the error records in the BERT table, but avoid huge spam to
+ * the console if the BIOS included oversize records, or too many records.
+ * Skipping some records here does not lose anything because the full
+ * data is available to user tools in:
+ * /sys/firmware/acpi/tables/data/BERT
+ */
static void __init bert_print_all(struct acpi_bert_region *region,
unsigned int region_len)
{
struct acpi_hest_generic_status *estatus =
(struct acpi_hest_generic_status *)region;
int remain = region_len;
+ int printed = 0, skipped = 0;
u32 estatus_len;
while (remain >= sizeof(struct acpi_bert_region)) {
@@ -45,21 +56,26 @@
if (remain < estatus_len) {
pr_err(FW_BUG "Truncated status block (length: %u).\n",
estatus_len);
- return;
+ break;
}
/* No more error records. */
if (!estatus->block_status)
- return;
+ break;
if (cper_estatus_check(estatus)) {
pr_err(FW_BUG "Invalid error record.\n");
- return;
+ break;
}
- pr_info_once("Error records from previous boot:\n");
-
- cper_estatus_print(KERN_INFO HW_ERR, estatus);
+ if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
+ printed < ACPI_BERT_PRINT_MAX_RECORDS) {
+ pr_info_once("Error records from previous boot:\n");
+ cper_estatus_print(KERN_INFO HW_ERR, estatus);
+ printed++;
+ } else {
+ skipped++;
+ }
/*
* Because the boot error source is "one-time polled" type,
@@ -71,13 +87,16 @@
estatus = (void *)estatus + estatus_len;
remain -= estatus_len;
}
+
+ if (skipped)
+ pr_info(HW_ERR "Skipped %d error records\n", skipped);
}
static int __init setup_bert_disable(char *str)
{
bert_disable = 1;
- return 0;
+ return 1;
}
__setup("bert_disable", setup_bert_disable);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 1331567..c281d5b 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -544,6 +544,8 @@
((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
!= REGION_INTERSECTS) &&
(region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
+ != REGION_INTERSECTS) &&
+ (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
!= REGION_INTERSECTS)))
return -EINVAL;
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2e0b0fc..83efb52 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -891,7 +891,7 @@
static int __init setup_erst_disable(char *str)
{
erst_disable = 1;
- return 0;
+ return 1;
}
__setup("erst_disable", setup_erst_disable);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0c8330e..9bdb5bd 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -163,7 +163,7 @@
clear_fixmap(fixmap_idx);
}
-int ghes_estatus_pool_init(int num_ghes)
+int ghes_estatus_pool_init(unsigned int num_ghes)
{
unsigned long addr, len;
int rc;
@@ -985,7 +985,7 @@
ghes_estatus_cache_add(generic, estatus);
}
- if (task_work_pending && current->mm != &init_mm) {
+ if (task_work_pending && current->mm) {
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 6e980fe..7bf48c2 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -219,7 +219,7 @@
static int __init setup_hest_disable(char *str)
{
hest_disable = HEST_DISABLED;
- return 0;
+ return 1;
}
__setup("hest_disable", setup_hest_disable);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 0a2da06..b62348a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -607,33 +607,6 @@
return 0;
}
-/* Check if CPPC revision + num_ent combination is supported */
-static bool is_cppc_supported(int revision, int num_ent)
-{
- int expected_num_ent;
-
- switch (revision) {
- case CPPC_V2_REV:
- expected_num_ent = CPPC_V2_NUM_ENT;
- break;
- case CPPC_V3_REV:
- expected_num_ent = CPPC_V3_NUM_ENT;
- break;
- default:
- pr_debug("Firmware exports unsupported CPPC revision: %d\n",
- revision);
- return false;
- }
-
- if (expected_num_ent != num_ent) {
- pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
- num_ent, expected_num_ent, revision);
- return false;
- }
-
- return true;
-}
-
/*
* An example CPC table looks like the following.
*
@@ -719,12 +692,16 @@
cpc_obj = &out_obj->package.elements[0];
if (cpc_obj->type == ACPI_TYPE_INTEGER) {
num_ent = cpc_obj->integer.value;
+ if (num_ent <= 1) {
+ pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
+ num_ent, pr->id);
+ goto out_free;
+ }
} else {
pr_debug("Unexpected entry type(%d) for NumEntries\n",
cpc_obj->type);
goto out_free;
}
- cpc_ptr->num_entries = num_ent;
/* Second entry should be revision. */
cpc_obj = &out_obj->package.elements[1];
@@ -735,10 +712,32 @@
cpc_obj->type);
goto out_free;
}
- cpc_ptr->version = cpc_rev;
- if (!is_cppc_supported(cpc_rev, num_ent))
+ if (cpc_rev < CPPC_V2_REV) {
+ pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
+ pr->id);
goto out_free;
+ }
+
+ /*
+ * Disregard _CPC if the number of entries in the return pachage is not
+ * as expected, but support future revisions being proper supersets of
+ * the v3 and only causing more entries to be returned by _CPC.
+ */
+ if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
+ (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
+ (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
+ pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
+ num_ent, pr->id);
+ goto out_free;
+ }
+ if (cpc_rev > CPPC_V3_REV) {
+ num_ent = CPPC_V3_NUM_ENT;
+ cpc_rev = CPPC_V3_REV;
+ }
+
+ cpc_ptr->num_entries = num_ent;
+ cpc_ptr->version = cpc_rev;
/* Iterate through remaining entries in _CPC */
for (i = 2; i < num_ent; i++) {
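
The rewritten _CPC validation above accepts an exact entry count for the known revisions and treats anything newer as a superset of v3 that is then clamped. Distilled into a standalone predicate (an editorial sketch; the CPPC_V*_REV and CPPC_V*_NUM_ENT macros are taken as given by cppc_acpi.c):

    static bool cpc_layout_ok(int rev, int num_ent)
    {
            if (rev < CPPC_V2_REV)
                    return false;                      /* unsupported revision */
            if (rev == CPPC_V2_REV)
                    return num_ent == CPPC_V2_NUM_ENT; /* exact match required */
            if (rev == CPPC_V3_REV)
                    return num_ent == CPPC_V3_NUM_ENT; /* exact match required */
            /* Future revisions must be proper supersets of v3; the driver
             * then consumes only the v3 subset of the entries.
             */
            return num_ent > CPPC_V3_NUM_ENT;
    }
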
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3f2e5ea..4707d18 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -183,7 +183,6 @@
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
-static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
@@ -1405,24 +1404,16 @@
if (ec->data_addr == 0 || ec->command_addr == 0)
return AE_OK;
- if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
- /*
- * Always inherit the GPE number setting from the ECDT
- * EC.
- */
- ec->gpe = boot_ec->gpe;
- } else {
- /* Get GPE bit assignment (EC events). */
- /* TODO: Add support for _GPE returning a package */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_SUCCESS(status))
- ec->gpe = tmp;
+ /* Get GPE bit assignment (EC events). */
+ /* TODO: Add support for _GPE returning a package */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status))
+ ec->gpe = tmp;
+ /*
+ * Errors are non-fatal, allowing for ACPI Reduced Hardware
+ * platforms which use GpioInt instead of GPE.
+ */
- /*
- * Errors are non-fatal, allowing for ACPI Reduced Hardware
- * platforms which use GpioInt instead of GPE.
- */
- }
/* Use the global lock for all EC transactions? */
tmp = 0;
acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
@@ -1860,60 +1851,12 @@
return 0;
}
-/*
- * Some DSDTs contain wrong GPE setting.
- * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
- * https://bugzilla.kernel.org/show_bug.cgi?id=195651
- */
-static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
-{
- pr_debug("Detected system needing ignore DSDT GPE setting.\n");
- EC_FLAGS_IGNORE_DSDT_GPE = 1;
- return 0;
-}
-
static const struct dmi_system_id ec_dmi_table[] __initconst = {
{
ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
{
- ec_honor_ecdt_gpe, "ASUS FX502VD", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS FX502VE", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS GL702VMK", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS X550VXK", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
- {
- ec_honor_ecdt_gpe, "ASUS X580VD", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
- {
/* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
@@ -2181,13 +2124,6 @@
},
},
{
- .ident = "ThinkPad X1 Carbon 6th",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
- },
- },
- {
.ident = "ThinkPad X1 Yoga 3rd",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 95f23ac..2709ef2 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -41,6 +41,8 @@
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#ifdef CONFIG_ARM64
+
#define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
@@ -162,6 +164,7 @@
ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15),
+#endif /* ARM64 */
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 8377c3e..e5dd87d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -609,7 +609,7 @@
* @cx: Target state context
* @index: index of target state
*/
-static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
+static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int index)
@@ -666,7 +666,7 @@
return index;
}
-static int acpi_idle_enter(struct cpuidle_device *dev,
+static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -695,7 +695,7 @@
return index;
}
-static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
+static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -1080,6 +1080,11 @@
return 0;
}
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return -EOPNOTSUPP;
+}
+
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
int ret, i;
@@ -1088,6 +1093,11 @@
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ /* make sure our architecture has support */
+ ret = acpi_processor_ffh_lpi_probe(pr->id);
+ if (ret == -EOPNOTSUPP)
+ return ret;
+
if (!osc_pc_lpi_support_confirmed)
return -EOPNOTSUPP;
@@ -1139,11 +1149,6 @@
return 0;
}
-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
-{
- return -ENODEV;
-}
-
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
return -ENODEV;
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 6c7d05b..7df0c6e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -148,7 +148,7 @@
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 18bd428..80e92c2 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -155,10 +155,10 @@
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
-static int acpi_add_nondev_subnodes(acpi_handle scope,
- const union acpi_object *links,
- struct list_head *list,
- struct fwnode_handle *parent)
+static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ const union acpi_object *links,
+ struct list_head *list,
+ struct fwnode_handle *parent)
{
bool ret = false;
int i;
@@ -433,6 +433,16 @@
acpi_extract_apple_properties(adev);
}
+static void acpi_free_device_properties(struct list_head *list)
+{
+ struct acpi_device_properties *props, *tmp;
+
+ list_for_each_entry_safe(props, tmp, list, list) {
+ list_del(&props->list);
+ kfree(props);
+ }
+}
+
static void acpi_destroy_nondev_subnodes(struct list_head *list)
{
struct acpi_data_node *dn, *next;
@@ -445,22 +455,18 @@
wait_for_completion(&dn->kobj_done);
list_del(&dn->sibling);
ACPI_FREE((void *)dn->data.pointer);
+ acpi_free_device_properties(&dn->data.properties);
kfree(dn);
}
}
void acpi_free_properties(struct acpi_device *adev)
{
- struct acpi_device_properties *props, *tmp;
-
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
- list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
- list_del(&props->list);
- kfree(props);
- }
+ acpi_free_device_properties(&adev->data.properties);
}
/**
@@ -685,7 +691,7 @@
*/
if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
if (index)
- return -EINVAL;
+ return -ENOENT;
ret = acpi_bus_get_device(obj->reference.handle, &device);
if (ret)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 503935b..097a5b5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,14 @@
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G40-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
+ },
+ },
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
@@ -377,6 +385,18 @@
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
+ /*
+ * ASUS B1400CEAE hangs on resume from suspend (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
+ */
+ {
+ .callback = init_default_s3,
+ .ident = "ASUS B1400CEAE",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
+ },
+ },
{},
};
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index a5cc4f3..1d94c46 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -439,18 +439,29 @@
{
struct acpi_data_attr *data_attr;
void __iomem *base;
- ssize_t rc;
+ ssize_t size;
data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+ size = data_attr->attr.size;
- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+ if (offset < 0)
+ return -EINVAL;
+
+ if (offset >= size)
+ return 0;
+
+ if (count > size - offset)
+ count = size - offset;
+
+ base = acpi_os_map_iomem(data_attr->addr, size);
if (!base)
return -ENOMEM;
- rc = memory_read_from_buffer(buf, count, &offset, base,
- data_attr->attr.size);
- acpi_os_unmap_memory(base, data_attr->attr.size);
- return rc;
+ memcpy_fromio(buf, base + offset, count);
+
+ acpi_os_unmap_iomem(base, size);
+
+ return count;
}
static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
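
The sysfs.c hunk replaces memory_read_from_buffer() with explicit bounds checks plus memcpy_fromio(), since the BERT data sits behind an iomem mapping. The check sequence, reduced to a standalone sketch (the helper name and parameters are illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    static ssize_t example_bin_read(char *buf, loff_t offset, size_t count,
                                    const void __iomem *base, size_t size)
    {
            if (offset < 0)
                    return -EINVAL;         /* reject negative offsets */
            if (offset >= size)
                    return 0;               /* past the end: report EOF */
            if (count > size - offset)
                    count = size - offset;  /* clamp to what remains */

            memcpy_fromio(buf, base + offset, count);
            return count;
    }
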
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 7b9793c..b137131 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -424,23 +424,6 @@
.callback = video_detect_force_native,
.ident = "Clevo NL5xRU",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
},
@@ -464,27 +447,123 @@
.callback = video_detect_force_native,
.ident = "Clevo NL5xNU",
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ },
+ /*
+ * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
+ * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
+ * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
+ * above.
+ */
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF5PU1G",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF4NU1F",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF4NU1F",
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5NU1G",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
},
},
{
.callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
+ .ident = "TongFang PF5NU1G",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
},
},
-
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang PF5LUXG",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ },
+ },
+ /*
+ * More Tongfang devices with the same issue as the Clevo NL5xRU and
+ * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
+ */
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GKxNRxx",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GMxNGxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GMxZGxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "TongFang GMxRGxx",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 8f4ae6e..47c7244 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -299,11 +299,10 @@
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = 0;
pm_runtime_get_sync(dev);
if (drv->remove)
- ret = drv->remove(pcdev);
+ drv->remove(pcdev);
pm_runtime_put_noidle(dev);
/* Undo the runtime PM settings in amba_probe() */
@@ -314,7 +313,7 @@
amba_put_disable_pclk(pcdev);
dev_pm_domain_detach(dev, true);
- return ret;
+ return 0;
}
static void amba_shutdown(struct device *dev)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 366b124..4473ade 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1744,6 +1744,18 @@
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+ * Cleanup the failed reference here as the target
+ * could now be dead and have already released its
+ * references by now. Calling on the new reference
+ * with strong=0 and a tmp_refs will not decrement
+ * the node. The new_ref gets kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
@@ -1996,15 +2008,21 @@
/**
* binder_get_object() - gets object and checks for valid metadata
* @proc: binder_proc owning the buffer
+ * @u: sender's user pointer to base of buffer
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the @buffer at which to validate an object.
* @object: struct binder_object to read into
*
- * Return: If there's a valid metadata object at @offset in @buffer, the
+ * Copy the binder object at the given offset into @object. If @u is
+ * provided then the copy is from the sender's buffer. If not, then
+ * it is copied from the target's @buffer.
+ *
+ * Return: If there's a valid metadata object at @offset, the
* size of that object. Otherwise, it returns zero. The object
* is read into the struct binder_object pointed to by @object.
*/
static size_t binder_get_object(struct binder_proc *proc,
+ const void __user *u,
struct binder_buffer *buffer,
unsigned long offset,
struct binder_object *object)
@@ -2014,10 +2032,16 @@
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
- offset, read_size))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr))
return 0;
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+ } else {
+ if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size))
+ return 0;
+ }
/* Ok, now see if we read a complete object. */
hdr = &object->hdr;
@@ -2090,7 +2114,7 @@
b, buffer_offset,
sizeof(object_offset)))
return NULL;
- object_size = binder_get_object(proc, b, object_offset, object);
+ object_size = binder_get_object(proc, NULL, b, object_offset, object);
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
if (object_offsetp)
@@ -2155,7 +2179,8 @@
unsigned long buffer_offset;
struct binder_object last_object;
struct binder_buffer_object *last_bbo;
- size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ size_t object_size = binder_get_object(proc, NULL, b,
+ last_obj_offset,
&last_object);
if (object_size != sizeof(*last_bbo))
return false;
@@ -2270,7 +2295,7 @@
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
buffer, buffer_offset,
sizeof(object_offset)))
- object_size = binder_get_object(proc, buffer,
+ object_size = binder_get_object(proc, NULL, buffer,
object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
@@ -2608,16 +2633,266 @@
return ret;
}
-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset offset in target buffer to fixup
+ * @skip_size bytes to skip in copy (fixup will be written later)
+ * @fixup_data data to write at fixup offset
+ * @node list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+ binder_size_t offset;
+ size_t skip_size;
+ binder_uintptr_t fixup_data;
+ struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset offset in target buffer
+ * @sender_uaddr user address in source buffer
+ * @length bytes to copy
+ * @node list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+ binder_size_t offset;
+ const void __user *sender_uaddr;
+ size_t length;
+ struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc: binder_alloc associated with @buffer
+ * @buffer: binder buffer in target process
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ int ret = 0;
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *tmppf;
+ struct binder_ptr_fixup *pf =
+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+ node);
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < sgc->length) {
+ size_t copy_size;
+ size_t bytes_left = sgc->length - bytes_copied;
+ size_t offset = sgc->offset + bytes_copied;
+
+ /*
+ * We copy up to the fixup (pointed to by pf)
+ */
+ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+ : bytes_left;
+ if (!ret && copy_size)
+ ret = binder_alloc_copy_user_to_buffer(
+ alloc, buffer,
+ offset,
+ sgc->sender_uaddr + bytes_copied,
+ copy_size);
+ bytes_copied += copy_size;
+ if (copy_size != bytes_left) {
+ BUG_ON(!pf);
+ /* we stopped at a fixup offset */
+ if (pf->skip_size) {
+ /*
+ * we are just skipping. This is for
+ * BINDER_TYPE_FDA where the translated
+ * fds will be fixed up when we get
+ * to target context.
+ */
+ bytes_copied += pf->skip_size;
+ } else {
+ /* apply the fixup indicated by pf */
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(
+ alloc, buffer,
+ pf->offset,
+ &pf->fixup_data,
+ sizeof(pf->fixup_data));
+ bytes_copied += sizeof(pf->fixup_data);
+ }
+ list_del(&pf->node);
+ kfree(pf);
+ pf = list_first_entry_or_null(pf_head,
+ struct binder_ptr_fixup, node);
+ }
+ }
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ BUG_ON(pf->skip_size == 0);
+ list_del(&pf->node);
+ kfree(pf);
+ }
+ BUG_ON(!list_empty(sgc_head));
+
+ return ret > 0 ? -EINVAL : ret;
+}
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf, *tmppf;
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ list_del(&pf->node);
+ kfree(pf);
+ }
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head: list_head of scatter-gather copy list
+ * @offset: binder buffer offset in target process
+ * @sender_uaddr: user address in source process
+ * @length: bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+ const void __user *sender_uaddr, size_t length)
+{
+ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+ if (!bc)
+ return -ENOMEM;
+
+ bc->offset = offset;
+ bc->sender_uaddr = sender_uaddr;
+ bc->length = length;
+ INIT_LIST_HEAD(&bc->node);
+
+ /*
+ * We are guaranteed that the deferred copies are in-order
+ * so just add to the tail.
+ */
+ list_add_tail(&bc->node, sgc_head);
+
+ return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head: list_head of binder ptr fixup list
+ * @offset: binder buffer offset in target process
+ * @fixup: bytes to be copied for fixup
+ * @skip_size: bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+ binder_uintptr_t fixup, size_t skip_size)
+{
+ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ struct binder_ptr_fixup *tmppf;
+
+ if (!pf)
+ return -ENOMEM;
+
+ pf->offset = offset;
+ pf->fixup_data = fixup;
+ pf->skip_size = skip_size;
+ INIT_LIST_HEAD(&pf->node);
+
+ /* Fixups are *mostly* added in-order, but there are some
+ * exceptions. Look backwards through list for insertion point.
+ */
+ list_for_each_entry_reverse(tmppf, pf_head, node) {
+ if (tmppf->offset < pf->offset) {
+ list_add(&pf->node, &tmppf->node);
+ return 0;
+ }
+ }
+ /*
+ * if we get here, then the new offset is the lowest so
+ * insert at the head
+ */
+ list_add(&pf->node, pf_head);
+ return 0;
+}
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+ struct binder_fd_array_object *fda,
+ const void __user *sender_ubuffer,
struct binder_buffer_object *parent,
+ struct binder_buffer_object *sender_uparent,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
binder_size_t fda_offset;
+ const void __user *sender_ufda_base;
struct binder_proc *proc = thread->proc;
- struct binder_proc *target_proc = t->to_proc;
+ int ret;
+
+ if (fda->num_fds == 0)
+ return 0;
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -2641,19 +2916,25 @@
*/
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
fda->parent_offset;
- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
+ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
+ fda->parent_offset;
+
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
+ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+ if (ret)
+ return ret;
+
for (fdi = 0; fdi < fda->num_fds; fdi++) {
u32 fd;
- int ret;
binder_size_t offset = fda_offset + fdi * sizeof(fd);
+ binder_size_t sender_uoffset = fdi * sizeof(fd);
- ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
+ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
if (!ret)
ret = binder_translate_fd(fd, offset, t, thread,
in_reply_to);
@@ -2663,7 +2944,8 @@
return 0;
}
-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+ struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
binder_size_t off_start_offset,
@@ -2709,14 +2991,7 @@
}
buffer_offset = bp->parent_offset +
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
- if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
- &bp->buffer, sizeof(bp->buffer))) {
- binder_user_error("%d:%d got transaction with invalid parent offset\n",
- proc->pid, thread->pid);
- return -EINVAL;
- }
-
- return 0;
+ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
/**
@@ -2836,6 +3111,7 @@
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
+ binder_size_t user_offset = 0;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2850,6 +3126,12 @@
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
+ struct list_head sgc_head;
+ struct list_head pf_head;
+ const void __user *user_buffer = (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer;
+ INIT_LIST_HEAD(&sgc_head);
+ INIT_LIST_HEAD(&pf_head);
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3163,19 +3445,6 @@
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
- t->buffer, 0,
- (const void __user *)
- (uintptr_t)tr->data.ptr.buffer,
- tr->data_size)) {
- binder_user_error("%d:%d got transaction with invalid data ptr\n",
- proc->pid, thread->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -EFAULT;
- return_error_line = __LINE__;
- goto err_copy_data_failed;
- }
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
t->buffer,
ALIGN(tr->data_size, sizeof(void *)),
(const void __user *)
@@ -3218,6 +3487,7 @@
size_t object_size;
struct binder_object object;
binder_size_t object_offset;
+ binder_size_t copy_size;
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
&object_offset,
@@ -3229,8 +3499,27 @@
return_error_line = __LINE__;
goto err_bad_offset;
}
- object_size = binder_get_object(target_proc, t->buffer,
- object_offset, &object);
+
+ /*
+ * Copy the source user buffer up to the next object
+ * that will be processed.
+ */
+ copy_size = object_offset - user_offset;
+ if (copy_size && (user_offset > object_offset ||
+ binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ copy_size))) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ object_size = binder_get_object(target_proc, user_buffer,
+ t->buffer, object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid,
@@ -3242,6 +3531,11 @@
return_error_line = __LINE__;
goto err_bad_offset;
}
+ /*
+ * Set offset to the next buffer fragment to be
+ * copied
+ */
+ user_offset = object_offset + object_size;
hdr = &object.hdr;
off_min = object_offset + object_size;
@@ -3304,6 +3598,8 @@
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
binder_size_t parent_offset;
+ struct binder_object user_object;
+ size_t user_parent_size;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
size_t num_valid = (buffer_offset - off_start_offset) /
@@ -3335,11 +3631,35 @@
return_error_line = __LINE__;
goto err_bad_parent;
}
- ret = binder_translate_fd_array(fda, parent, t, thread,
- in_reply_to);
- if (ret < 0) {
+ /*
+ * We need to read the user version of the parent
+ * object to get the original user offset
+ */
+ user_parent_size =
+ binder_get_object(proc, user_buffer, t->buffer,
+ parent_offset, &user_object);
+ if (user_parent_size != sizeof(user_object.bbo)) {
+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
+ proc->pid, thread->pid,
+ user_parent_size,
+ sizeof(user_object.bbo));
return_error = BR_FAILED_REPLY;
- return_error_param = ret;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+ ret = binder_translate_fd_array(&pf_head, fda,
+ user_buffer, parent,
+ &user_object.bbo, t,
+ thread, in_reply_to);
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fda, sizeof(*fda));
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
@@ -3361,19 +3681,14 @@
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
- t->buffer,
- sg_buf_offset,
- (const void __user *)
- (uintptr_t)bp->buffer,
- bp->length)) {
- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
- proc->pid, thread->pid);
- return_error_param = -EFAULT;
+ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
+ (const void __user *)(uintptr_t)bp->buffer,
+ bp->length);
+ if (ret) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
return_error_line = __LINE__;
- goto err_copy_data_failed;
+ goto err_translate_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)
@@ -3382,7 +3697,8 @@
num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
- ret = binder_fixup_parent(t, thread, bp,
+ ret = binder_fixup_parent(&pf_head, t,
+ thread, bp,
off_start_offset,
num_valid,
last_fixup_obj_off,
@@ -3409,6 +3725,30 @@
goto err_bad_object_type;
}
}
+ /* Done processing objects, copy the rest of the buffer */
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ tr->data_size - user_offset)) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+
+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
+ &sgc_head, &pf_head);
+ if (ret) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
@@ -3475,6 +3815,7 @@
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
@@ -6069,6 +6410,7 @@
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
+ .may_pollfree = true,
};
static int __init init_binder_device(const char *name)
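
Much of the binder.c change builds ordered work lists (binder_sg_copy, binder_ptr_fixup) that are consumed only after the whole sender buffer has been scanned. The insertion strategy of binder_add_fixup(), reduced to a self-contained sketch with hypothetical names: entries arrive mostly in increasing offset order, so a backward scan from the tail normally terminates after a single comparison.

    #include <linux/list.h>
    #include <linux/slab.h>

    struct example_fixup {
            size_t offset;
            struct list_head node;
    };

    static int example_add_fixup(struct list_head *head, size_t offset)
    {
            struct example_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
            struct example_fixup *cur;

            if (!pf)
                    return -ENOMEM;
            pf->offset = offset;
            INIT_LIST_HEAD(&pf->node);

            /* Walk backwards: the common case inserts right at the tail. */
            list_for_each_entry_reverse(cur, head, node) {
                    if (cur->offset < pf->offset) {
                            list_add(&pf->node, &cur->node);
                            return 0;
                    }
            }
            /* New lowest offset seen so far: insert at the head. */
            list_add(&pf->node, head);
            return 0;
    }
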
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 95ca4f9..a77ed66 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@
mm = alloc->vma_vm_mm;
if (mm) {
- mmap_read_lock(mm);
+ mmap_write_lock(mm);
vma = alloc->vma;
}
@@ -270,7 +270,7 @@
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,7 +303,7 @@
}
err_no_vma:
if (mm) {
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index d1f284f..1ce8973 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -254,7 +254,7 @@
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
- EM_MAX_SLOTS = 8,
+ EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,
/* em_ctl bits */
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 388baf5..189f75d 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1230,4 +1230,4 @@
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 0910441..64d6da0 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -451,14 +451,24 @@
}
}
- hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
+ /*
+ * Too many sub-nodes most likely means having something wrong with
+ * the firmware.
+ */
+ child_nodes = of_get_child_count(dev->of_node);
+ if (child_nodes > AHCI_MAX_PORTS) {
+ rc = -EINVAL;
+ goto err_out;
+ }
/*
* If no sub-node was found, we still need to set nports to
* one in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (!child_nodes)
+ if (child_nodes)
+ hpriv->nports = child_nodes;
+ else
hpriv->nports = 1;
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d2b544b..d13474c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3936,6 +3936,10 @@
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* These specific Pioneer models have LPM issues */
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
@@ -3974,6 +3978,9 @@
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_NO_DMA_LOG |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
@@ -5472,7 +5479,7 @@
const struct ata_port_info * const * ppi,
int n_ports)
{
- const struct ata_port_info *pi;
+ const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;
@@ -5480,7 +5487,7 @@
if (!host)
return NULL;
- for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+ for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ppi[j])
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 018ed87..973f4d3 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2131,6 +2131,7 @@
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a0e788b..f1755ef 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3303,6 +3303,7 @@
case REPORT_LUNS:
case REQUEST_SENSE:
case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
@@ -3969,6 +3970,7 @@
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
break;
@@ -4030,44 +4032,51 @@
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
+ struct ata_port *ap = dev->link->ap;
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
- int rc = 0;
+
+ /*
+ * scsi_queue_rq() will defer commands if scsi_host_in_recovery().
+ * However, this check is done without holding the ap->lock (a libata
+ * specific lock), so we can have received an error irq since then,
+ * therefore we must check if EH is pending, while holding ap->lock.
+ */
+ if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ if (unlikely(!scmd->cmd_len))
+ goto bad_cdb_len;
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
- if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
+ if (unlikely(scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
- } else {
- if (unlikely(!scmd->cmd_len))
+ } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+ /* relay SCSI command to ATAPI device */
+ int len = COMMAND_SIZE(scsi_op);
+
+ if (unlikely(len > scmd->cmd_len ||
+ len > dev->cdb_len ||
+ scmd->cmd_len > ATAPI_CDB_LEN))
goto bad_cdb_len;
- xlat_func = NULL;
- if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
- /* relay SCSI command to ATAPI device */
- int len = COMMAND_SIZE(scsi_op);
- if (unlikely(len > scmd->cmd_len ||
- len > dev->cdb_len ||
- scmd->cmd_len > ATAPI_CDB_LEN))
- goto bad_cdb_len;
+ xlat_func = atapi_xlat;
+ } else {
+ /* ATA_16 passthru, treat as an ATA command */
+ if (unlikely(scmd->cmd_len > 16))
+ goto bad_cdb_len;
- xlat_func = atapi_xlat;
- } else {
- /* ATA_16 passthru, treat as an ATA command */
- if (unlikely(scmd->cmd_len > 16))
- goto bad_cdb_len;
-
- xlat_func = ata_get_xlat_func(dev, scsi_op);
- }
+ xlat_func = ata_get_xlat_func(dev, scsi_op);
}
if (xlat_func)
- rc = ata_scsi_translate(dev, scmd, xlat_func);
- else
- ata_scsi_simulate(dev, scmd);
+ return ata_scsi_translate(dev, scmd, xlat_func);
- return rc;
+ ata_scsi_simulate(dev, scmd);
+
+ return 0;
bad_cdb_len:
DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
@@ -4215,6 +4224,7 @@
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
fallthrough;
/* no-op's, complete with success */
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 6a40e3c..31a66fc 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -196,7 +196,7 @@
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
-ata_bitfield_name_match(xfer,ata_xfer_names)
+ata_bitfield_name_search(xfer, ata_xfer_names)
/*
* ATA Port attributes
@@ -301,7 +301,9 @@
pm_runtime_enable(dev);
pm_runtime_forbid(dev);
- transport_add_device(dev);
+ error = transport_add_device(dev);
+ if (error)
+ goto tport_transport_add_err;
transport_configure_device(dev);
error = ata_tlink_add(&ap->link);
@@ -312,12 +314,12 @@
tport_link_err:
transport_remove_device(dev);
+ tport_transport_add_err:
device_del(dev);
tport_err:
transport_destroy_device(dev);
put_device(dev);
- ata_host_put(ap->host);
return error;
}
@@ -426,7 +428,9 @@
goto tlink_err;
}
- transport_add_device(dev);
+ error = transport_add_device(dev);
+ if (error)
+ goto tlink_transport_err;
transport_configure_device(dev);
ata_for_each_dev(ata_dev, link, ALL) {
@@ -441,6 +445,7 @@
ata_tdev_delete(ata_dev);
}
transport_remove_device(dev);
+ tlink_transport_err:
device_del(dev);
tlink_err:
transport_destroy_device(dev);
@@ -678,7 +683,13 @@
return error;
}
- transport_add_device(dev);
+ error = transport_add_device(dev);
+ if (error) {
+ device_del(dev);
+ ata_tdev_free(ata_dev);
+ return error;
+ }
+
transport_configure_device(dev);
return 0;
}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index d91ba47..4405d25 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -278,9 +278,10 @@
outb(inb(0x1F4) & 0x07, 0x1F4);
rt = inb(0x1F3);
- rt &= 0x07 << (3 * adev->devno);
+ rt &= ~(0x07 << (3 * !adev->devno));
if (pio)
- rt |= (1 + 3 * pio) << (3 * adev->devno);
+ rt |= (1 + 3 * pio) << (3 * !adev->devno);
+ outb(rt, 0x1F3);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);
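
The pata_legacy.c hunk fixes a classic read-modify-write bug: the old code masked with the field instead of clearing it, indexed the wrong device's field, and never wrote the result back. The generic pattern, as an illustrative sketch against a hypothetical port:

    #include <linux/io.h>

    static void example_update_field(unsigned long port, int shift, u8 val)
    {
            u8 reg = inb(port);

            reg &= ~(0x07 << shift);        /* clear the 3-bit field first */
            reg |= (val & 0x07) << shift;   /* then install the new value */
            outb(reg, port);                /* and write the register back */
    }
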
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index b066809..c56f404 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -83,6 +83,8 @@
switch(ap->port_no)
{
case 0:
+ if (!ap->ioaddr.bmdma_addr)
+ return ATA_CBL_PATA_UNK;
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index b5a3f71..4cc8a10 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -888,12 +888,14 @@
int i;
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
+ put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
+ put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
@@ -903,6 +905,7 @@
irq = i;
irq_handler = octeon_cf_interrupt;
}
+ put_device(&dma_dev->dev);
}
of_node_put(dma_node);
}
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 982fe91..464260f 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -145,7 +145,11 @@
#endif
};
-#define SATA_DWC_QCMD_MAX 32
+/*
+ * Allow one extra special slot for commands and DMA management
+ * to account for libata internal commands.
+ */
+#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1)
struct sata_dwc_device_port {
struct sata_dwc_device *hsdev;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 5f0472c..82f6f1f 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3767,6 +3767,7 @@
card = idt77252_chain;
dev = card->atmdev;
idt77252_chain = card->next;
+ del_timer_sync(&card->tst_timer);
if (dev->phy->stop)
dev->phy->stop(dev);
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index de8587c..5164792 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -515,7 +515,7 @@
for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
@@ -596,4 +596,23 @@
else if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
+
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: package %d core %d thread %d\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+}
#endif
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index a9c23ec..df85e92 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -621,7 +621,7 @@
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
- goto out_unregister;
+ goto out_del_list;
}
module_add_driver(drv->owner, drv);
@@ -648,6 +648,8 @@
return 0;
+out_del_list:
+ klist_del(&priv->knode_bus);
out_unregister:
kobject_put(&priv->kobj);
/* drv->p is freed in driver_release() */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c0566af..9a874a5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -348,7 +348,8 @@
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
- pm_runtime_release_supplier(link, true);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
put_device(link->consumer);
put_device(link->supplier);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 8f1d656..24dd532 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -566,6 +566,18 @@
return sysfs_emit(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -575,6 +587,8 @@
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -586,6 +600,8 @@
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
+ &dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 64ff137..72ef9e8 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -250,7 +250,6 @@
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -302,7 +301,6 @@
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
- wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -706,9 +704,6 @@
*/
void wait_for_device_probe(void)
{
- /* wait for probe timeout */
- wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
-
/* wait for the deferred probe workqueue to finish */
flush_work(&deferred_probe_work);
@@ -771,7 +766,7 @@
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
- return 0;
+ return 1;
}
__setup("driver_async_probe=", save_async_options);
@@ -842,6 +837,11 @@
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -897,6 +897,7 @@
static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
+ bool async = false;
device_lock(dev);
if (dev->p->dead) {
@@ -935,7 +936,7 @@
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
- async_schedule_dev(__device_attach_async_helper, dev);
+ async = true;
} else {
pm_request_idle(dev);
}
@@ -945,6 +946,8 @@
}
out_unlock:
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__device_attach_async_helper, dev);
return ret;
}
@@ -1058,6 +1061,7 @@
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
+ bool async = false;
int ret;
/*
@@ -1077,6 +1081,11 @@
} else if (ret == -EPROBE_DEFER) {
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -1095,9 +1104,11 @@
if (!dev->driver) {
get_device(dev);
dev->p->async_driver = drv;
- async_schedule_dev(__driver_attach_async_helper, dev);
+ async = true;
}
device_unlock(dev);
+ if (async)
+ async_schedule_dev(__driver_attach_async_helper, dev);
return 0;
}
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 1372f40..a4dd500 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -793,6 +793,8 @@
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
+ struct cred *kern_cred = NULL;
+ const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -809,6 +811,18 @@
if (ret <= 0) /* error or already assigned */
goto out;
+ /*
+ * We are about to try to access the firmware file. Because we may have been
+ * called by a driver when serving an unrelated request from userland, we use
+ * the kernel credentials to read the file.
+ */
+ kern_cred = prepare_kernel_cred(NULL);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
+
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
/* Only full reads can support decompression, platform, and sysfs. */
@@ -834,6 +848,9 @@
} else
ret = assign_fw(fw, device);
+ revert_creds(old_cred);
+ put_cred(kern_cred);
+
out:
if (ret < 0) {
fw_abort_batch_reqs(fw);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index de058d1..49eb142 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -560,10 +560,9 @@
}
ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
GFP_KERNEL));
- if (ret) {
- put_device(&memory->dev);
+ if (ret)
device_unregister(&memory->dev);
- }
+
return ret;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 21965de..5f745c9 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -655,6 +655,7 @@
*/
void unregister_node(struct node *node)
{
+ compaction_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
node_remove_accesses(node);
node_remove_caches(node);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7432689..d0ba545 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2789,6 +2789,10 @@
np = it.node;
if (!of_match_node(idle_state_match, np))
continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
if (states) {
ret = genpd_parse_state(&states[i], np);
if (ret) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4167e2a..1dbaadd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1994,7 +1994,9 @@
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2003,7 +2005,7 @@
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1573319..835a39e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -308,13 +308,10 @@
/**
* pm_runtime_release_supplier - Drop references to device link's supplier.
* @link: Target device link.
- * @check_idle: Whether or not to check if the supplier device is idle.
*
- * Drop all runtime PM references associated with @link to its supplier device
- * and if @check_idle is set, check if that device is idle (and so it can be
- * suspended).
+ * Drop all runtime PM references associated with @link to its supplier device.
*/
-void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+void pm_runtime_release_supplier(struct device_link *link)
{
struct device *supplier = link->supplier;
@@ -327,9 +324,6 @@
while (refcount_dec_not_one(&link->rpm_active) &&
atomic_read(&supplier->power.usage_count) > 0)
pm_runtime_put_noidle(supplier);
-
- if (check_idle)
- pm_request_idle(supplier);
}
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
@@ -337,8 +331,11 @@
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- pm_runtime_release_supplier(link, try_to_suspend);
+ device_links_read_lock_held()) {
+ pm_runtime_release_supplier(link);
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
+ }
}
static void rpm_put_suppliers(struct device *dev)
@@ -1776,7 +1773,8 @@
return;
pm_runtime_drop_link_count(link->consumer);
- pm_runtime_release_supplier(link, true);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 87c5c42..4466f8b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -220,6 +220,7 @@
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+ unsigned int reg = irq_data->reg_offset / map->reg_stride;
unsigned int mask, type;
type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
@@ -236,14 +237,14 @@
* at the corresponding offset in regmap_irq_set_type().
*/
if (d->chip->type_in_mask && type)
- mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
+ mask = d->type_buf[reg] & irq_data->mask;
else
mask = irq_data->mask;
if (d->chip->clear_on_unmask)
d->clear_status = true;
- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
+ d->mask_buf[reg] &= ~mask;
}
static void regmap_irq_disable(struct irq_data *data)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index f254804..40c5363 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -39,6 +39,22 @@
To compile this driver as a module, choose M here: the
module will be called floppy.
+config BLK_DEV_FD_RAWCMD
+ bool "Support for raw floppy disk commands (DEPRECATED)"
+ depends on BLK_DEV_FD
+ help
+ If you want to use actual physical floppies and expect to do
+ special low-level hardware accesses to them (access and use
+ non-standard formats, for example), then enable this.
+
+ Note that the code enabled by this option is rarely used and
+ might be unstable or insecure, and distros should not enable it.
+
+ Note: FDRAWCMD is deprecated and will be removed from the kernel
+ in the near future.
+
+ If unsure, say N.
+
config AMIGA_FLOPPY
tristate "Amiga floppy support"
depends on AMIGA
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8f879e5..60b9ca5 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1644,22 +1644,22 @@
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
-extern void notify_resource_state(struct sk_buff *,
+extern int notify_resource_state(struct sk_buff *,
unsigned int,
struct drbd_resource *,
struct resource_info *,
enum drbd_notification_type);
-extern void notify_device_state(struct sk_buff *,
+extern int notify_device_state(struct sk_buff *,
unsigned int,
struct drbd_device *,
struct device_info *,
enum drbd_notification_type);
-extern void notify_connection_state(struct sk_buff *,
+extern int notify_connection_state(struct sk_buff *,
unsigned int,
struct drbd_connection *,
struct connection_info *,
enum drbd_notification_type);
-extern void notify_peer_device_state(struct sk_buff *,
+extern int notify_peer_device_state(struct sk_buff *,
unsigned int,
struct drbd_peer_device *,
struct peer_device_info *,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 65b95ae..51450f7 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -184,7 +184,7 @@
unsigned int set_size)
{
struct drbd_request *r;
- struct drbd_request *req = NULL;
+ struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;
@@ -238,8 +238,11 @@
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
- if (req->epoch == expect_epoch)
+ if (req->epoch == expect_epoch) {
+ tmp = req;
break;
+ }
+ req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
@@ -2717,7 +2720,7 @@
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
struct drbd_resource *resource = adm_ctx->resource;
- struct drbd_connection *connection;
+ struct drbd_connection *connection, *n;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
@@ -2836,7 +2839,7 @@
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
- for_each_connection(connection, resource) {
+ for_each_connection_safe(connection, n, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);
@@ -3628,9 +3631,8 @@
* when we want to support more than
* one PRO_VERSION */
static const char *cmdnames[] = {
+
[P_DATA] = "Data",
- [P_WSAME] = "WriteSame",
- [P_TRIM] = "Trim",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
@@ -3641,7 +3643,6 @@
[P_DATA_REQUEST] = "DataRequest",
[P_RS_DATA_REQUEST] = "RSDataRequest",
[P_SYNC_PARAM] = "SyncParam",
- [P_SYNC_PARAM89] = "SyncParam89",
[P_PROTOCOL] = "ReportProtocol",
[P_UUIDS] = "ReportUUIDs",
[P_SIZES] = "ReportSizes",
@@ -3649,6 +3650,7 @@
[P_SYNC_UUID] = "ReportSyncUUID",
[P_AUTH_CHALLENGE] = "AuthChallenge",
[P_AUTH_RESPONSE] = "AuthResponse",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
[P_PING] = "Ping",
[P_PING_ACK] = "PingAck",
[P_RECV_ACK] = "RecvAck",
@@ -3659,24 +3661,26 @@
[P_NEG_DREPLY] = "NegDReply",
[P_NEG_RS_DREPLY] = "NegRSDReply",
[P_BARRIER_ACK] = "BarrierAck",
- [P_STATE_CHG_REQ] = "StateChgRequest",
[P_STATE_CHG_REPLY] = "StateChgReply",
[P_OV_REQUEST] = "OVRequest",
[P_OV_REPLY] = "OVReply",
[P_OV_RESULT] = "OVResult",
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+ [P_SYNC_PARAM89] = "SyncParam89",
[P_COMPRESSED_BITMAP] = "CBitmap",
[P_DELAY_PROBE] = "DelayProbe",
[P_OUT_OF_SYNC] = "OutOfSync",
- [P_RETRY_WRITE] = "RetryWrite",
[P_RS_CANCEL] = "RSCancel",
[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_RETRY_WRITE] = "retry_write",
[P_PROTOCOL_UPDATE] = "protocol_update",
+ [P_TRIM] = "Trim",
[P_RS_THIN_REQ] = "rs_thin_req",
[P_RS_DEALLOCATED] = "rs_deallocated",
+ [P_WSAME] = "WriteSame",
+ [P_ZEROES] = "Zeroes",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index bf7de4c..54f77b4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -790,9 +790,11 @@
mutex_lock(&adm_ctx.resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
- retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
+ retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
+ R_PRIMARY, parms.assume_uptodate);
else
- retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
+ retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
+ R_SECONDARY, 0);
mutex_unlock(&adm_ctx.resource->adm_mutex);
genl_lock();
@@ -1962,7 +1964,7 @@
drbd_flush_workqueue(&connection->sender_work);
rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
- retcode = rv; /* FIXME: Type mismatch. */
+ retcode = (enum drbd_ret_code)rv;
drbd_resume_io(device);
if (rv < SS_SUCCESS)
goto fail;
@@ -2687,7 +2689,8 @@
}
rcu_read_unlock();
- retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+ retcode = (enum drbd_ret_code)conn_request_state(connection,
+ NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex);
@@ -2800,7 +2803,7 @@
mutex_lock(&adm_ctx.resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS)
- retcode = rv; /* FIXME: Type mismatch. */
+ retcode = (enum drbd_ret_code)rv;
else
retcode = NO_ERROR;
mutex_unlock(&adm_ctx.resource->adm_mutex);
@@ -4614,7 +4617,7 @@
return drbd_notification_header_to_skb(msg, &nh, true);
}
-void notify_resource_state(struct sk_buff *skb,
+int notify_resource_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource *resource,
struct resource_info *resource_info,
@@ -4656,16 +4659,17 @@
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_device_state(struct sk_buff *skb,
+int notify_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_device *device,
struct device_info *device_info,
@@ -4705,16 +4709,17 @@
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_connection_state(struct sk_buff *skb,
+int notify_connection_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection *connection,
struct connection_info *connection_info,
@@ -4754,16 +4759,17 @@
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_peer_device_state(struct sk_buff *skb,
+int notify_peer_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device *peer_device,
struct peer_device_info *peer_device_info,
@@ -4804,13 +4810,14 @@
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
void notify_helper(enum drbd_notification_type type,
@@ -4861,7 +4868,7 @@
err, seq);
}
-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
struct drbd_genlmsghdr *dh;
int err;
@@ -4875,11 +4882,12 @@
if (nla_put_notification_header(skb, NOTIFY_EXISTS))
goto nla_put_failure;
genlmsg_end(skb, dh);
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
pr_err("Error %d sending event. Event seq:%u\n", err, seq);
+ return err;
}
static void free_state_changes(struct list_head *list)
@@ -4906,6 +4914,7 @@
unsigned int seq = cb->args[2];
unsigned int n;
enum drbd_notification_type flags = 0;
+ int err = 0;
/* There is no need for taking notification_mutex here: it doesn't
matter if the initial state events mix with later state chage
@@ -4914,32 +4923,32 @@
cb->args[5]--;
if (cb->args[5] == 1) {
- notify_initial_state_done(skb, seq);
+ err = notify_initial_state_done(skb, seq);
goto out;
}
n = cb->args[4]++;
if (cb->args[4] < cb->args[3])
flags |= NOTIFY_CONTINUES;
if (n < 1) {
- notify_resource_state_change(skb, seq, state_change->resource,
+ err = notify_resource_state_change(skb, seq, state_change->resource,
NOTIFY_EXISTS | flags);
goto next;
}
n--;
if (n < state_change->n_connections) {
- notify_connection_state_change(skb, seq, &state_change->connections[n],
+ err = notify_connection_state_change(skb, seq, &state_change->connections[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_connections;
if (n < state_change->n_devices) {
- notify_device_state_change(skb, seq, &state_change->devices[n],
+ err = notify_device_state_change(skb, seq, &state_change->devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_devices;
if (n < state_change->n_devices * state_change->n_connections) {
- notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+ err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
@@ -4954,7 +4963,10 @@
cb->args[4] = 0;
}
out:
- return skb->len;
+ if (err)
+ return err;
+ else
+ return skb->len;
}
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 330f851..6963814 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -177,7 +177,8 @@
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
- m->bio->bi_status = errno_to_blk_status(m->error);
+ if (unlikely(m->error))
+ m->bio->bi_status = errno_to_blk_status(m->error);
bio_endio(m->bio);
dec_ap_bio(device);
}
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 0067d32..5fbaea6 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1537,7 +1537,7 @@
return rv;
}
-void notify_resource_state_change(struct sk_buff *skb,
+int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource_state_change *resource_state_change,
enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@
.res_susp_fen = resource_state_change->susp_fen[NEW],
};
- notify_resource_state(skb, seq, resource, &resource_info, type);
+ return notify_resource_state(skb, seq, resource, &resource_info, type);
}
-void notify_connection_state_change(struct sk_buff *skb,
+int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection_state_change *connection_state_change,
enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@
.conn_role = connection_state_change->peer_role[NEW],
};
- notify_connection_state(skb, seq, connection, &connection_info, type);
+ return notify_connection_state(skb, seq, connection, &connection_info, type);
}
-void notify_device_state_change(struct sk_buff *skb,
+int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_device_state_change *device_state_change,
enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@
.dev_disk_state = device_state_change->disk_state[NEW],
};
- notify_device_state(skb, seq, device, &device_info, type);
+ return notify_device_state(skb, seq, device, &device_info, type);
}
-void notify_peer_device_state_change(struct sk_buff *skb,
+int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device_state_change *p,
enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@
.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
};
- notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+ return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}
static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
- void (*last_func)(struct sk_buff *, unsigned int, void *,
+ int (*last_func)(struct sk_buff *, unsigned int, void *,
enum drbd_notification_type) = NULL;
void *last_arg = NULL;
diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index ba80f61..d5b0479 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -44,19 +44,19 @@
extern void copy_old_to_new_state_change(struct drbd_state_change *);
extern void forget_state_change(struct drbd_state_change *);
-extern void notify_resource_state_change(struct sk_buff *,
+extern int notify_resource_state_change(struct sk_buff *,
unsigned int,
struct drbd_resource_state_change *,
enum drbd_notification_type type);
-extern void notify_connection_state_change(struct sk_buff *,
+extern int notify_connection_state_change(struct sk_buff *,
unsigned int,
struct drbd_connection_state_change *,
enum drbd_notification_type type);
-extern void notify_device_state_change(struct sk_buff *,
+extern int notify_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_device_state_change *,
enum drbd_notification_type type);
-extern void notify_peer_device_state_change(struct sk_buff *,
+extern int notify_peer_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_peer_device_state_change *,
enum drbd_notification_type type);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index aaee150..4ef407a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -509,8 +509,8 @@
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@@ -530,7 +530,6 @@
static char *floppy_track_buffer;
static int max_buffer_sectors;
-static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@@ -1455,7 +1454,7 @@
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
- } else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+ } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@
if (!next_valid_format(current_drive))
return;
}
- err_count = ++(*errors);
+ err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
@@ -2240,9 +2239,8 @@
return -EINVAL;
}
format_req = *tmp_format_req;
- format_errors = 0;
cont = &format_cont;
- errors = &format_errors;
+ floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@@ -2721,7 +2719,7 @@
*/
if (!direct ||
(indirect * 2 > direct * 3 &&
- *errors < drive_params[current_drive].max_errors.read_track &&
+ floppy_errors < drive_params[current_drive].max_errors.read_track &&
((!probing ||
(drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) {
max_size = blk_rq_sectors(current_req);
@@ -2846,10 +2844,11 @@
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
- current_req->error_count = 0;
+ floppy_errors = 0;
list_del_init(&current_req->queuelist);
+ return 1;
}
- return current_req != NULL;
+ return 0;
}
/* Starts or continues processing request. Will automatically unlock the
@@ -2908,7 +2907,6 @@
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
- errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
@@ -3069,6 +3067,8 @@
return "(null)";
}
+#ifdef CONFIG_BLK_DEV_FD_RAWCMD
+
/* raw commands */
static void raw_cmd_done(int flag)
{
@@ -3273,6 +3273,35 @@
return ret;
}
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+ void __user *param)
+{
+ int ret;
+
+ pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
+
+ if (type)
+ return -EINVAL;
+ if (lock_fdc(drive))
+ return -EINTR;
+ set_floppy(drive);
+ ret = raw_cmd_ioctl(cmd, param);
+ if (ret == -EINTR)
+ return -EINTR;
+ process_fd_request();
+ return ret;
+}
+
+#else /* CONFIG_BLK_DEV_FD_RAWCMD */
+
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+ void __user *param)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
static int invalidate_drive(struct block_device *bdev)
{
/* invalidate the buffer track to force a reread */
@@ -3461,7 +3490,6 @@
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
- int i;
int ret;
int size;
union inparam {
@@ -3612,16 +3640,7 @@
outparam = &write_errors[drive];
break;
case FDRAWCMD:
- if (type)
- return -EINVAL;
- if (lock_fdc(drive))
- return -EINTR;
- set_floppy(drive);
- i = raw_cmd_ioctl(cmd, (void __user *)param);
- if (i == -EINTR)
- return -EINTR;
- process_fd_request();
- return i;
+ return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ee537a9..b104105 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -797,33 +797,33 @@
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}
static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}
static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
- return sprintf(buf, "%s\n", autoclear ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
}
static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
- return sprintf(buf, "%s\n", partscan ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
}
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
- return sprintf(buf, "%s\n", dio ? "1" : "0");
+ return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}
LOOP_ATTR_RO(backing_file);
@@ -1031,6 +1031,11 @@
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
+
+ /* loff_t vars have been assigned __u64 */
+ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+ return -EOVERFLOW;
+
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 59c452f..b0d3dad 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -880,11 +880,15 @@
struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout)
return 0;
- if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
+
+ if (!wait_event_timeout(config->conn_wait,
+ test_bit(NBD_RT_DISCONNECTED,
+ &config->runtime_flags) ||
+ atomic_read(&config->live_connections) > 0,
+ config->dead_conn_timeout))
return 0;
- return wait_event_timeout(config->conn_wait,
- atomic_read(&config->live_connections) > 0,
- config->dead_conn_timeout) > 0;
+
+ return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
@@ -1338,10 +1342,12 @@
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret)
+ if (ret) {
sock_shutdown(nbd);
- flush_workqueue(nbd->recv_workq);
+ nbd_clear_que(nbd);
+ }
+ flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */
@@ -1355,7 +1361,7 @@
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
- sock_shutdown(nbd);
+ nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1468,15 +1474,20 @@
{
struct nbd_config *config;
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-ENODEV);
+
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
- if (!config)
- return NULL;
+ if (!config) {
+ module_put(THIS_MODULE);
+ return ERR_PTR(-ENOMEM);
+ }
+
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
config->blksize = NBD_DEF_BLKSIZE;
atomic_set(&config->live_connections, 0);
- try_module_get(THIS_MODULE);
return config;
}
@@ -1503,12 +1514,13 @@
mutex_unlock(&nbd->config_lock);
goto out;
}
- config = nbd->config = nbd_alloc_config();
- if (!config) {
- ret = -ENOMEM;
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
+ ret = PTR_ERR(config);
mutex_unlock(&nbd->config_lock);
goto out;
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
@@ -1930,13 +1942,14 @@
nbd_put(nbd);
return -EINVAL;
}
- config = nbd->config = nbd_alloc_config();
- if (!nbd->config) {
+ config = nbd_alloc_config();
+ if (IS_ERR(config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
printk(KERN_ERR "nbd: couldn't allocate config\n");
- return -ENOMEM;
+ return PTR_ERR(config);
}
+ nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
@@ -2029,6 +2042,7 @@
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
sock_shutdown(nbd);
+ wake_up(&nbd->config->conn_wait);
/*
* Make sure recv thread has finished, so it does not drop the last
* config ref and try to destroy the workqueue from inside the work
@@ -2456,6 +2470,12 @@
struct nbd_device *nbd;
LIST_HEAD(del_list);
+ /*
+ * Unregister netlink interface prior to waiting
+ * for the completion of netlink commands.
+ */
+ genl_unregister_family(&nbd_genl_family);
+
nbd_dbg_close();
mutex_lock(&nbd_index_mutex);
@@ -2465,13 +2485,15 @@
while (!list_empty(&del_list)) {
nbd = list_first_entry(&del_list, struct nbd_device, list);
list_del_init(&nbd->list);
+ if (refcount_read(&nbd->config_refs))
+ printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
+ refcount_read(&nbd->config_refs));
if (refcount_read(&nbd->refs) != 1)
printk(KERN_ERR "nbd: possibly leaking a device\n");
nbd_put(nbd);
}
idr_destroy(&nbd_index_idr);
- genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");
}
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index bb3686c..c6ba8f9 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1876,8 +1876,13 @@
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
- dev->index = nullb->index;
+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ mutex_unlock(&lock);
+ goto out_cleanup_zone;
+ }
+ nullb->index = rv;
+ dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
@@ -1889,13 +1894,16 @@
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_zone;
+ goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
+
+out_ida_free:
+ ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_blk_queue:
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index a033901..9b54eec 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -825,9 +825,17 @@
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
&blk_size);
- if (!err)
+ if (!err) {
+ err = blk_validate_block_size(blk_size);
+ if (err) {
+ dev_err(&vdev->dev,
+ "virtio_blk: invalid block size: 0x%x\n",
+ blk_size);
+ goto out_free_tags;
+ }
+
blk_queue_logical_block_size(q, blk_size);
- else
+ } else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
@@ -857,11 +865,12 @@
blk_queue_io_opt(q, blk_size * opt_io_size);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
- q->limits.discard_granularity = blk_size;
-
virtio_cread(vdev, struct virtio_blk_config,
discard_sector_alignment, &v);
- q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
+ if (v)
+ q->limits.discard_granularity = v << SECTOR_SHIFT;
+ else
+ q->limits.discard_granularity = blk_size;
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 040829e..5eff347 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -226,6 +226,9 @@
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ /* Connect-time cached feature_persistent parameter value */
+ unsigned int feature_gnt_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 6c5e937..ddea362 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -157,6 +157,11 @@
return 0;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
+
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
@@ -472,12 +477,6 @@
vbd->bdev = NULL;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -523,8 +522,6 @@
if (q && blk_queue_secure_erase(q))
vbd->discard_secure = true;
- vbd->feature_gnt_persistent = feature_persistent;
-
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -914,7 +911,7 @@
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- be->blkif->vbd.feature_gnt_persistent);
+ be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1091,10 +1088,11 @@
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- if (blkif->vbd.feature_gnt_persistent)
- blkif->vbd.feature_gnt_persistent =
- xenbus_read_unsigned(dev->otherend,
- "feature-persistent", 0);
+
+ blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
+ blkif->vbd.feature_gnt_persistent =
+ blkif->vbd.feature_gnt_persistent_parm &&
+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 47d4bb2..6f33d62 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -151,6 +151,10 @@
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define BLK_RING_SIZE(info) \
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
@@ -207,7 +211,11 @@
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ /* Connect-time cached feature_persistent parameter */
+ unsigned int feature_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
+ unsigned int bounce:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
/* Number of 4KB segments handled */
@@ -310,8 +318,8 @@
if (!gnt_list_entry)
goto out_of_memory;
- if (info->feature_persistent) {
- granted_page = alloc_page(GFP_NOIO);
+ if (info->bounce) {
+ granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
@@ -330,7 +338,7 @@
list_for_each_entry_safe(gnt_list_entry, n,
&rinfo->grants, node) {
list_del(&gnt_list_entry->node);
- if (info->feature_persistent)
+ if (info->bounce)
__free_page(gnt_list_entry->page);
kfree(gnt_list_entry);
i--;
@@ -376,7 +384,7 @@
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
- if (info->feature_persistent)
+ if (info->bounce)
grant_foreign_access(gnt_list_entry, info);
else {
/* Grant access to the GFN passed by the caller */
@@ -400,7 +408,7 @@
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
- if (!info->feature_persistent) {
+ if (!info->bounce) {
struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */
@@ -715,7 +723,7 @@
.grant_idx = 0,
.segments = NULL,
.rinfo = rinfo,
- .need_copy = rq_data_dir(req) && info->feature_persistent,
+ .need_copy = rq_data_dir(req) && info->bounce,
};
/*
@@ -1035,11 +1043,12 @@
{
blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
info->feature_fua ? true : false);
- pr_info("blkfront: %s: %s %s %s %s %s\n",
+ pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
- info->max_indirect_segments ? "enabled;" : "disabled;");
+ info->max_indirect_segments ? "enabled;" : "disabled;",
+ "bounce buffer:", info->bounce ? "enabled" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1273,7 +1282,7 @@
if (!list_empty(&rinfo->indirect_pages)) {
struct page *indirect_page, *n;
- BUG_ON(info->feature_persistent);
+ BUG_ON(info->bounce);
list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
list_del(&indirect_page->lru);
__free_page(indirect_page);
@@ -1290,7 +1299,7 @@
0, 0UL);
rinfo->persistent_gnts_c--;
}
- if (info->feature_persistent)
+ if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
@@ -1311,7 +1320,7 @@
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
- if (info->feature_persistent)
+ if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
@@ -1501,7 +1510,7 @@
data.s = s;
num_sg = s->num_sg;
- if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+ if (bret->operation == BLKIF_OP_READ && info->bounce) {
for_each_sg(s->sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -1560,7 +1569,7 @@
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- if (!info->feature_persistent) {
+ if (!info->bounce) {
indirect_page = s->indirect_grants[i]->page;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
}
@@ -1753,7 +1762,7 @@
for (i = 0; i < info->nr_ring_pages; i++)
rinfo->ring_ref[i] = GRANT_INVALID_REF;
- sring = alloc_pages_exact(ring_size, GFP_NOIO);
+ sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
if (!sring) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
return -ENOMEM;
@@ -1843,6 +1852,12 @@
kfree(info);
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1857,6 +1872,10 @@
if (!info)
return -ENODEV;
+ /* Check if backend is trusted. */
+ info->bounce = !xen_blkif_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
"max-ring-page-order", 0);
ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -1930,8 +1949,9 @@
message = "writing protocol";
goto abort_transaction;
}
+ info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- info->feature_persistent);
+ info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -2006,12 +2026,6 @@
return 0;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2078,8 +2092,6 @@
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
- info->feature_persistent = feature_persistent;
-
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2283,17 +2295,18 @@
if (err)
goto out_of_memory;
- if (!info->feature_persistent && info->max_indirect_segments) {
+ if (!info->bounce && info->max_indirect_segments) {
/*
- * We are using indirect descriptors but not persistent
- * grants, we need to allocate a set of pages that can be
+ * We are using indirect descriptors but don't have a bounce
+ * buffer, we need to allocate a set of pages that can be
* used for mapping indirect grefs
*/
int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
- struct page *indirect_page = alloc_page(GFP_KERNEL);
+ struct page *indirect_page = alloc_page(GFP_KERNEL |
+ __GFP_ZERO);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2382,10 +2395,12 @@
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (info->feature_persistent)
+ if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
+ if (info->feature_persistent)
+ info->bounce = true;
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
@@ -2759,6 +2774,13 @@
struct blkfront_info *info;
bool need_schedule_work = false;
+ /*
+ * Note that when using bounce buffers but not persistent grants
+ * there's no need to run blkfront_delay_work because grants are
+ * revoked in blkif_completion or else an error is reported and the
+ * connection is closed.
+ */
+
mutex_lock(&blkfront_mutex);
list_for_each_entry(info, &info_list, info_list) {
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 33e3b76..b086504 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -61,12 +61,6 @@
bool zcomp_available_algorithm(const char *comp)
{
- int i;
-
- i = sysfs_match_string(backends, comp);
- if (i >= 0)
- return true;
-
/*
* Crypto does not ignore a trailing new line symbol,
* so make sure you don't supply a string containing
@@ -215,6 +209,11 @@
struct zcomp *comp;
int error;
+ /*
+ * Crypto API will execute /sbin/modprobe if the compression module
+ * is not loaded yet. We must do it here, otherwise we are about to
+ * call /sbin/modprobe under CPU hot-plug lock.
+ */
if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1b9743b..d263eac 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -401,6 +401,8 @@
{ 0x6606, "BCM4345C5" }, /* 003.006.006 */
{ 0x230f, "BCM4356A2" }, /* 001.003.015 */
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
+ { 0x420d, "BCM4349B1" }, /* 002.002.013 */
+ { 0x420e, "BCM4349B1" }, /* 002.002.014 */
{ 0x4217, "BCM4329B1" }, /* 002.002.023 */
{ 0x6106, "BCM4359C0" }, /* 003.001.006 */
{ 0x4106, "BCM4335A0" }, /* 002.001.006 */
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 74856a5..c41560b 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -981,6 +981,8 @@
hdev->manufacturer = 70;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ sdio_set_drvdata(func, bdev);
+
err = hci_register_dev(hdev);
if (err < 0) {
dev_err(&func->dev, "Can't register HCI device\n");
@@ -988,8 +990,6 @@
return err;
}
- sdio_set_drvdata(func, bdev);
-
/* pm_runtime_enable would be done after the firmware is being
* downloaded because the core layer probably already enables
* runtime PM for this func such as the case host->caps &
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 538232b..6efd981 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -399,6 +399,18 @@
{ USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8852CE Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04c5, 0x1675), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cb8, 0xc558), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3587), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -416,6 +428,9 @@
{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -2801,6 +2816,7 @@
enum {
BTMTK_WMT_INVALID,
BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_PROGRESS,
BTMTK_WMT_PATCH_DONE,
BTMTK_WMT_ON_UNDONE,
BTMTK_WMT_ON_DONE,
@@ -2816,7 +2832,7 @@
struct btmtk_hci_wmt_cmd {
struct btmtk_wmt_hdr hdr;
- u8 data[256];
+ u8 data[];
} __packed;
struct btmtk_hci_wmt_evt {
@@ -2919,7 +2935,7 @@
* to generate the event. Otherwise, the WMT event cannot return from
* the device successfully.
*/
- udelay(100);
+ udelay(500);
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -2995,7 +3011,7 @@
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
- struct btmtk_hci_wmt_cmd wc;
+ struct btmtk_hci_wmt_cmd *wc;
struct btmtk_wmt_hdr *hdr;
int err;
@@ -3004,24 +3020,42 @@
if (hlen > 255)
return -EINVAL;
- hdr = (struct btmtk_wmt_hdr *)&wc;
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
- memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling,
+ * it needs constantly polling control pipe until the host received the
+ * WMT event, thus, we should require to specifically acquire PM counter
+ * on the USB to prevent the interface from entering auto suspended
+ * while WMT cmd/event in progress.
+ */
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto err_free_wc;
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return err;
+ usb_autopm_put_interface(data->intf);
+ goto err_free_wc;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
+
+ usb_autopm_put_interface(data->intf);
+
if (err < 0)
return err;
@@ -3039,13 +3073,14 @@
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return err;
+ goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_wc;
}
/* Parse and handle the return WMT event */
@@ -3081,7 +3116,8 @@
err_free_skb:
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
-
+err_free_wc:
+ kfree(wc);
return err;
}
@@ -3223,9 +3259,9 @@
return err;
}
-static int btusb_mtk_id_get(struct btusb_data *data, u32 *id)
+static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
{
- return btusb_mtk_reg_read(data, 0x80000008, id);
+ return btusb_mtk_reg_read(data, reg, id);
}
static int btusb_mtk_setup(struct hci_dev *hdev)
@@ -3243,7 +3279,7 @@
calltime = ktime_get();
- err = btusb_mtk_id_get(data, &dev_id);
+ err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 259a643..3f6e96a 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1489,8 +1489,10 @@
{ .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
+ { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data },
{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
+ { .compatible = "infineon,cyw55572-bt" },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index b20a40f..d5d2fee 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1214,7 +1214,11 @@
int __init intel_init(void)
{
- platform_driver_register(&intel_driver);
+ int err;
+
+ err = platform_driver_register(&intel_driver);
+ if (err)
+ return err;
return hci_uart_register_proto(&intel_proto);
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 637c5b8..726d5c8 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -490,6 +490,11 @@
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
+ if (percpu_init_rwsem(&hu->proto_lock)) {
+ BT_ERR("Can't allocate semaphore structure");
+ kfree(hu);
+ return -ENOMEM;
+ }
tty->disc_data = hu;
hu->tty = tty;
@@ -502,8 +507,6 @@
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- percpu_init_rwsem(&hu->proto_lock);
-
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index dc7ee5d..eea18ae 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -689,9 +689,9 @@
skb_queue_purge(&qca->tx_wait_q);
skb_queue_purge(&qca->txq);
skb_queue_purge(&qca->rx_memdump_q);
- del_timer(&qca->tx_idle_timer);
- del_timer(&qca->wake_retrans_timer);
destroy_workqueue(qca->workqueue);
+ del_timer_sync(&qca->tx_idle_timer);
+ del_timer_sync(&qca->wake_retrans_timer);
qca->hu = NULL;
kfree_skb(qca->rx_skb);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 9e03402..f2e2e55 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -301,9 +301,12 @@
serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops);
+ if (percpu_init_rwsem(&hu->proto_lock))
+ return -ENOMEM;
+
err = serdev_device_open(hu->serdev);
if (err)
- return err;
+ goto err_rwsem;
err = p->open(hu);
if (err)
@@ -327,7 +330,6 @@
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- percpu_init_rwsem(&hu->proto_lock);
/* Only when vendor specific setup callback is provided, consider
* the manufacturer information valid. This avoids filling in the
@@ -374,6 +376,8 @@
p->close(hu);
err_open:
serdev_device_close(hu->serdev);
+err_rwsem:
+ percpu_free_rwsem(&hu->proto_lock);
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
@@ -395,5 +399,6 @@
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
+ percpu_free_rwsem(&hu->proto_lock);
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 378f5d6..e7eaa87 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -503,13 +503,13 @@
{
struct acpi_device *adev = ACPI_COMPANION(hostdev);
struct acpi_device *child;
+ struct platform_device *pdev;
int ret;
/* Only consider the children of the host */
list_for_each_entry(child, &adev->children, node) {
const char *hid = acpi_device_hid(child);
const struct hisi_lpc_acpi_cell *cell;
- struct platform_device *pdev;
const struct resource *res;
bool found = false;
int num_res;
@@ -571,22 +571,24 @@
ret = platform_device_add_resources(pdev, res, num_res);
if (ret)
- goto fail;
+ goto fail_put_device;
ret = platform_device_add_data(pdev, cell->pdata,
cell->pdata_size);
if (ret)
- goto fail;
+ goto fail_put_device;
ret = platform_device_add(pdev);
if (ret)
- goto fail;
+ goto fail_put_device;
acpi_device_set_enumerated(child);
}
return 0;
+fail_put_device:
+ platform_device_put(pdev);
fail:
hisi_lpc_acpi_remove(hostdev);
return ret;
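The new fail_put_device label follows the usual ownership rule for platform devices that have been allocated but not yet added. A minimal sketch of that rule, with a hypothetical device name:

/* Sketch: before platform_device_add() succeeds, the reference from
 * platform_device_alloc() is dropped with platform_device_put(); after a
 * successful add, teardown uses platform_device_unregister() instead. */
static int add_cell_sketch(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-cell", 0);	/* hypothetical */
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ret;
	}

	/* ... on later teardown: platform_device_unregister(pdev); ... */
	return 0;
}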
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 626dedd..fca0d06 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -351,6 +351,7 @@
np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
if (np) {
err = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (!err)
return res.start;
}
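The added of_node_put() balances the reference taken by the lookup. A minimal sketch of the general rule, with a hypothetical helper:

/* of_find_compatible_node() returns a node with its refcount elevated;
 * the caller owns that reference and must drop it when done. */
static bool node_present_sketch(const char *compatible)
{
	struct device_node *np = of_find_compatible_node(NULL, NULL, compatible);

	if (!np)
		return false;

	of_node_put(np);	/* balance the reference from the lookup */
	return true;
}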
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1bb00a9..f8c29b8 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -224,6 +224,8 @@
dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+ return rdev;
+
err_device_add:
put_device(&rdev->dev);
@@ -266,6 +268,9 @@
/* common code that starts a transfer */
static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
{
+ u32 int_mask, status;
+ bool timeout;
+
if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
dev_dbg(rsb->dev, "RSB transfer still in progress\n");
return -EBUSY;
@@ -273,13 +278,23 @@
reinit_completion(&rsb->complete);
- writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
- rsb->regs + RSB_INTE);
+ int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
+ writel(int_mask, rsb->regs + RSB_INTE);
writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
rsb->regs + RSB_CTRL);
- if (!wait_for_completion_io_timeout(&rsb->complete,
- msecs_to_jiffies(100))) {
+ if (irqs_disabled()) {
+ timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
+ status, (status & int_mask),
+ 10, 100000);
+ writel(status, rsb->regs + RSB_INTS);
+ } else {
+ timeout = !wait_for_completion_io_timeout(&rsb->complete,
+ msecs_to_jiffies(100));
+ status = rsb->status;
+ }
+
+ if (timeout) {
dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */
@@ -291,18 +306,18 @@
return -ETIMEDOUT;
}
- if (rsb->status & RSB_INTS_LOAD_BSY) {
+ if (status & RSB_INTS_LOAD_BSY) {
dev_dbg(rsb->dev, "RSB busy\n");
return -EBUSY;
}
- if (rsb->status & RSB_INTS_TRANS_ERR) {
- if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
+ if (status & RSB_INTS_TRANS_ERR) {
+ if (status & RSB_INTS_TRANS_ERR_ACK) {
dev_dbg(rsb->dev, "RSB slave nack\n");
return -EINVAL;
}
- if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
+ if (status & RSB_INTS_TRANS_ERR_DATA) {
dev_dbg(rsb->dev, "RSB transfer data error\n");
return -EIO;
}
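The new code path polls the hardware when interrupts are disabled, since the completion could never be signalled in that context. A minimal sketch of the same technique, using hypothetical register offsets and masks:

#include <linux/iopoll.h>

static int wait_done_sketch(void __iomem *regs, struct completion *done)
{
	u32 status;

	if (irqs_disabled())
		/* The IRQ handler cannot run, so busy-poll the status
		 * register; readl_poll_timeout_atomic() is safe here. */
		return readl_poll_timeout_atomic(regs + 0x1c, status,
						 status & BIT(0), 10, 100000);

	/* Normal path: the IRQ handler signals the completion. */
	return wait_for_completion_io_timeout(done, msecs_to_jiffies(100)) ?
	       0 : -ETIMEDOUT;
}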
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 18f0650..4ee20be 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3130,13 +3130,27 @@
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
+ int error;
+
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;
+ /*
+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
+ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
+ * dts: Fix timer regression for beagleboard revision c"). This all
+ * can be dropped if we stop supporting old beagleboard revisions
+ * A to B4 at some point.
+ */
+ if (sysc_soc->soc == SOC_3430)
+ error = -ENXIO;
+ else
+ error = -EBUSY;
+
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
- return -ENXIO;
+ return error;
return 0;
}
@@ -3277,7 +3291,9 @@
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
- cancel_delayed_work_sync(&ddata->idle_work);
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d229a2d..b4e65d1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -471,28 +471,41 @@
and SSM (Silicon Secured Memory). Intended consumers of this
driver include crash and makedumpfile.
-endmenu
-
config RANDOM_TRUST_CPU
- bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ bool "Initialize RNG using CPU RNG instructions"
+ default y
depends on ARCH_RANDOM
- default n
help
- Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
- RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
- for the purposes of initializing Linux's CRNG. Since this is not
- something that can be independently audited, this amounts to trusting
- that CPU manufacturer (perhaps with the insistence or mandate
- of a Nation State's intelligence or law enforcement agencies)
- has not installed a hidden back door to compromise the CPU's
- random number generation facilities. This can also be configured
- at boot with "random.trust_cpu=on/off".
+ Initialize the RNG using random numbers supplied by the CPU's
+ RNG instructions (e.g. RDRAND), if supported and available. These
+ random numbers are never used directly, but are rather hashed into
+ the main input pool, and this happens regardless of whether or not
+ this option is enabled. Instead, this option controls whether
+ they are credited and hence can initialize the RNG. Additionally,
+ other sources of randomness are always used, regardless of this
+ setting. Enabling this implies trusting that the CPU can supply high
+ quality and non-backdoored random numbers.
+
+ Say Y here unless you have reason to mistrust your CPU or believe
+ its RNG facilities may be faulty. This may also be configured at
+ boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
- bool "Trust the bootloader to initialize Linux's CRNG"
+ bool "Initialize RNG using bootloader-supplied seed"
+ default y
help
- Some bootloaders can provide entropy to increase the kernel's initial
- device randomness. Say Y here to assume the entropy provided by the
- booloader is trustworthy so it will be added to the kernel's entropy
- pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
+ Initialize the RNG using a seed supplied by the bootloader or boot
+ environment (e.g. EFI or a bootloader-generated device tree). This
+ seed is not used directly, but is rather hashed into the main input
+ pool, and this happens regardless of whether or not this option is
+ enabled. Instead, this option controls whether the seed is credited
+ and hence can initialize the RNG. Additionally, other sources of
+ randomness are always used, regardless of this setting. Enabling
+ this implies trusting that the bootloader can supply high quality and
+ non-backdoored seeds.
+
+ Say Y here unless you have reason to mistrust your bootloader or
+ believe its RNG facilities may be faulty. This may also be configured
+ at boot time with "random.trust_bootloader=on/off".
+
+endmenu
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5952210..a7d9e46 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -427,7 +427,7 @@
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ depends on HW_RANDOM && PCI && ARCH_THUNDER
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index ecb71c4..8cf0ef5 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -114,6 +114,7 @@
err_register:
clk_disable_unprepare(trng->clk);
+ atmel_trng_disable(trng);
return ret;
}
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 3de4a6a..6f66919 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support for Cavium, Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -15,16 +12,146 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <asm/arch_timer.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CAVIUM_RNG_PF 0xA018
+#define PCI_DEVID_CAVIUM_RNG_VF 0xA033
+
+#define HEALTH_STATUS_REG 0x38
+
+/* RST device info */
+#define PCI_DEVICE_ID_RST_OTX2 0xA085
+#define RST_BOOT_REG 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+#define MSEC_TO_NSEC(x) (x * 1000000)
+
struct cavium_rng {
struct hwrng ops;
void __iomem *result;
+ void __iomem *pf_regbase;
+ struct pci_dev *pdev;
+ u64 clock_rate;
+ u64 prev_error;
+ u64 prev_time;
};
+static inline bool is_octeontx(struct pci_dev *pdev)
+{
+ if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)))
+ return true;
+
+ return false;
+}
+
+static u64 rng_get_coprocessor_clkrate(void)
+{
+ u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_RST_OTX2, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto error_put_pdev;
+
+ /* RST: PNR_MUL * 50Mhz gives clockrate */
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+static int check_rng_health(struct cavium_rng *rng)
+{
+ u64 cur_err, cur_time;
+ u64 status, cycles;
+ u64 time_elapsed;
+
+
+ /* Skip checking health for OcteonTx */
+ if (!rng->pf_regbase)
+ return 0;
+
+ status = readq(rng->pf_regbase + HEALTH_STATUS_REG);
+ if (status & BIT_ULL(0)) {
+ dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n");
+ return -EIO;
+ }
+
+ cycles = status >> 1;
+ if (!cycles)
+ return 0;
+
+ cur_time = arch_timer_read_counter();
+
+ /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE]
+ * Number of coprocessor cycles times 2 since the last failure.
+ * This field doesn't get cleared/updated until another failure.
+ */
+ cycles = cycles / 2;
+ cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */
+
+ /* Ignore errors that happened a long time ago; these
+ * are most likely false positive errors.
+ */
+ if (cur_err > MSEC_TO_NSEC(10)) {
+ rng->prev_error = 0;
+ rng->prev_time = 0;
+ return 0;
+ }
+
+ if (rng->prev_error) {
+ /* Calculate time elapsed since last error
+ * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz.
+ */
+ time_elapsed = (cur_time - rng->prev_time) * 10;
+ time_elapsed += rng->prev_error;
+
+ /* Check if current error is a new one or the old one itself.
+ * If error is a new one then consider there is a persistent
+ * issue with entropy, declare hardware failure.
+ */
+ if (cur_err < time_elapsed) {
+ dev_err(&rng->pdev->dev, "HWRNG failure detected\n");
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return -EIO;
+ }
+ }
+
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return 0;
+}
+
/* Read data from the RNG unit */
static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
{
struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
unsigned int size = max;
+ int err = 0;
+
+ err = check_rng_health(p);
+ if (err)
+ return err;
while (size >= 8) {
*((u64 *)dat) = readq(p->result);
@@ -39,6 +166,39 @@
return max;
}
+static int cavium_map_pf_regs(struct cavium_rng *rng)
+{
+ struct pci_dev *pdev;
+
+ /* Health status is not supported on 83xx, skip mapping PF CSRs */
+ if (is_octeontx(rng->pdev)) {
+ rng->pf_regbase = NULL;
+ return 0;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_CAVIUM_RNG_PF, NULL);
+ if (!pdev) {
+ dev_err(&pdev->dev, "Cannot find RNG PF device\n");
+ return -EIO;
+ }
+
+ rng->pf_regbase = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rng->pf_regbase) {
+ dev_err(&pdev->dev, "Failed to map PF CSR region\n");
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+
+ pci_dev_put(pdev);
+
+ /* Get co-processor clock rate */
+ rng->clock_rate = rng_get_coprocessor_clkrate();
+
+ return 0;
+}
+
/* Map Cavium RNG to an HWRNG object */
static int cavium_rng_probe_vf(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -50,6 +210,8 @@
if (!rng)
return -ENOMEM;
+ rng->pdev = pdev;
+
/* Map the RNG result */
rng->result = pcim_iomap(pdev, 0, 0);
if (!rng->result) {
@@ -67,6 +229,11 @@
pci_set_drvdata(pdev, rng);
+ /* Health status is available only at PF, hence map PF registers. */
+ ret = cavium_map_pf_regs(rng);
+ if (ret)
+ return ret;
+
ret = devm_hwrng_register(&pdev->dev, &rng->ops);
if (ret) {
dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
@@ -76,10 +243,18 @@
return 0;
}
+/* Remove the VF */
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+ struct cavium_rng *rng;
+
+ rng = pci_get_drvdata(pdev);
+ iounmap(rng->pf_regbase);
+}
static const struct pci_device_id cavium_rng_vf_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
- {0,},
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) },
+ { 0, }
};
MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
@@ -87,8 +262,9 @@
.name = "cavium_rng_vf",
.id_table = cavium_rng_vf_id_table,
.probe = cavium_rng_probe_vf,
+ .remove = cavium_rng_remove_vf,
};
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index 63d6e68..b965792 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support for Cavium Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -91,4 +88,4 @@
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 8c1c47d..5749998 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
+#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 61c844b..9b182e5 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -272,13 +272,6 @@
goto err;
}
- ret = devm_request_irq(&pdev->dev,
- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- goto err;
- }
-
init_completion(&rngc->rng_op_done);
rngc->rng.name = pdev->name;
@@ -292,6 +285,13 @@
imx_rngc_irq_mask_clear(rngc);
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index b0ded41..e8f9621 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -65,15 +65,14 @@
out_release:
amba_release_regions(dev);
out_clk:
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
return ret;
}
-static int nmk_rng_remove(struct amba_device *dev)
+static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable(rng_clk);
- return 0;
+ clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index e0d77fa..f06e4f9 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -92,7 +92,7 @@
r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
if (r != 0) {
- clk_disable(ddata->clk);
+ clk_disable_unprepare(ddata->clk);
dev_err(dev, "HW init failed: %d\n", r);
return -EIO;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8f14727..05e7339 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -11,8 +11,8 @@
* Copyright 2002 MontaVista Software Inc.
*/
-#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
-#define dev_fmt pr_fmt
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/module.h>
#include <linux/errno.h>
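For illustration of the new pr_fmt/dev_fmt definitions (the call site below is hypothetical): the prefix is now concatenated into the format string at compile time rather than passed as a separate "%s" argument at run time.

#define pr_fmt(fmt) "IPMI message handler: " fmt

#include <linux/printk.h>

static void example_sketch(void)
{
	/* Expands to roughly:
	 * printk(KERN_INFO "IPMI message handler: BMC ready\n");
	 */
	pr_info("BMC ready\n");
}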
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 3de6797..4771397 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -840,6 +840,14 @@
break;
case SSIF_GETTING_EVENTS:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting events\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);
@@ -864,6 +872,14 @@
break;
case SSIF_GETTING_MESSAGES:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting messages\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);
@@ -887,6 +903,13 @@
deliver_recv_msg(ssif_info, msg);
}
break;
+
+ default:
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "Invalid state in message done handling: %d\n",
+ ssif_info->ssif_state);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 94c2b55..7d483c3 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -981,8 +981,8 @@
#endif
[5] = { "zero", 0666, &zero_fops, 0 },
[7] = { "full", 0666, &full_fops, 0 },
- [8] = { "random", 0666, &random_fops, 0 },
- [9] = { "urandom", 0666, &urandom_fops, 0 },
+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
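Setting FMODE_NOWAIT on /dev/random and /dev/urandom lets callers ask for non-blocking behaviour per I/O rather than per open. A small userspace sketch under that assumption (requires a glibc with preadv2 support; illustrative only):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/dev/random", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;

	/* With FMODE_NOWAIT set, RWF_NOWAIT may fail with EAGAIN instead of
	 * blocking if the pool is not yet ready. */
	n = preadv2(fd, &iov, 1, -1, RWF_NOWAIT);
	printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}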
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5f541c9..b54481e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,310 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * random.c -- A strong random number generator
- *
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
- *
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
*
- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
- * rights reserved.
+ * This driver produces cryptographically secure pseudorandom data. It is divided
+ * into roughly six sections, each with a section header:
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
+ * - Initialization and readiness waiting.
+ * - Fast key erasure RNG, the "crng".
+ * - Entropy accumulation and extraction routines.
+ * - Entropy collection routines.
+ * - Userspace reader/writer interfaces.
+ * - Sysctl interface.
*
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL are
- * required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * (now, with legal B.S. out of the way.....)
- *
- * This routine gathers environmental noise from device drivers, etc.,
- * and returns good random numbers, suitable for cryptographic use.
- * Besides the obvious cryptographic uses, these numbers are also good
- * for seeding TCP sequence numbers, and other places where it is
- * desirable to have numbers which are not only random, but hard to
- * predict by an attacker.
- *
- * Theory of operation
- * ===================
- *
- * Computers are very predictable devices. Hence it is extremely hard
- * to produce truly random numbers on a computer --- as opposed to
- * pseudo-random numbers, which can easily generated by using a
- * algorithm. Unfortunately, it is very easy for attackers to guess
- * the sequence of pseudo-random number generators, and for some
- * applications this is not acceptable. So instead, we must try to
- * gather "environmental noise" from the computer's environment, which
- * must be hard for outside attackers to observe, and use that to
- * generate random numbers. In a Unix environment, this is best done
- * from inside the kernel.
- *
- * Sources of randomness from the environment include inter-keyboard
- * timings, inter-interrupt timings from some interrupts, and other
- * events which are both (a) non-deterministic and (b) hard for an
- * outside observer to measure. Randomness from these sources are
- * added to an "entropy pool", which is mixed using a CRC-like function.
- * This is not cryptographically strong, but it is adequate assuming
- * the randomness is not chosen maliciously, and it is fast enough that
- * the overhead of doing it on every interrupt is very reasonable.
- * As random bytes are mixed into the entropy pool, the routines keep
- * an *estimate* of how many bits of randomness have been stored into
- * the random number generator's internal state.
- *
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
- * exposing the internal state of the entropy pool. It is believed to
- * be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
- * returned from the generator is less than the inherent entropy in
- * the pool, the output data is totally unpredictable. For this
- * reason, the routine decreases its internal estimate of how many
- * bits of "true randomness" are contained in the entropy pool as it
- * outputs random numbers.
- *
- * If this estimate goes to zero, the routine can still generate
- * random numbers; however, an attacker may (at least in theory) be
- * able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
- * not believed to be feasible, but there is a remote possibility.
- * Nonetheless, these numbers should be useful for the vast majority
- * of purposes.
- *
- * Exported interfaces ---- output
- * ===============================
- *
- * There are four exported interfaces; two for use within the kernel,
- * and two or use from userspace.
- *
- * Exported interfaces ---- userspace output
- * -----------------------------------------
- *
- * The userspace interfaces are two character devices /dev/random and
- * /dev/urandom. /dev/random is suitable for use when very high
- * quality randomness is desired (for example, for key generation or
- * one-time pads), as it will only return a maximum of the number of
- * bits of randomness (as estimated by the random number generator)
- * contained in the entropy pool.
- *
- * The /dev/urandom device does not have this limit, and will return
- * as many bytes as are requested. As more and more random bytes are
- * requested without giving time for the entropy pool to recharge,
- * this will result in random numbers that are merely cryptographically
- * strong. For many applications, however, this is acceptable.
- *
- * Exported interfaces ---- kernel output
- * --------------------------------------
- *
- * The primary kernel interface is
- *
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer. This is equivalent to a
- * read from /dev/urandom.
- *
- * For less critical applications, there are the functions:
- *
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
- *
- * These are produced by a cryptographic RNG seeded from get_random_bytes,
- * and so do not deplete the entropy pool as much. These are recommended
- * for most in-kernel operations *if the result is going to be stored in
- * the kernel*.
- *
- * Specifically, the get_random_int() family do not attempt to do
- * "anti-backtracking". If you capture the state of the kernel (e.g.
- * by snapshotting the VM), you can figure out previous get_random_int()
- * return values. But if the value is stored in the kernel anyway,
- * this is not a problem.
- *
- * It *is* safe to expose get_random_int() output to attackers (e.g. as
- * network cookies); given outputs 1..n, it's not feasible to predict
- * outputs 0 or n+1. The only concern is an attacker who breaks into
- * the kernel later; the get_random_int() engine is not reseeded as
- * often as the get_random_bytes() one.
- *
- * get_random_bytes() is needed for keys that need to stay secret after
- * they are erased from the kernel. For example, any key that will
- * be wrapped and stored encrypted. And session encryption keys: we'd
- * like to know that after the session is closed and the keys erased,
- * the plaintext is unrecoverable to someone who recorded the ciphertext.
- *
- * But for network ports/cookies, stack canaries, PRNG seeds, address
- * space layout randomization, session *authentication* keys, or other
- * applications where the sensitive data is stored in the kernel in
- * plaintext for as long as it's sensitive, the get_random_int() family
- * is just fine.
- *
- * Consider ASLR. We want to keep the address space secret from an
- * outside attacker while the process is running, but once the address
- * space is torn down, it's of no use to an attacker any more. And it's
- * stored in kernel data structures as long as it's alive, so worrying
- * about an attacker's ability to extrapolate it from the get_random_int()
- * CRNG is silly.
- *
- * Even some cryptographic keys are safe to generate with get_random_int().
- * In particular, keys for SipHash are generally fine. Here, knowledge
- * of the key authorizes you to do something to a kernel object (inject
- * packets to a network connection, or flood a hash table), and the
- * key is stored with the object being protected. Once it goes away,
- * we no longer care if anyone knows the key.
- *
- * prandom_u32()
- * -------------
- *
- * For even weaker applications, see the pseudorandom generator
- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
- * numbers aren't security-critical at all, these are *far* cheaper.
- * Useful for self-tests, random error simulation, randomized backoffs,
- * and any other application where you trust that nobody is trying to
- * maliciously mess with you by guessing the "random" numbers.
- *
- * Exported interfaces ---- input
- * ==============================
- *
- * The current exported interfaces for gathering environmental noise
- * from the devices are:
- *
- * void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
- * unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
- * void add_disk_randomness(struct gendisk *disk);
- *
- * add_device_randomness() is for adding data to the random pool that
- * is likely to differ between two devices (or possibly even per boot).
- * This would be things like MAC addresses or serial numbers, or the
- * read-out of the RTC. This does *not* add any actual entropy to the
- * pool, but it initializes the pool to different values for devices
- * that might otherwise be identical and have very little entropy
- * available to them (particularly common in the embedded world).
- *
- * add_input_randomness() uses the input layer interrupt timing, as well as
- * the event type information from the hardware.
- *
- * add_interrupt_randomness() uses the interrupt timing as random
- * inputs to the entropy pool. Using the cycle counters and the irq source
- * as inputs, it feeds the randomness roughly once a second.
- *
- * add_disk_randomness() uses what amounts to the seek time of block
- * layer request events, on a per-disk_devt basis, as input to the
- * entropy pool. Note that high-speed solid state drives with very low
- * seek times do not make for good sources of entropy, as their seek
- * times are usually fairly consistent.
- *
- * All of these routines try to estimate how many bits of randomness a
- * particular randomness source. They do this by keeping track of the
- * first and second order deltas of the event timings.
- *
- * Ensuring unpredictability at system startup
- * ============================================
- *
- * When any operating system starts up, it will go through a sequence
- * of actions that are fairly predictable by an adversary, especially
- * if the start-up does not involve interaction with a human operator.
- * This reduces the actual number of bits of unpredictability in the
- * entropy pool below the value in entropy_count. In order to
- * counteract this effect, it helps to carry information in the
- * entropy pool across shut-downs and start-ups. To do this, put the
- * following lines an appropriate script which is run during the boot
- * sequence:
- *
- * echo "Initializing random number generator..."
- * random_seed=/var/run/random-seed
- * # Carry a random seed from start-up to start-up
- * # Load and then save the whole entropy pool
- * if [ -f $random_seed ]; then
- * cat $random_seed >/dev/urandom
- * else
- * touch $random_seed
- * fi
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * and the following lines in an appropriate script which is run as
- * the system is shutdown:
- *
- * # Carry a random seed from shut-down to start-up
- * # Save the whole entropy pool
- * echo "Saving random seed..."
- * random_seed=/var/run/random-seed
- * touch $random_seed
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * For example, on most modern systems using the System V init
- * scripts, such code fragments would be found in
- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
- *
- * Effectively, these commands cause the contents of the entropy pool
- * to be saved at shut-down time and reloaded into the entropy pool at
- * start-up. (The 'dd' in the addition to the bootup script is to
- * make sure that /etc/random-seed is different for every start-up,
- * even if the system crashes without executing rc.0.) Even with
- * complete knowledge of the start-up activities, predicting the state
- * of the entropy pool requires knowledge of the previous history of
- * the system.
- *
- * Configuring the /dev/random driver under Linux
- * ==============================================
- *
- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
- * the /dev/mem major number (#1). So if your system does not have
- * /dev/random and /dev/urandom created already, they can be created
- * by using the commands:
- *
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
- *
- * Acknowledgements:
- * =================
- *
- * Ideas for constructing this random number generator were derived
- * from Pretty Good Privacy's random number generator, and from private
- * discussions with Phil Karn. Colin Plumb provided a faster random
- * number generator, which speed up the mixing function of the entropy
- * pool, taken from PGPfone. Dale Worley has also contributed many
- * useful ideas and suggestions to improve this driver.
- *
- * Any flaws in the design are solely my responsibility, and should
- * not be attributed to the Phil, Colin, or any of authors of PGP.
- *
- * Further background information on this topic may be obtained from
- * RFC 1750, "Randomness Recommendations for Security", by Donald
- * Eastlake, Steve Crocker, and Jeff Schiller.
+ * The high level overview is that there is one input pool, into which
+ * various pieces of data are hashed. Prior to initialization, some of that
+ * data is then "credited" as having a certain number of bits of entropy.
+ * When enough bits of entropy are available, the hash is finalized and
+ * handed as a key to a stream cipher that expands it indefinitely for
+ * various consumers. This key is periodically refreshed as the various
+ * entropy collectors, described below, add data to the input pool.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -327,7 +43,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -335,804 +50,799 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
-#include <crypto/chacha.h>
-#include <crypto/sha.h>
-
-#include <asm/processor.h>
#include <linux/uaccess.h>
+#include <linux/siphash.h>
+#include <linux/uio.h>
+#include <crypto/chacha.h>
+#include <crypto/blake2s.h>
+#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/random.h>
-
-/* #define ADD_INTERRUPT_BENCH */
-
-/*
- * Configuration information
- */
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
-
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
-
-/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
- */
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-
-/*
- * If the entropy count falls under this number of bits, then we
- * should wake up processes which are selecting or polling on write
- * access to /dev/random.
- */
-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
-
-/*
- * Originally, we used a primitive polynomial of degree .poolwords
- * over GF(2). The taps for various sizes are defined below. They
- * were chosen to be evenly spaced except for the last tap, which is 1
- * to get the twisting happening as fast as possible.
- *
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a (modified) twisted Generalized Feedback Shift
- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
- * generators. ACM Transactions on Modeling and Computer Simulation
- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Modeling and Computer
- * Simulation 4:254-266)
- *
- * Thanks to Colin Plumb for suggesting this.
- *
- * The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
- * it be a good non-cryptographic hash; i.e. it not produce collisions
- * when fed "random" data of the sort we expect to see. As long as
- * the pool state differs for different inputs, we have preserved the
- * input entropy and done a good job. The fact that an intelligent
- * attacker can construct inputs that will produce controlled
- * alterations to the pool's state is not important because we don't
- * consider such inputs to contribute any randomness. The only
- * property we need with respect to them is that the attacker can't
- * increase his/her knowledge of the pool's state. Since all
- * additions are reversible (knowing the final state and the input,
- * you can reconstruct the initial state), if an attacker has any
- * uncertainty about the initial state, he/she can only shuffle that
- * uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
- *
- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
- * Videau in their paper, "The Linux Pseudorandom Number Generator
- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
- * paper, they point out that we are not using a true Twisted GFSR,
- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
- * is, with only three taps, instead of the six that we are using).
- * As a result, the resulting polynomial is neither primitive nor
- * irreducible, and hence does not have a maximal period over
- * GF(2**32). They suggest a slight change to the generator
- * polynomial which improves the resulting TGFSR polynomial to be
- * irreducible, which we have made here.
- */
-static const struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
-};
-
-/*
- * Static global variables
- */
-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static struct fasync_struct *fasync;
-
-static DEFINE_SPINLOCK(random_ready_list_lock);
-static LIST_HEAD(random_ready_list);
-
-struct crng_state {
- __u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
-};
-
-static struct crng_state primary_crng = {
- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
-};
-
-/*
- * crng_init = 0 --> Uninitialized
- * 1 --> Initialized
- * 2 --> Initialized from input_pool
- *
- * crng_init is protected by primary_crng->lock, and only increases
- * its value (from 0->1->2).
- */
-static int crng_init = 0;
-static bool crng_need_final_init = false;
-#define crng_ready() (likely(crng_init > 1))
-static int crng_init_cnt = 0;
-static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
-static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
-static void process_random_ready_list(void);
-static void _get_random_bytes(void *buf, int nbytes);
-
-static struct ratelimit_state unseeded_warning =
- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
-static struct ratelimit_state urandom_warning =
- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
-
-static int ratelimit_disable __read_mostly;
-
-module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
-MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
-
-/**********************************************************************
- *
- * OS independent entropy store. Here are the functions which handle
- * storing entropy in an entropy pool.
- *
- **********************************************************************/
-
-struct entropy_store;
-struct entropy_store {
- /* read-only data: */
- const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
-
- /* read-write data: */
- spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
- int entropy_count;
- unsigned int initialized:1;
- unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
-};
-
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips);
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
-
-static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
-};
-
-static __u32 const twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
-
-/*
- * This function adds bytes into the entropy "pool". It does not
- * update the entropy estimate. The caller should call
- * credit_entropy_bits if this is appropriate.
- *
- * The pool is stirred with a primitive polynomial of the appropriate
- * degree, and then twisted. We twist by three bits at a time because
- * it's cheap to do so and helps slightly in the expected case where
- * the entropy is concentrated in the low-order bits.
- */
-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
-{
- unsigned long i, tap1, tap2, tap3, tap4, tap5;
- int input_rotate;
- int wordmask = r->poolinfo->poolwords - 1;
- const char *bytes = in;
- __u32 w;
-
- tap1 = r->poolinfo->tap1;
- tap2 = r->poolinfo->tap2;
- tap3 = r->poolinfo->tap3;
- tap4 = r->poolinfo->tap4;
- tap5 = r->poolinfo->tap5;
-
- input_rotate = r->input_rotate;
- i = r->add_ptr;
-
- /* mix one byte at a time to simplify size handling and churn faster */
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
-
- /* XOR in the various taps */
- w ^= r->pool[i];
- w ^= r->pool[(i + tap1) & wordmask];
- w ^= r->pool[(i + tap2) & wordmask];
- w ^= r->pool[(i + tap3) & wordmask];
- w ^= r->pool[(i + tap4) & wordmask];
- w ^= r->pool[(i + tap5) & wordmask];
-
- /* Mix the result back in with a twist */
- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
-
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
- }
-
- r->input_rotate = input_rotate;
- r->add_ptr = i;
-}
-
-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
-{
- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
- _mix_pool_bytes(r, in, nbytes);
-}
-
-static void mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
-{
- unsigned long flags;
-
- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
- _mix_pool_bytes(r, in, nbytes);
- spin_unlock_irqrestore(&r->lock, flags);
-}
-
-struct fast_pool {
- __u32 pool[4];
- unsigned long last;
- unsigned short reg_idx;
- unsigned char count;
-};
-
-/*
- * This is a fast mixing routine used by the interrupt randomness
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
- */
-static void fast_mix(struct fast_pool *f)
-{
- __u32 a = f->pool[0], b = f->pool[1];
- __u32 c = f->pool[2], d = f->pool[3];
-
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
-
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
-
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
-
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
-
- f->pool[0] = a; f->pool[1] = b;
- f->pool[2] = c; f->pool[3] = d;
- f->count++;
-}
-
-static void process_random_ready_list(void)
-{
- unsigned long flags;
- struct random_ready_callback *rdy, *tmp;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
- struct module *owner = rdy->owner;
-
- list_del_init(&rdy->list);
- rdy->func(rdy);
- module_put(owner);
- }
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-}
-
-/*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
- */
-static void credit_entropy_bits(struct entropy_store *r, int nbits)
-{
- int entropy_count, orig, has_initialized = 0;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
-
- if (!nbits)
- return;
-
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- if (nfrac < 0) {
- /* Debit */
- entropy_count += nfrac;
- } else {
- /*
- * Credit: we have to account for the possibility of
- * overwriting already present entropy. Even in the
- * ideal case of pure Shannon entropy, new contributions
- * approach the full value asymptotically:
- *
- * entropy <- entropy + (pool_size - entropy) *
- * (1 - exp(-add_entropy/pool_size))
- *
- * For add_entropy <= pool_size/2 then
- * (1 - exp(-add_entropy/pool_size)) >=
- * (add_entropy/pool_size)*0.7869...
- * so we can approximate the exponential with
- * 3/4*add_entropy/pool_size and still be on the
- * safe side by adding at most pool_size/2 at a time.
- *
- * The use of pool_size-2 in the while statement is to
- * prevent rounding artifacts from making the loop
- * arbitrarily long; this limits the loop to log2(pool_size)*2
- * turns no matter how large nbits is.
- */
- int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
- /* The +2 corresponds to the /4 in the denominator */
-
- do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
- unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
-
- entropy_count += add;
- pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
- }
-
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
- entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- if (has_initialized) {
- r->initialized = 1;
- kill_fasync(&fasync, SIGIO, POLL_IN);
- }
-
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
-
- if (r == &input_pool) {
- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
-
- if (crng_init < 2) {
- if (entropy_bits < 128)
- return;
- crng_reseed(&primary_crng, r);
- entropy_bits = ENTROPY_BITS(r);
- }
- }
-}
-
-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
-{
- const int nbits_max = r->poolinfo->poolwords * 32;
-
- if (nbits < 0)
- return -EINVAL;
-
- /* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
-
- credit_entropy_bits(r, nbits);
- return 0;
-}
-
/*********************************************************************
*
- * CRNG using CHACHA20
+ * Initialization and readiness waiting.
+ *
+ * Much of the RNG infrastructure is devoted to various dependencies
+ * being able to wait until the RNG has collected enough entropy and
+ * is ready for safe consumption.
*
*********************************************************************/
-#define CRNG_RESEED_INTERVAL (300*HZ)
-
-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
-
-#ifdef CONFIG_NUMA
/*
- * Hack to deal with crazy userspace progams when they are all trying
- * to access /dev/urandom in parallel. The programs are almost
- * certainly doing something terribly wrong, but we'll work around
- * their brain damage.
+ * crng_init is protected by base_crng->lock, and only increases
+ * its value (from empty->early->ready).
*/
-static struct crng_state **crng_node_pool __read_mostly;
+static enum {
+ CRNG_EMPTY = 0, /* Little to no entropy collected */
+ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
+ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
+} crng_init __read_mostly = CRNG_EMPTY;
+#define crng_ready() (likely(crng_init >= CRNG_READY))
+/* Various types of waiters for crng_init->CRNG_READY transition. */
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+static struct fasync_struct *fasync;
+static DEFINE_SPINLOCK(random_ready_chain_lock);
+static RAW_NOTIFIER_HEAD(random_ready_chain);
+
+/* Control how we warn userspace. */
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
+static int ratelimit_disable __read_mostly =
+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
+/*
+ * Returns whether or not the input pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
+ * ,u64,int,long} family of functions.
+ *
+ * Returns: true if the input pool has been seeded.
+ * false if the input pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
+
+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+static void try_to_generate_entropy(void);
+
+/*
+ * Wait for the input pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the input pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ while (!crng_ready()) {
+ int ret;
+
+ try_to_generate_entropy();
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
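A usage sketch for the interface above (the caller and key size are hypothetical): code that must not derive a long-term secret before the pool is seeded can gate on wait_for_random_bytes() first.

static int make_session_key_sketch(u8 key[32])
{
	int ret = wait_for_random_bytes();	/* may return -ERESTARTSYS */

	if (ret)
		return ret;

	get_random_bytes(key, 32);	/* pool is guaranteed seeded here */
	return 0;
}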
+
+/*
+ * Add a callback function that will be invoked when the input
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
+ */
+int __cold register_random_ready_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret = -EALREADY;
+
+ if (crng_ready())
+ return ret;
+
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ if (!crng_ready())
+ ret = raw_notifier_chain_register(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
+}
+
+/*
+ * Delete a previously registered readiness callback function.
+ */
+int __cold unregister_random_ready_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
+}
+
+static void __cold process_random_ready_list(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&random_ready_chain_lock, flags);
+ raw_notifier_call_chain(&random_ready_chain, 0, NULL);
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+}
+
+#define warn_unseeded_randomness() \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init)
+
+
+/*********************************************************************
+ *
+ * Fast key erasure RNG, the "crng".
+ *
+ * These functions expand entropy from the entropy extractor into
+ * long streams for external consumption using the "fast key erasure"
+ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
+ *
+ * There are a few exported interfaces for use by other drivers:
+ *
+ * void get_random_bytes(void *buf, size_t len)
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
+ *
+ * These interfaces will return the requested number of random bytes
+ * into the given buffer or as a return value. This is equivalent to
+ * a read from /dev/urandom. The u32, u64, int, and long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
+ *
+ *********************************************************************/
+
+enum {
+ CRNG_RESEED_START_INTERVAL = HZ,
+ CRNG_RESEED_INTERVAL = 60 * HZ
+};
+
+static struct {
+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+ unsigned long birth;
+ unsigned long generation;
+ spinlock_t lock;
+} base_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
+};
+
+struct crng {
+ u8 key[CHACHA_KEY_SIZE];
+ unsigned long generation;
+ local_lock_t lock;
+};
+
+static DEFINE_PER_CPU(struct crng, crngs) = {
+ .generation = ULONG_MAX,
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
+};
+
+/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
+static void extract_entropy(void *buf, size_t len);
+
+/* This extracts a new crng key from the input pool. */
+static void crng_reseed(void)
+{
+ unsigned long flags;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
+
+ extract_entropy(key, sizeof(key));
+
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+ * and update the generation counter. We avoid hitting ULONG_MAX,
+ * because the per-cpu crngs are initialized to ULONG_MAX, so this
+ * forces new CPUs that come online to always initialize.
+ */
+ spin_lock_irqsave(&base_crng.lock, flags);
+ memcpy(base_crng.key, key, sizeof(base_crng.key));
+ next_gen = base_crng.generation + 1;
+ if (next_gen == ULONG_MAX)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ WRITE_ONCE(base_crng.birth, jiffies);
+ if (!crng_ready())
+ crng_init = CRNG_READY;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
+}
+
+/*
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
+ *
+ * The returned ChaCha state contains within it a copy of the old
+ * key value, at index 4, so the state should always be zeroed out
+ * immediately after using in order to maintain forward secrecy.
+ * If the state cannot be erased in a timely manner, then it is
+ * safer to set the random_data parameter to &chacha_state[4] so
+ * that this function overwrites it before returning.
+ */
+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
+{
+ u8 first_block[CHACHA_BLOCK_SIZE];
+
+ BUG_ON(random_data_len > 32);
+
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ chacha20_block(chacha_state, first_block);
+
+ memcpy(key, first_block, CHACHA_KEY_SIZE);
+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memzero_explicit(first_block, sizeof(first_block));
+}
+
+/*
+ * Return whether the crng seed is considered to be sufficiently old
+ * that a reseeding is needed. This happens if the last reseeding
+ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
+ * proportional to the uptime.
+ */
+static bool crng_has_old_seed(void)
+{
+ static bool early_boot = true;
+ unsigned long interval = CRNG_RESEED_INTERVAL;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+}
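To make the early-boot scaling concrete, here is a small stand-alone model of the interval computation above (illustration only; HZ is fixed at 100 and the saturating types are simplified):

#include <stdio.h>

#define HZ 100
#define CRNG_RESEED_START_INTERVAL (HZ)
#define CRNG_RESEED_INTERVAL (60 * HZ)

/* Mirrors crng_has_old_seed(): during early boot, reseed roughly every uptime/2 seconds. */
static unsigned int reseed_interval(unsigned int uptime_secs)
{
	unsigned int early = uptime_secs / 2 * HZ;

	if (uptime_secs >= CRNG_RESEED_INTERVAL / HZ * 2)
		return CRNG_RESEED_INTERVAL;
	return early > CRNG_RESEED_START_INTERVAL ? early : CRNG_RESEED_START_INTERVAL;
}

int main(void)
{
	unsigned int t;

	for (t = 1; t <= 121; t += 30)
		printf("uptime %3us -> next reseed after %2us\n", t, reseed_interval(t) / HZ);
	return 0;
}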
+
+/*
+ * This function returns a ChaCha state that you may use for generating
+ * random data. It also returns up to 32 bytes on its own of random data
+ * that may be used; random_data_len may not be greater than 32.
+ */
+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
+{
+ unsigned long flags;
+ struct crng *crng;
+
+ BUG_ON(random_data_len > 32);
+
+ /*
+ * For the fast path, we check whether we're ready, unlocked first, and
+ * then re-check once locked later. In the case where we're really not
+ * ready, we do fast key erasure with the base_crng directly, extracting
+ * when crng_init is CRNG_EMPTY.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+ if (!ready) {
+ if (crng_init == CRNG_EMPTY)
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
+ }
+
+ /*
+ * If the base_crng is old enough, we reseed, which in turn bumps the
+ * generation counter that we check below.
+ */
+ if (unlikely(crng_has_old_seed()))
+ crng_reseed();
+
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
+
+ /*
+ * If our per-cpu crng is older than the base_crng, then it means
+ * somebody reseeded the base_crng. In that case, we do fast key
+ * erasure on the base_crng, and use its output as the new key
+ * for our per-cpu crng. This brings us up to date with base_crng.
+ */
+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
+ spin_lock(&base_crng.lock);
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ crng->key, sizeof(crng->key));
+ crng->generation = base_crng.generation;
+ spin_unlock(&base_crng.lock);
+ }
+
+ /*
+ * Finally, when we've made it this far, our per-cpu crng has an up
+ * to date key, and we can do fast key erasure with it to produce
+ * some random data and a ChaCha state for the caller. All other
+ * branches of this function are "unlikely", so most of the time we
+ * should wind up here immediately.
+ */
+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
+ local_unlock_irqrestore(&crngs.lock, flags);
+}
+
+static void _get_random_bytes(void *buf, size_t len)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+ size_t first_block_len;
+
+ if (!len)
+ return;
+
+ first_block_len = min_t(size_t, 32, len);
+ crng_make_state(chacha_state, buf, first_block_len);
+ len -= first_block_len;
+ buf += first_block_len;
+
+ while (len) {
+ if (len < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+ memcpy(buf, tmp, len);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
+ }
+
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ len -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
+ }
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+}
+
+/*
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not rely on the hardware random
+ * number generator. For random bytes direct from the hardware RNG
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
+ */
+void get_random_bytes(void *buf, size_t len)
+{
+ warn_unseeded_randomness();
+ _get_random_bytes(buf, len);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+static ssize_t get_random_bytes_user(struct iov_iter *iter)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 block[CHACHA_BLOCK_SIZE];
+ size_t ret = 0, copied;
+
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+ * bytes, in case userspace causes copy_to_iter() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ /*
+ * However, if we're doing a read of len <= 32, we don't need to
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
+ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ goto out_zero_chacha;
+ }
+
+ for (;;) {
+ chacha20_block(chacha_state, block);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+
+ copied = copy_to_iter(block, sizeof(block), iter);
+ ret += copied;
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
+
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ }
+
+ memzero_explicit(block, sizeof(block));
+out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ return ret ? ret : -EFAULT;
+}
+
+/*
+ * Batched entropy returns random integers. The quality of the random
+ * number is as good as that of /dev/urandom. In order to ensure that the randomness
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
+ */
+
+#define DEFINE_BATCHED_ENTROPY(type) \
+struct batch_ ##type { \
+ /* \
+ * We make this 1.5x a ChaCha block, so that we get the \
+ * remaining 32 bytes from fast key erasure, plus one full \
+ * block from the detached ChaCha state. We can increase \
+ * the size of this later if needed so long as we keep the \
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
+ */ \
+ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ local_lock_t lock; \
+ unsigned long generation; \
+ unsigned int position; \
+}; \
+ \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
+ .position = UINT_MAX \
+}; \
+ \
+type get_random_ ##type(void) \
+{ \
+ type ret; \
+ unsigned long flags; \
+ struct batch_ ##type *batch; \
+ unsigned long next_gen; \
+ \
+ warn_unseeded_randomness(); \
+ \
+ if (!crng_ready()) { \
+ _get_random_bytes(&ret, sizeof(ret)); \
+ return ret; \
+ } \
+ \
+ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
+ batch = raw_cpu_ptr(&batched_entropy_##type); \
+ \
+ next_gen = READ_ONCE(base_crng.generation); \
+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
+ next_gen != batch->generation) { \
+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
+ batch->position = 0; \
+ batch->generation = next_gen; \
+ } \
+ \
+ ret = batch->entropy[batch->position]; \
+ batch->entropy[batch->position] = 0; \
+ ++batch->position; \
+ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u32)
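For reference, with CHACHA_BLOCK_SIZE = 64 the 1.5-block formula above gives 96 bytes per batch: 12 integers for u64 and 24 for u32. A stand-alone compile-time check of that arithmetic (not part of the patch):

#include <stdint.h>

#define CHACHA_BLOCK_SIZE 64

/* (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE with integer_blocks == 1 */
_Static_assert(CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(uint64_t)) == 12,
	       "u64 batch holds 12 integers (96 bytes)");
_Static_assert(CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(uint32_t)) == 24,
	       "u32 batch holds 24 integers (96 bytes)");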
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int __cold random_prepare_cpu(unsigned int cpu)
+{
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
+}
#endif
-static void invalidate_batched_entropy(void);
-static void numa_crng_init(void);
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. It is not recommended for
+ * use. Use get_random_bytes() instead. It returns the number of
+ * bytes filled in.
+ */
+size_t __must_check get_random_bytes_arch(void *buf, size_t len)
+{
+ size_t left = len;
+ u8 *p = buf;
+
+ while (left) {
+ unsigned long v;
+ size_t block_len = min_t(size_t, left, sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+ memcpy(p, &v, block_len);
+ p += block_len;
+ left -= block_len;
+ }
+
+ return len - left;
+}
+EXPORT_SYMBOL(get_random_bytes_arch);
+
+
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ * static void mix_pool_bytes(const void *buf, size_t len)
+ *
+ * After which, if added entropy should be credited:
+ *
+ * static void credit_init_bits(size_t bits)
+ *
+ * Finally, extract entropy via:
+ *
+ * static void extract_entropy(void *buf, size_t len)
+ *
+ **********************************************************************/
+
+enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
+ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
+};
+
+static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+ unsigned int init_bits;
+} input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+ .hash.outlen = BLAKE2S_HASH_SIZE,
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};
+
+static void _mix_pool_bytes(const void *buf, size_t len)
+{
+ blake2s_update(&input_pool.hash, buf, len);
+}
+
+/*
+ * This function adds bytes into the input pool. It does not
+ * update the initialization bit counter; the caller should call
+ * credit_init_bits if this is appropriate.
+ */
+static void mix_pool_bytes(const void *buf, size_t len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+}
+
+/*
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key that is then expanded block-by-block.
+ */
+static void extract_entropy(void *buf, size_t len)
+{
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+ struct {
+ unsigned long rdseed[32 / sizeof(long)];
+ size_t counter;
+ } block;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
+ if (!arch_get_random_seed_long(&block.rdseed[i]) &&
+ !arch_get_random_long(&block.rdseed[i]))
+ block.rdseed[i] = random_get_entropy();
+ }
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+
+ /* seed = HASHPRF(last_key, entropy_input) */
+ blake2s_final(&input_pool.hash, seed);
+
+ /* next_key = HASHPRF(seed, RDSEED || 0) */
+ block.counter = 0;
+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+
+ while (len) {
+ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ len -= i;
+ buf += i;
+ }
+
+ memzero_explicit(seed, sizeof(seed));
+ memzero_explicit(&block, sizeof(block));
+}
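The extract-then-expand structure above can be sketched in user space as follows. This illustration uses libsodium's crypto_generichash() (BLAKE2b, not the kernel's BLAKE2s) purely to show the data flow (seed = H(last_key, pool); next_key = H(seed, fresh_input || 0); output_i = H(seed, fresh_input || i)); it is not the kernel implementation:

#include <sodium.h>
#include <stdint.h>
#include <string.h>

#define SEED_LEN crypto_generichash_BYTES   /* 32 bytes, like BLAKE2S_HASH_SIZE */
#define BLOCK_LEN 40                        /* 32 bytes of fresh input + 8-byte counter */

/* Expand step: fill buf block-by-block from a keyed hash of (input || counter). */
static void expand(unsigned char *buf, size_t len,
		   const unsigned char seed[SEED_LEN],
		   unsigned char block[BLOCK_LEN])
{
	unsigned char out[SEED_LEN];
	uint64_t counter = 0;

	while (len) {
		size_t n = len < sizeof(out) ? len : sizeof(out);

		/* output = H_seed(input || ++counter) */
		++counter;
		memcpy(block + 32, &counter, sizeof(counter));
		crypto_generichash(out, sizeof(out), block, BLOCK_LEN, seed, SEED_LEN);
		memcpy(buf, out, n);
		buf += n;
		len -= n;
	}
	sodium_memzero(out, sizeof(out));
}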
+
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
+{
+ unsigned int new, orig, add;
+ unsigned long flags;
+
+ if (!bits)
+ return;
+
+ add = min_t(size_t, bits, POOL_BITS);
+
+ do {
+ orig = READ_ONCE(input_pool.init_bits);
+ new = min_t(unsigned int, POOL_BITS, orig + add);
+ } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
+
+ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
+ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (urandom_warning.missed)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+ urandom_warning.missed);
+ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
+ spin_lock_irqsave(&base_crng.lock, flags);
+ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
+ if (crng_init == CRNG_EMPTY) {
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_init = CRNG_EARLY;
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ }
+}
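Concretely, with a 32-byte BLAKE2s hash the pool saturates at 256 bits, flips to CRNG_EARLY at 128 and to CRNG_READY at 256. A minimal stand-alone model of the saturating accounting (no locking or cmpxchg, illustration only):

#include <stdio.h>

enum { POOL_BITS = 256, POOL_READY_BITS = 256, POOL_EARLY_BITS = 128 };

static unsigned int init_bits;

static void credit(unsigned int bits)
{
	unsigned int orig = init_bits;
	unsigned int new = orig + bits > POOL_BITS ? POOL_BITS : orig + bits;

	init_bits = new;
	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS)
		puts("-> CRNG_READY");
	else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS)
		puts("-> CRNG_EARLY");
}

int main(void)
{
	/* Four 64-bit credits: the second crosses EARLY, the fourth READY. */
	credit(64); credit(64); credit(64); credit(64);
	return 0;
}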
+
+
+/**********************************************************************
+ *
+ * Entropy collection routines.
+ *
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+ * void add_device_randomness(const void *buf, size_t len);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ * void add_bootloader_randomness(const void *buf, size_t len);
+ * void add_interrupt_randomness(int irq);
+ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* credit any actual entropy to
+ * the pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
+ * and device tree, and credits its input depending on whether or not the
+ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the input pool roughly once a second or after 64
+ * interrupts, crediting 1 bit of entropy for whichever comes first.
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well
+ * as the event type information from the hardware.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * The last two routines try to estimate how many bits of entropy
+ * to credit. They do this by keeping track of the first and second
+ * order deltas of the event timings.
+ *
+ **********************************************************************/
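For example, a hypothetical platform driver that has just read a board serial number or MAC address would seed (but not credit) the pool like this:

#include <linux/etherdevice.h>
#include <linux/random.h>

/* Hypothetical: mix a freshly read MAC address into the input pool. */
static void example_seed_from_mac(const u8 mac[ETH_ALEN])
{
	/* Differentiates otherwise-identical devices; credits no entropy. */
	add_device_randomness(mac, ETH_ALEN);
}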
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
}
+static int __init parse_trust_bootloader(char *arg)
+{
+ return kstrtobool(arg, &trust_bootloader);
+}
early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
-static bool crng_init_try_arch(struct crng_state *crng)
+/*
+ * The first collection of entropy occurs at system boot while interrupts
+ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
+ * utsname(), and the command line. Depending on the above configuration knob,
+ * RDSEED may be considered sufficient for initialization. Note that much
+ * earlier setup may already have pushed entropy into the input pool by the
+ * time we get here.
+ */
+int __init random_init(const char *command_line)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ ktime_t now = ktime_get_real();
+ unsigned int i, arch_bytes;
+ unsigned long entropy;
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
- }
- crng->state[i] ^= rv;
- }
-
- return arch_init;
-}
-
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
-{
- int i;
- bool arch_init = true;
- unsigned long rv;
-
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long_early(&rv) &&
- !arch_get_random_long_early(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
- }
- crng->state[i] ^= rv;
- }
-
- return arch_init;
-}
-
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
-{
- chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
- crng_init_try_arch(crng);
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
-
-static void __init crng_initialize_primary(struct crng_state *crng)
-{
- chacha_init_consts(crng->state);
- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- pr_notice("crng done (trusting CPU's manufacturer)\n");
- }
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
-
-static void crng_finalize_init(struct crng_state *crng)
-{
- if (crng != &primary_crng || crng_init >= 2)
- return;
- if (!system_wq) {
- /* We can't call numa_crng_init until we have workqueues,
- * so mark this for processing later. */
- crng_need_final_init = true;
- return;
- }
-
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
-}
-
-#ifdef CONFIG_NUMA
-static void do_numa_crng_init(struct work_struct *work)
-{
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize_secondary(crng);
- pool[i] = crng;
- }
- /* pairs with READ_ONCE() in select_crng() */
- if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
- for_each_node(i)
- kfree(pool[i]);
- kfree(pool);
- }
-}
-
-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
-
-static void numa_crng_init(void)
-{
- schedule_work(&numa_crng_init_work);
-}
-
-static struct crng_state *select_crng(void)
-{
- struct crng_state **pool;
- int nid = numa_node_id();
-
- /* pairs with cmpxchg_release() in do_numa_crng_init() */
- pool = READ_ONCE(crng_node_pool);
- if (pool && pool[nid])
- return pool[nid];
-
- return &primary_crng;
-}
-#else
-static void numa_crng_init(void) {}
-
-static struct crng_state *select_crng(void)
-{
- return &primary_crng;
-}
+#if defined(LATENT_ENTROPY_PLUGIN)
+ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
-/*
- * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally. Returns the number of
- * bytes processed from cp.
- */
-static size_t crng_fast_load(const char *cp, size_t len)
-{
- unsigned long flags;
- char *p;
- size_t ret = 0;
-
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- p = (unsigned char *) &primary_crng.state[4];
- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--; ret++;
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
- invalidate_batched_entropy();
- crng_init = 1;
- pr_notice("fast init done\n");
- }
- return ret;
-}
-
-/*
- * crng_slow_load() is called by add_device_randomness, which has two
- * attributes. (1) We can't trust the buffer passed to it is
- * guaranteed to be unpredictable (so it might not have any entropy at
- * all), and (2) it doesn't have the performance constraints of
- * crng_fast_load().
- *
- * So we do something more comprehensive which is guaranteed to touch
- * all of the primary_crng's state, and which uses a LFSR with a
- * period of 255 as part of the mixing algorithm. Finally, we do
- * *not* advance crng_init_cnt since buffer we may get may be something
- * like a fixed DMI table (for example), which might very well be
- * unique to the machine, but is otherwise unvarying.
- */
-static int crng_slow_load(const char *cp, size_t len)
-{
- unsigned long flags;
- static unsigned char lfsr = 1;
- unsigned char tmp;
- unsigned i, max = CHACHA_KEY_SIZE;
- const char * src_buf = cp;
- char * dest_buf = (char *) &primary_crng.state[4];
-
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- if (len > max)
- max = len;
-
- for (i = 0; i < max ; i++) {
- tmp = lfsr;
- lfsr >>= 1;
- if (tmp & 1)
- lfsr ^= 0xE1;
- tmp = dest_buf[i % CHACHA_KEY_SIZE];
- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
- lfsr += (tmp << 3) | (tmp >> 5);
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 1;
-}
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
-{
- unsigned long flags;
- int i, num;
- union {
- __u8 block[CHACHA_BLOCK_SIZE];
- __u32 key[8];
- } buf;
-
- if (r) {
- num = extract_entropy(r, &buf, 32, 16, 0);
- if (num == 0)
- return;
- } else {
- _extract_crng(&primary_crng, buf.block);
- _crng_backtrack_protect(&primary_crng, buf.block,
- CHACHA_KEY_SIZE);
- }
- spin_lock_irqsave(&crng->lock, flags);
- for (i = 0; i < 8; i++) {
- unsigned long rv;
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- crng->state[i+4] ^= buf.key[i] ^ rv;
- }
- memzero_explicit(&buf, sizeof(buf));
- WRITE_ONCE(crng->init_time, jiffies);
- spin_unlock_irqrestore(&crng->lock, flags);
- crng_finalize_init(crng);
-}
-
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA_BLOCK_SIZE])
-{
- unsigned long v, flags, init_time;
-
- if (crng_ready()) {
- init_time = READ_ONCE(crng->init_time);
- if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
- time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(crng, crng == &primary_crng ?
- &input_pool : NULL);
- }
- spin_lock_irqsave(&crng->lock, flags);
- if (arch_get_random_long(&v))
- crng->state[14] ^= v;
- chacha20_block(&crng->state[0], out);
- if (crng->state[12] == 0)
- crng->state[13]++;
- spin_unlock_irqrestore(&crng->lock, flags);
-}
-
-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
-{
- _extract_crng(select_crng(), out);
-}
-
-/*
- * Use the leftover bytes from the CRNG block output (if there is
- * enough) to mutate the CRNG key to provide backtracking protection.
- */
-static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
-{
- unsigned long flags;
- __u32 *s, *d;
- int i;
-
- used = round_up(used, sizeof(__u32));
- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
- extract_crng(tmp);
- used = 0;
- }
- spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
- d = &crng->state[4];
- for (i=0; i < 8; i++)
- *d++ ^= *s++;
- spin_unlock_irqrestore(&crng->lock, flags);
-}
-
-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
-{
- _crng_backtrack_protect(select_crng(), tmp, used);
-}
-
-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
-{
- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
- int large_request = (nbytes > 256);
-
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current)) {
- if (ret == 0)
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
+ for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
+ i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
+ if (!arch_get_random_seed_long_early(&entropy) &&
+ !arch_get_random_long_early(&entropy)) {
+ entropy = random_get_entropy();
+ arch_bytes -= sizeof(entropy);
}
-
- extract_crng(tmp);
- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
- }
-
- nbytes -= i;
- buf += i;
- ret += i;
+ _mix_pool_bytes(&entropy, sizeof(entropy));
}
- crng_backtrack_protect(tmp, i);
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+ _mix_pool_bytes(command_line, strlen(command_line));
+ add_latent_entropy();
- /* Wipe data just written to memory */
- memzero_explicit(tmp, sizeof(tmp));
+ if (crng_ready())
+ crng_reseed();
+ else if (trust_cpu)
+ credit_init_bits(arch_bytes * 8);
- return ret;
+ return 0;
}
-
-/*********************************************************************
- *
- * Entropy input management
- *
- *********************************************************************/
-
-/* There is one of these per entropy source */
-struct timer_rand_state {
- cycles_t last_time;
- long last_delta, last_delta2;
-};
-
-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
-
/*
* Add device- or boot-specific data to the input pool to help
* initialize it.
@@ -1141,57 +851,212 @@
* the entropy pool having similar initial state across largely
* identical devices.
*/
-void add_device_randomness(const void *buf, unsigned int size)
+void add_device_randomness(const void *buf, size_t len)
{
- unsigned long time = random_get_entropy() ^ jiffies;
+ unsigned long entropy = random_get_entropy();
unsigned long flags;
- if (!crng_ready() && size)
- crng_slow_load(buf, size);
-
- trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size);
- _mix_pool_bytes(&input_pool, &time, sizeof(time));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+/*
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
+{
+ mix_pool_bytes(buf, len);
+ credit_init_bits(entropy);
+
+ /*
+ * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
+ * we're not yet initialized.
+ */
+ if (!kthread_should_stop() && crng_ready())
+ schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/*
+ * Handle random seed passed by bootloader, and credit it if
+ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ */
+void __cold add_bootloader_randomness(const void *buf, size_t len)
+{
+ mix_pool_bytes(buf, len);
+ if (trust_bootloader)
+ credit_init_bits(len * 8);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+
+struct fast_pool {
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+ struct timer_list mix;
+};
+
+static void mix_interrupt_randomness(struct timer_list *work);
+
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+#ifdef CONFIG_64BIT
+#define FASTMIX_PERM SIPHASH_PERMUTATION
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
+#else
+#define FASTMIX_PERM HSIPHASH_PERMUTATION
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
+#endif
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
+};
+
+/*
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+ * four-word SipHash state, while v represents a two-word input.
+ */
+static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
+{
+ s[3] ^= v1;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v1;
+ s[3] ^= v2;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v2;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+int __cold random_online_cpu(unsigned int cpu)
+{
+ /*
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
+ */
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
+}
+#endif
+
+static void mix_interrupt_randomness(struct timer_list *work)
+{
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ /*
+ * The size of the copied stack pool is explicitly 2 longs so that we
+ * only ever ingest half of the siphash output each time, retaining
+ * the other half as the next "key" that carries over. The entropy is
+ * supposed to be sufficiently dispersed between bits so on average
+ * we don't wind up "losing" some.
+ */
+ unsigned long pool[2];
+ unsigned int count;
+
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ local_irq_enable();
+ return;
+ }
+
+ /*
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view, before we reenable irqs again.
+ */
+ memcpy(pool, fast_pool->pool, sizeof(pool));
+ count = fast_pool->count;
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
+
+ mix_pool_bytes(pool, sizeof(pool));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
+
+ memzero_explicit(pool, sizeof(pool));
+}
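To illustrate the crediting formula just above: 200 accumulated interrupts credit 200/64 = 3 bits, a single interrupt still credits the minimum of 1 bit, and the cap is sizeof(pool) * 8 = 128 bits. A stand-alone check (illustration only):

#include <stdio.h>

/* Mirrors the clamp above: 1 bit per 64 interrupts, between 1 and 128. */
static unsigned int irq_credit(unsigned int count)
{
	unsigned int bits = (count & 0xffff) / 64;

	if (bits < 1)
		bits = 1;
	if (bits > 128)
		bits = 128;
	return bits;
}

int main(void)
{
	printf("%u %u %u\n", irq_credit(1), irq_credit(200), irq_credit(65535));
	return 0;
}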
+
+void add_interrupt_randomness(int irq)
+{
+ enum { MIX_INFLIGHT = 1U << 31 };
+ unsigned long entropy = random_get_entropy();
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
+
+ fast_mix(fast_pool->pool, entropy,
+ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
+ new_count = ++fast_pool->count;
+
+ if (new_count & MIX_INFLIGHT)
+ return;
+
+ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
+ return;
+
+ fast_pool->count |= MIX_INFLIGHT;
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
+}
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ unsigned long last_time;
+ long last_delta, last_delta2;
+};
/*
* This function adds entropy to the entropy "pool" by using timing
- * delays. It uses the timer_rand_state structure to make an estimate
- * of how many bits of entropy this call has added to the pool.
- *
- * The number "num" is also added to the pool - it should somehow describe
- * the type of event which just happened. This is currently 0-255 for
- * keyboard scan codes, and 256 upwards for interrupts.
- *
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool. The
+ * value "num" is also added to the pool; it should somehow describe
+ * the type of event that just happened.
*/
-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- struct entropy_store *r;
- struct {
- long jiffies;
- unsigned cycles;
- unsigned num;
- } sample;
+ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
long delta, delta2, delta3;
+ unsigned int bits;
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
+ /*
+ * If we're in a hard IRQ, add_interrupt_randomness() will be called
+ * sometime after, so mix into the fast pool.
+ */
+ if (in_irq()) {
+ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
+ } else {
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(&num, sizeof(num));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+
+ if (crng_ready())
+ return;
/*
* Calculate number of bits of randomness we probably added.
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- delta = sample.jiffies - READ_ONCE(state->last_time);
- WRITE_ONCE(state->last_time, sample.jiffies);
+ delta = now - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, now);
delta2 = delta - READ_ONCE(state->last_delta);
WRITE_ONCE(state->last_delta, delta);
@@ -1211,627 +1076,50 @@
delta = delta3;
/*
- * delta is now minimum absolute delta.
- * Round down by 1 bit on general principles,
- * and limit entropy estimate to 12 bits.
+ * delta is now minimum absolute delta. Round down by 1 bit
+ * on general principles, and limit entropy estimate to 11 bits.
*/
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ bits = min(fls(delta >> 1), 11);
+
+ /*
+ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
+ * will run after this, which uses a different crediting scheme of 1 bit
+ * per every 64 interrupts. In order to let that function do accounting
+ * close to the one in this function, we credit a full 64/64 bit per bit,
+ * and then subtract one to account for the extra one added.
+ */
+ if (in_irq())
+ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
+ else
+ _credit_init_bits(bits);
}
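As a worked example of the estimate above: if the smallest of the three deltas is 100 jiffies, fls(100 >> 1) = fls(50) = 6, so 6 bits are credited, and the 11-bit cap limits what any single event can contribute. A stand-alone check using a software fls() (illustration only):

#include <stdio.h>

/* Position of the highest set bit, 1-based, like the kernel's fls(). */
static int fls_sw(unsigned long v)
{
	int r = 0;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long deltas[] = { 0, 3, 100, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		int bits = fls_sw(deltas[i] >> 1);

		printf("delta %6lu -> %d bits\n", deltas[i], bits < 11 ? bits : 11);
	}
	return 0;
}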
-void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value)
+void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
static unsigned char last_value;
+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
- /* ignore autorepeat and the like */
+ /* Ignore autorepeat and the like. */
if (value == last_value)
return;
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
-
-#ifdef ADD_INTERRUPT_BENCH
-static unsigned long avg_cycles, avg_deviation;
-
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
-
-static void add_interrupt_bench(cycles_t start)
-{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
-}
-#else
-#define add_interrupt_bench(x)
-#endif
-
-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
-{
- __u32 *ptr = (__u32 *) regs;
- unsigned int idx;
-
- if (regs == NULL)
- return 0;
- idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
- idx = 0;
- ptr += idx++;
- WRITE_ONCE(f->reg_idx, idx);
- return *ptr;
-}
-
-void add_interrupt_randomness(int irq, int irq_flags)
-{
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- __u64 ip;
- unsigned long seed;
- int credit = 0;
-
- if (cycles == 0)
- cycles = get_reg(fast_pool, regs);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
-
- if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
- return;
- }
-
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
- return;
-
- r = &input_pool;
- if (!spin_trylock(&r->lock))
- return;
-
- fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
-
- /*
- * If we have architectural seed generator, produce a seed and
- * add it to the pool. For the sake of paranoia don't let the
- * architectural seed generator dominate the input from the
- * interrupt noise.
- */
- if (arch_get_random_seed_long(&seed)) {
- __mix_pool_bytes(r, &seed, sizeof(seed));
- credit = 1;
- }
- spin_unlock(&r->lock);
-
- fast_pool->count = 0;
-
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, credit + 1);
-}
-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
-
#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
if (!disk || !disk->random)
return;
- /* first major is 1, so we get >= 0x200 here */
+ /* First major is 1, so we get >= 0x200 here. */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
-#endif
-/*********************************************************************
- *
- * Entropy extraction routines
- *
- *********************************************************************/
-
-/*
- * This function decides how many bytes to actually take from the
- * given pool, and also debits the entropy count accordingly.
- */
-static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
-{
- int entropy_count, orig, have_bytes;
- size_t ibytes, nfrac;
-
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
-
- /* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- ibytes = nbytes;
- /* never pull more than available */
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- if (ibytes < min)
- ibytes = 0;
-
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: pool %s count %d\n",
- r->name, entropy_count);
- entropy_count = 0;
- }
- nfrac = ibytes << (ENTROPY_SHIFT + 3);
- if ((size_t) entropy_count > nfrac)
- entropy_count -= nfrac;
- else
- entropy_count = 0;
-
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
-
- return ibytes;
-}
-
-/*
- * This function does the actual extraction for extract_entropy and
- * extract_entropy_user.
- *
- * Note: we assume that .poolwords is a multiple of 16 words.
- */
-static void extract_buf(struct entropy_store *r, __u8 *out)
-{
- int i;
- union {
- __u32 w[5];
- unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA1_WORKSPACE_WORDS];
- unsigned long flags;
-
- /*
- * If we have an architectural hardware random number
- * generator, use it for SHA's initial vector
- */
- sha1_init(hash.w);
- for (i = 0; i < LONGS(20); i++) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- hash.l[i] = v;
- }
-
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
-
- /*
- * We mix the hash back into the pool to prevent backtracking
- * attacks (where the attacker knows the state of the pool
- * plus the current outputs, and attempts to find previous
- * ouputs), unless the hash function can be inverted. By
- * mixing at least a SHA1 worth of hash data back, we make
- * brute-forcing the feedback as hard as brute-forcing the
- * hash.
- */
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
- spin_unlock_irqrestore(&r->lock, flags);
-
- memzero_explicit(workspace, sizeof(workspace));
-
- /*
- * In case the hash function has some recognizable output
- * pattern, we fold it in half. Thus, we always feed back
- * twice as much data as we output.
- */
- hash.w[0] ^= hash.w[3];
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
-}
-
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips)
-{
- ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- while (nbytes) {
- extract_buf(r, tmp);
-
- if (fips) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
- i = min_t(int, nbytes, EXTRACT_SIZE);
- memcpy(buf, tmp, i);
- nbytes -= i;
- buf += i;
- ret += i;
- }
-
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
-}
-
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
- */
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
-{
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!r->last_data_init) {
- r->last_data_init = 1;
- spin_unlock_irqrestore(&r->lock, flags);
- trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
- extract_buf(r, tmp);
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- }
- spin_unlock_irqrestore(&r->lock, flags);
- }
-
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- nbytes = account(r, nbytes, min, reserved);
-
- return _extract_entropy(r, buf, nbytes, fips_enabled);
-}
-
-#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
- void **previous)
-{
-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- const bool print_once = false;
-#else
- static bool print_once __read_mostly;
-#endif
-
- if (print_once ||
- crng_ready() ||
- (previous && (caller == READ_ONCE(*previous))))
- return;
- WRITE_ONCE(*previous, caller);
-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- print_once = true;
-#endif
- if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS "
- "with crng_init=%d\n", func_name, caller,
- crng_init);
-}
-
-/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. It does not rely on the hardware random
- * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch(). In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
- */
-static void _get_random_bytes(void *buf, int nbytes)
-{
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
-
- trace_get_random_bytes(nbytes, _RET_IP_);
-
- while (nbytes >= CHACHA_BLOCK_SIZE) {
- extract_crng(buf);
- buf += CHACHA_BLOCK_SIZE;
- nbytes -= CHACHA_BLOCK_SIZE;
- }
-
- if (nbytes > 0) {
- extract_crng(tmp);
- memcpy(buf, tmp, nbytes);
- crng_backtrack_protect(tmp, nbytes);
- } else
- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
- memzero_explicit(tmp, sizeof(tmp));
-}
-
-void get_random_bytes(void *buf, int nbytes)
-{
- static void *previous;
-
- warn_unseeded_randomness(&previous);
- _get_random_bytes(buf, nbytes);
-}
-EXPORT_SYMBOL(get_random_bytes);
-
-
-/*
- * Each time the timer fires, we expect that we got an unpredictable
- * jump in the cycle counter. Even if the timer is running on another
- * CPU, the timer activity will be touching the stack of the CPU that is
- * generating entropy..
- *
- * Note that we don't re-arm the timer in the timer itself - we are
- * happy to be scheduled away, since that just makes the load more
- * complex, but we do not want the timer to keep ticking unless the
- * entropy loop is running.
- *
- * So the re-arming always happens in the entropy loop itself.
- */
-static void entropy_timer(struct timer_list *t)
-{
- credit_entropy_bits(&input_pool, 1);
-}
-
-/*
- * If we have an actual cycle counter, see if we can
- * generate enough entropy with timing noise
- */
-static void try_to_generate_entropy(void)
-{
- struct {
- unsigned long now;
- struct timer_list timer;
- } stack;
-
- stack.now = random_get_entropy();
-
- /* Slow counter - or none. Don't even bother */
- if (stack.now == random_get_entropy())
- return;
-
- timer_setup_on_stack(&stack.timer, entropy_timer, 0);
- while (!crng_ready()) {
- if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies+1);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
- schedule();
- stack.now = random_get_entropy();
- }
-
- del_timer_sync(&stack.timer);
- destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
-}
-
-/*
- * Wait for the urandom pool to be seeded and thus guaranteed to supply
- * cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
- *
- * Returns: 0 if the urandom pool has been seeded.
- * -ERESTARTSYS if the function was interrupted by a signal.
- */
-int wait_for_random_bytes(void)
-{
- if (likely(crng_ready()))
- return 0;
-
- do {
- int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
- if (ret)
- return ret > 0 ? 0 : ret;
-
- try_to_generate_entropy();
- } while (!crng_ready());
-
- return 0;
-}
-EXPORT_SYMBOL(wait_for_random_bytes);
-
-/*
- * Returns whether or not the urandom pool has been seeded and thus guaranteed
- * to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
- *
- * Returns: true if the urandom pool has been seeded.
- * false if the urandom pool has not been seeded.
- */
-bool rng_is_initialized(void)
-{
- return crng_ready();
-}
-EXPORT_SYMBOL(rng_is_initialized);
-
-/*
- * Add a callback function that will be invoked when the nonblocking
- * pool is initialised.
- *
- * returns: 0 if callback is successfully added
- * -EALREADY if pool is already initialised (callback not called)
- * -ENOENT if module for callback is not alive
- */
-int add_random_ready_callback(struct random_ready_callback *rdy)
-{
- struct module *owner;
- unsigned long flags;
- int err = -EALREADY;
-
- if (crng_ready())
- return err;
-
- owner = rdy->owner;
- if (!try_module_get(owner))
- return -ENOENT;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (crng_ready())
- goto out;
-
- owner = NULL;
-
- list_add(&rdy->list, &random_ready_list);
- err = 0;
-
-out:
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-
- return err;
-}
-EXPORT_SYMBOL(add_random_ready_callback);
-
-/*
- * Delete a previously registered readiness callback function.
- */
-void del_random_ready_callback(struct random_ready_callback *rdy)
-{
- unsigned long flags;
- struct module *owner = NULL;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (!list_empty(&rdy->list)) {
- list_del_init(&rdy->list);
- owner = rdy->owner;
- }
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
-}
-EXPORT_SYMBOL(del_random_ready_callback);
-
-/*
- * This function will use the architecture-specific hardware random
- * number generator if it is available. The arch-specific hw RNG will
- * almost certainly be faster than what we can do in software, but it
- * is impossible to verify that it is implemented securely (as
- * opposed, to, say, the AES encryption of a sequence number using a
- * key known by the NSA). So it's useful if we need the speed, but
- * only if we're willing to trust the hardware manufacturer not to
- * have put in a back door.
- *
- * Return number of bytes filled in.
- */
-int __must_check get_random_bytes_arch(void *buf, int nbytes)
-{
- int left = nbytes;
- char *p = buf;
-
- trace_get_random_bytes_arch(left, _RET_IP_);
- while (left) {
- unsigned long v;
- int chunk = min_t(int, left, sizeof(unsigned long));
-
- if (!arch_get_random_long(&v))
- break;
-
- memcpy(p, &v, chunk);
- p += chunk;
- left -= chunk;
- }
-
- return nbytes - left;
-}
-EXPORT_SYMBOL(get_random_bytes_arch);
-
-/*
- * init_std_data - initialize pool with system data
- *
- * @r: pool to initialize
- *
- * This function clears the pool's entropy count and mixes some system
- * data into the pool to prepare it for use. The pool is not cleared
- * as that can only decrease the entropy in the pool.
- */
-static void __init init_std_data(struct entropy_store *r)
-{
- int i;
- ktime_t now = ktime_get_real();
- unsigned long rv;
-
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv));
- }
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
-}
-
-/*
- * Note that setup_arch() may call add_device_randomness()
- * long before we get here. This allows seeding of the pools
- * with some platform dependent data very early in the boot
- * process. But it limits our options here. We must use
- * statically allocated structures that already have all
- * initializations complete at compile time. We should also
- * take care not to overwrite the precious per platform data
- * we were given.
- */
-int __init rand_initialize(void)
-{
- init_std_data(&input_pool);
- if (crng_need_final_init)
- crng_finalize_init(&primary_crng);
- crng_initialize_primary(&primary_crng);
- crng_global_init_time = jiffies;
- if (ratelimit_disable) {
- urandom_warning.interval = 0;
- unseeded_warning.interval = 0;
- }
- return 0;
-}
-
-#ifdef CONFIG_BLOCK
-void rand_initialize_disk(struct gendisk *disk)
+void __cold rand_initialize_disk(struct gendisk *disk)
{
struct timer_rand_state *state;
@@ -1847,116 +1135,194 @@
}
#endif
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
+/*
+ * Each time the timer fires, we expect that we got an unpredictable
+ * jump in the cycle counter. Even if the timer is running on another
+ * CPU, the timer activity will be touching the stack of the CPU that is
+ * generating entropy.
+ *
+ * Note that we don't re-arm the timer in the timer itself - we are
+ * happy to be scheduled away, since that just makes the load more
+ * complex, but we do not want the timer to keep ticking unless the
+ * entropy loop is running.
+ *
+ * So the re-arming always happens in the entropy loop itself.
+ */
+static void __cold entropy_timer(struct timer_list *t)
{
- int ret;
-
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
- ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
- return ret;
+ credit_init_bits(1);
}
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+/*
+ * If we have an actual cycle counter, see if we can
+ * generate enough entropy with timing noise
+ */
+static void __cold try_to_generate_entropy(void)
{
- unsigned long flags;
- static int maxwarn = 10;
+ struct {
+ unsigned long entropy;
+ struct timer_list timer;
+ } stack;
- if (!crng_ready() && maxwarn > 0) {
- maxwarn--;
- if (__ratelimit(&urandom_warning))
- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
- current->comm, nbytes);
- spin_lock_irqsave(&primary_crng.lock, flags);
- crng_init_cnt = 0;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
+ stack.entropy = random_get_entropy();
+
+ /* Slow counter - or none. Don't even bother */
+ if (stack.entropy == random_get_entropy())
+ return;
+
+ timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+ while (!crng_ready() && !signal_pending(current)) {
+ if (!timer_pending(&stack.timer))
+ mod_timer(&stack.timer, jiffies + 1);
+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ schedule();
+ stack.entropy = random_get_entropy();
}
- return urandom_read_nowarn(file, buf, nbytes, ppos);
+ del_timer_sync(&stack.timer);
+ destroy_timer_on_stack(&stack.timer);
+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+
+/**********************************************************************
+ *
+ * Userspace reader/writer interfaces.
+ *
+ * getrandom(2) is the primary modern interface into the RNG and should
+ * be used in preference to anything else.
+ *
+ * Reading from /dev/random has the same functionality as calling
+ * getrandom(2) with flags=0. In earlier versions, however, it had
+ * vastly different semantics and should therefore be avoided, to
+ * prevent backwards compatibility issues.
+ *
+ * Reading from /dev/urandom has the same functionality as calling
+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
+ * waiting for the RNG to be ready, it should not be used.
+ *
+ * Writing to either /dev/random or /dev/urandom adds entropy to
+ * the input pool but does not credit it.
+ *
+ * Polling on /dev/random indicates when the RNG is initialized, on
+ * the read side, and when it wants new entropy, on the write side.
+ *
+ * Both /dev/random and /dev/urandom have the same set of ioctls for
+ * adding entropy, getting the entropy count, zeroing the count, and
+ * reseeding the crng.
+ *
+ **********************************************************************/
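For completeness, a minimal user-space consumer of the primary interface described above (glibc 2.25 and later expose the wrapper in <sys/random.h>):

#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char key[32];
	ssize_t n;

	/* flags = 0: block until the RNG is initialized, never again after that. */
	n = getrandom(key, sizeof(key), 0);
	if (n != (ssize_t)sizeof(key)) {
		perror("getrandom");
		return 1;
	}
	printf("got %zd random bytes\n", n);
	return 0;
}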
+
+SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
+{
+ struct iov_iter iter;
+ struct iovec iov;
+ int ret;
+
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
+
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
+
+ if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = import_single_range(READ, ubuf, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ return get_random_bytes_user(&iter);
+}
+
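
The block comment above steers callers toward getrandom(2). A minimal userspace sketch of the recommended usage, assuming glibc 2.25 or later, which wraps the syscall in <sys/random.h> (older systems would call syscall(SYS_getrandom, ...) directly):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/random.h>

int main(void)
{
	unsigned char key[32];
	ssize_t n;

	/* Non-blocking probe: -1/EAGAIN means the CRNG is not yet seeded,
	 * matching the IOCB_NOWAIT/O_NONBLOCK path in random_read_iter()
	 * further down. */
	if (getrandom(key, sizeof(key), GRND_NONBLOCK) < 0 && errno == EAGAIN)
		fprintf(stderr, "entropy pool not yet initialized, waiting\n");

	/* flags=0 blocks until the CRNG is ready, like reading /dev/random. */
	do {
		n = getrandom(key, sizeof(key), 0);
	} while (n < 0 && errno == EINTR);

	if (n < 0) {
		perror("getrandom");
		return 1;
	}
	return 0;
}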
+static __poll_t random_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &crng_init_wait, wait);
+ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
+}
+
+static ssize_t write_pool_user(struct iov_iter *iter)
+{
+ u8 block[BLAKE2S_BLOCK_SIZE];
+ ssize_t ret = 0;
+ size_t copied;
+
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+ for (;;) {
+ copied = copy_from_iter(block, sizeof(block), iter);
+ ret += copied;
+ mix_pool_bytes(block, copied);
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
+
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ }
+
+ memzero_explicit(block, sizeof(block));
+ return ret ? ret : -EFAULT;
+}
+
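
write_pool_user() is what plain write(2) on either device ends up in: the data is mixed into the input pool one BLAKE2S_BLOCK_SIZE chunk at a time, but nothing is credited. A small sketch of feeding stored seed material back in from userspace; the seed path is hypothetical, and no capability is needed because this only mixes, it never credits:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char seed[512];
	int src = open("/var/lib/seed", O_RDONLY);	/* hypothetical seed file */
	int rng = open("/dev/urandom", O_WRONLY);
	ssize_t n;

	if (src < 0 || rng < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(src, seed, sizeof(seed))) > 0)
		if (write(rng, seed, n) != n)	/* mixed in, not credited */
			perror("write");
	close(src);
	close(rng);
	return 0;
}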
+static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
+{
+ return write_pool_user(iter);
+}
+
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+{
+ static int maxwarn = 10;
+
+ if (!crng_ready()) {
+ if (!ratelimit_disable && maxwarn <= 0)
+ ++urandom_warning.missed;
+ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
+ --maxwarn;
+ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
+ current->comm, iov_iter_count(iter));
+ }
+ }
+
+ return get_random_bytes_user(iter);
+}
+
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
- return urandom_read_nowarn(file, buf, nbytes, ppos);
-}
-
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
-{
- __poll_t mask;
-
- poll_wait(file, &crng_init_wait, wait);
- poll_wait(file, &random_write_wait, wait);
- mask = 0;
- if (crng_ready())
- mask |= EPOLLIN | EPOLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
- mask |= EPOLLOUT | EPOLLWRNORM;
- return mask;
-}
-
-static int
-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
-{
- size_t bytes;
- __u32 t, buf[16];
- const char __user *p = buffer;
-
- while (count > 0) {
- int b, i = 0;
-
- bytes = min(count, sizeof(buf));
- if (copy_from_user(&buf, p, bytes))
- return -EFAULT;
-
- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
- if (!arch_get_random_int(&t))
- break;
- buf[i] ^= t;
- }
-
- count -= bytes;
- p += bytes;
-
- mix_pool_bytes(r, buf, bytes);
- cond_resched();
- }
-
- return 0;
-}
-
-static ssize_t random_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- size_t ret;
-
- ret = write_pool(&input_pool, buffer, count);
- if (ret)
- return ret;
-
- return (ssize_t)count;
+ return get_random_bytes_user(iter);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
- int size, ent_count;
int __user *p = (int __user *)arg;
- int retval;
+ int ent_count;
switch (cmd) {
case RNDGETENTCNT:
- /* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
- if (put_user(ent_count, p))
+ /* Inherently racy, no point locking. */
+ if (put_user(input_pool.init_bits, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1964,41 +1330,48 @@
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(&input_pool, ent_count);
- case RNDADDENTROPY:
+ if (ent_count < 0)
+ return -EINVAL;
+ credit_init_bits(ent_count);
+ return 0;
+ case RNDADDENTROPY: {
+ struct iov_iter iter;
+ struct iovec iov;
+ ssize_t ret;
+ int len;
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p++))
return -EFAULT;
if (ent_count < 0)
return -EINVAL;
- if (get_user(size, p++))
+ if (get_user(len, p++))
return -EFAULT;
- retval = write_pool(&input_pool, (const char __user *)p,
- size);
- if (retval < 0)
- return retval;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ ret = import_single_range(WRITE, p, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ ret = write_pool_user(&iter);
+ if (unlikely(ret < 0))
+ return ret;
+ /* Since we're crediting, enforce that it was all written into the pool. */
+ if (unlikely(ret != len))
+ return -EFAULT;
+ credit_init_bits(ent_count);
+ return 0;
+ }
case RNDZAPENTCNT:
case RNDCLEARPOOL:
- /*
- * Clear the entropy pool counters. We no longer clear
- * the entropy pool, as that's silly.
- */
+ /* No longer has any effect. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (crng_init < 2)
+ if (!crng_ready())
return -ENODATA;
- crng_reseed(&primary_crng, &input_pool);
- WRITE_ONCE(crng_global_init_time, jiffies - 1);
+ crng_reseed();
return 0;
default:
return -EINVAL;
@@ -2011,55 +1384,56 @@
}
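
The RNDADDENTROPY handler above is the privileged counterpart: it insists the whole buffer reaches the pool before credit_init_bits() is called. A hedged userspace sketch using struct rand_pool_info from <linux/random.h>; it requires CAP_SYS_ADMIN, and the entropy_count value is purely the caller's claim:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/random.h>

#define SEED_LEN 64

int main(void)
{
	/* struct rand_pool_info ends in a flexible buffer. */
	struct rand_pool_info *info = malloc(sizeof(*info) + SEED_LEN);
	int fd = open("/dev/random", O_WRONLY);

	if (!info || fd < 0) {
		perror("setup");
		return 1;
	}

	memset(info->buf, 0xa5, SEED_LEN);	/* stand-in seed material */
	info->buf_size = SEED_LEN;
	info->entropy_count = 8 * SEED_LEN;	/* caller's claim, in bits */

	/* Needs CAP_SYS_ADMIN; mixes the buffer, then credits the bits. */
	if (ioctl(fd, RNDADDENTROPY, info) < 0)
		perror("RNDADDENTROPY");

	close(fd);
	free(info);
	return 0;
}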
const struct file_operations random_fops = {
- .read = random_read,
- .write = random_write,
- .poll = random_poll,
+ .read_iter = random_read_iter,
+ .write_iter = random_write_iter,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
- .write = random_write,
+ .read_iter = urandom_read_iter,
+ .write_iter = random_write_iter,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
- unsigned int, flags)
-{
- int ret;
-
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
- return -EINVAL;
-
- /*
- * Requesting insecure and blocking randomness at the same time makes
- * no sense.
- */
- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
- return -EINVAL;
-
- if (count > INT_MAX)
- count = INT_MAX;
-
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
- if (flags & GRND_NONBLOCK)
- return -EAGAIN;
- ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
- }
- return urandom_read_nowarn(NULL, buf, count, NULL);
-}
/********************************************************************
*
- * Sysctl interface
+ * Sysctl interface.
+ *
+ * These are partly unused legacy knobs with dummy values to not break
+ * userspace and partly still useful things. They are usually accessible
+ * in /proc/sys/kernel/random/ and are as follows:
+ *
+ * - boot_id - a UUID representing the current boot.
+ *
+ * - uuid - a random UUID, different each time the file is read.
+ *
+ * - poolsize - the number of bits of entropy that the input pool can
+ * hold, tied to the POOL_BITS constant.
+ *
+ * - entropy_avail - the number of bits of entropy currently in the
+ * input pool. Always <= poolsize.
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+ * more entropy, tied to the POOL_READY_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
+ * It is writable to avoid breaking old userspaces, but writing
+ * to it does not change any behavior of the RNG.
*
********************************************************************/
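
All of the knobs described above appear as files under /proc/sys/kernel/random/. A short read-only sketch that prints a few of them; no privileges are required for the 0444 entries:

#include <stdio.h>

static void show(const char *name)
{
	char path[128], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/random/%s", name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%-24s %s", name, line);
	fclose(f);
}

int main(void)
{
	show("poolsize");	/* POOL_BITS, 256 with the BLAKE2s pool */
	show("entropy_avail");	/* input_pool.init_bits, always <= poolsize */
	show("boot_id");	/* stable for the whole boot */
	show("uuid");		/* fresh UUID on every read */
	return 0;
}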
@@ -2067,25 +1441,28 @@
#include <linux/sysctl.h>
-static int min_write_thresh;
-static int max_write_thresh = INPUT_POOL_WORDS * 32;
-static int random_min_urandom_seed = 60;
-static char sysctl_bootid[16];
+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
+static int sysctl_poolsize = POOL_BITS;
+static u8 sysctl_bootid[UUID_SIZE];
/*
* This function is used to return both the bootid UUID, and random
- * UUID. The difference is in whether table->data is NULL; if it is,
+ * UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
- *
- * If the user accesses this via the proc interface, the UUID will be
- * returned as an ASCII string in the standard UUID format; if via the
- * sysctl system call, as 16 bytes of binary data.
*/
-static int proc_do_uuid(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- unsigned char buf[64], tmp_uuid[16], *uuid;
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+ char uuid_string[UUID_STRING_LEN + 1];
+ struct ctl_table fake_table = {
+ .data = uuid_string,
+ .maxlen = UUID_STRING_LEN
+ };
+
+ if (write)
+ return -EPERM;
uuid = table->data;
if (!uuid) {
@@ -2100,32 +1477,17 @@
spin_unlock(&bootid_spinlock);
}
- sprintf(buf, "%pU", uuid);
-
- fake_table.data = buf;
- fake_table.maxlen = sizeof(buf);
-
- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}
-/*
- * Return entropy available scaled to integral bits
- */
-static int proc_do_entropy(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+/* The same as proc_dointvec, but writes don't change anything. */
+static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- int entropy_count;
-
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
-
- fake_table.data = &entropy_count;
- fake_table.maxlen = sizeof(entropy_count);
-
- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
{
@@ -2137,222 +1499,36 @@
},
{
.procname = "entropy_avail",
+ .data = &input_pool.init_bits,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_do_entropy,
- .data = &input_pool.entropy_count,
+ .proc_handler = proc_dointvec,
},
{
.procname = "write_wakeup_threshold",
- .data = &random_write_wakeup_bits,
+ .data = &sysctl_random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_write_thresh,
- .extra2 = &max_write_thresh,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "urandom_min_reseed_secs",
- .data = &random_min_urandom_seed,
+ .data = &sysctl_random_min_urandom_seed,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "boot_id",
.data = &sysctl_bootid,
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
{
.procname = "uuid",
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
-#ifdef ADD_INTERRUPT_BENCH
- {
- .procname = "add_interrupt_avg_cycles",
- .data = &avg_cycles,
- .maxlen = sizeof(avg_cycles),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
- {
- .procname = "add_interrupt_avg_deviation",
- .data = &avg_deviation,
- .maxlen = sizeof(avg_deviation),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
-#endif
{ }
};
-#endif /* CONFIG_SYSCTL */
-
-struct batched_entropy {
- union {
- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
- };
- unsigned int position;
- spinlock_t batch_lock;
-};
-
-/*
- * Get a random word for internal kernel use only. The quality of the random
- * number is good as /dev/urandom, but there is no backtrack protection, with
- * the goal of being quite fast and not depleting entropy. In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once at any
- * point prior.
- */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
-};
-
-u64 get_random_u64(void)
-{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u64);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
- batch->position = 0;
- }
- ret = batch->entropy_u64[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
-};
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u32);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
- batch->position = 0;
- }
- ret = batch->entropy_u32[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
-
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
- int cpu;
- unsigned long flags;
-
- for_each_possible_cpu (cpu) {
- struct batched_entropy *batched_entropy;
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
- batched_entropy->position = 0;
- spin_unlock(&batched_entropy->batch_lock);
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
- spin_lock(&batched_entropy->batch_lock);
- batched_entropy->position = 0;
- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
- }
-}
-
-/**
- * randomize_page - Generate a random, page aligned address
- * @start: The smallest acceptable address the caller will take.
- * @range: The size of the area, starting at @start, within which the
- * random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned. We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range). On error,
- * @start is returned.
- */
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
-{
- if (!PAGE_ALIGNED(start)) {
- range -= PAGE_ALIGN(start) - start;
- start = PAGE_ALIGN(start);
- }
-
- if (start > ULONG_MAX - range)
- range = ULONG_MAX - start;
-
- range >>= PAGE_SHIFT;
-
- if (range == 0)
- return start;
-
- return start + (get_random_long() % range << PAGE_SHIFT);
-}
-
-/* Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
- */
-void add_hwgenerator_randomness(const char *buffer, size_t count,
- size_t entropy)
-{
- struct entropy_store *poolp = &input_pool;
-
- if (unlikely(crng_init == 0)) {
- size_t ret = crng_fast_load(buffer, count);
- count -= ret;
- buffer += ret;
- if (!count || crng_init == 0)
- return;
- }
-
- /* Suspend writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait,
- !system_wq || kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
- mix_pool_bytes(poolp, buffer, count);
- credit_entropy_bits(poolp, entropy);
-}
-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-
-/* Handle random seed passed by bootloader.
- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
- * it would be regarded as device data.
- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
- */
-void add_bootloader_randomness(const void *buf, unsigned int size)
-{
- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
- add_hwgenerator_randomness(buf, size, size * 8);
- else
- add_device_randomness(buf, size);
-}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+#endif /* CONFIG_SYSCTL */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index ddaeceb..ed60047 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -274,14 +274,6 @@
kfree(chip);
}
-static void tpm_devs_release(struct device *dev)
-{
- struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
-
- /* release the master device reference */
- put_device(&chip->dev);
-}
-
/**
* tpm_class_shutdown() - prepare the TPM device for loss of power.
* @dev: device to which the chip is associated.
@@ -344,7 +336,6 @@
chip->dev_num = rc;
device_initialize(&chip->dev);
- device_initialize(&chip->devs);
chip->dev.class = tpm_class;
chip->dev.class->shutdown_pre = tpm_class_shutdown;
@@ -352,39 +343,20 @@
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
- chip->devs.parent = pdev;
- chip->devs.class = tpmrm_class;
- chip->devs.release = tpm_devs_release;
- /* get extra reference on main device to hold on
- * behalf of devs. This holds the chip structure
- * while cdevs is in use. The corresponding put
- * is in the tpm_devs_release (TPM2 only)
- */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- get_device(&chip->dev);
-
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
else
chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
- chip->devs.devt =
- MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
-
rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
if (rc)
goto out;
- rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
- if (rc)
- goto out;
if (!pdev)
chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
cdev_init(&chip->cdev, &tpm_fops);
- cdev_init(&chip->cdevs, &tpmrm_fops);
chip->cdev.owner = THIS_MODULE;
- chip->cdevs.owner = THIS_MODULE;
rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
@@ -396,7 +368,6 @@
return chip;
out:
- put_device(&chip->devs);
put_device(&chip->dev);
return ERR_PTR(rc);
}
@@ -445,14 +416,9 @@
}
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = cdev_device_add(&chip->cdevs, &chip->devs);
- if (rc) {
- dev_err(&chip->devs,
- "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->devs), MAJOR(chip->devs.devt),
- MINOR(chip->devs.devt), rc);
- return rc;
- }
+ rc = tpm_devs_add(chip);
+ if (rc)
+ goto err_del_cdev;
}
/* Make the chip available. */
@@ -460,6 +426,10 @@
idr_replace(&dev_nums_idr, chip, chip->dev_num);
mutex_unlock(&idr_lock);
+ return 0;
+
+err_del_cdev:
+ cdev_device_del(&chip->cdev, &chip->dev);
return rc;
}
@@ -641,7 +611,7 @@
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- cdev_device_del(&chip->cdevs, &chip->devs);
+ tpm_devs_remove(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 283f782..2163c6e 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -234,6 +234,8 @@
size_t cmdsiz);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c84d239..d0e11d7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -400,7 +400,16 @@
if (!rc) {
out = (struct tpm2_get_cap_out *)
&buf.data[TPM_HEADER_SIZE];
- *value = be32_to_cpu(out->value);
+ /*
+ * To prevent failing boot up of some systems, Infineon TPM2.0
+ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
+ * the TPM2_Getcapability command returns a zero length list
+ * in field upgrade mode.
+ */
+ if (be32_to_cpu(out->property_cnt) > 0)
+ *value = be32_to_cpu(out->value);
+ else
+ rc = -ENODATA;
}
tpm_buf_destroy(&buf);
return rc;
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index d222502..ffb35f0 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -574,3 +574,68 @@
dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
return rc;
}
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+ /* release the master device reference */
+ put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdevs, &chip->devs);
+ put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+ int rc;
+
+ device_initialize(&chip->devs);
+ chip->devs.parent = chip->dev.parent;
+ chip->devs.class = tpmrm_class;
+
+ /*
+ * Get extra reference on main device to hold on behalf of devs.
+ * This holds the chip structure while cdevs is in use. The
+ * corresponding put is in the tpm_devs_release.
+ */
+ get_device(&chip->dev);
+ chip->devs.release = tpm_devs_release;
+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+ cdev_init(&chip->cdevs, &tpmrm_fops);
+ chip->cdevs.owner = THIS_MODULE;
+
+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+ if (rc)
+ goto err_put_devs;
+
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ goto err_put_devs;
+ }
+
+ return 0;
+
+err_put_devs:
+ put_device(&chip->devs);
+
+ return rc;
+}
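
tpm_devs_add()/tpm_devs_remove() follow the standard pattern for a secondary struct device that must keep its parent alive: take an extra reference with get_device() while setting the child up, and drop it in the child's ->release() callback. A condensed sketch of that pattern with hypothetical foo_* names (kernel context assumed, not the TPM code itself):

#include <linux/device.h>

struct foo_chip {
	struct device dev;	/* primary device */
	struct device aux;	/* secondary device exposing extra files */
};

static void foo_aux_release(struct device *dev)
{
	struct foo_chip *chip = container_of(dev, struct foo_chip, aux);

	/* Drop the reference taken in foo_aux_add(); the primary device,
	 * and with it the chip, may now go away. */
	put_device(&chip->dev);
}

static int foo_aux_add(struct foo_chip *chip)
{
	int rc;

	device_initialize(&chip->aux);
	chip->aux.parent = chip->dev.parent;
	chip->aux.release = foo_aux_release;
	get_device(&chip->dev);			/* held on behalf of aux */

	rc = dev_set_name(&chip->aux, "fooaux%d", 0);
	if (!rc)
		rc = device_add(&chip->aux);
	if (rc)
		put_device(&chip->aux);		/* runs foo_aux_release() */
	return rc;
}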
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 3ca7528..a1ec722 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -683,6 +683,7 @@
if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
ibmvtpm->rtce_buf != NULL,
HZ)) {
+ rc = -ENODEV;
dev_err(dev, "CRQ response timed out\n");
goto init_irq_cleanup;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6735228..6d36142 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1959,6 +1959,13 @@
list_del(&portdev->list);
spin_unlock_irq(&pdrvdata_lock);
+ /* Device is going away, exit any polling for buffers */
+ virtio_break_device(vdev);
+ if (use_multiport(portdev))
+ flush_work(&portdev->control_work);
+ else
+ flush_work(&portdev->config_work);
+
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
@@ -2232,7 +2239,7 @@
.remove = virtcons_remove,
};
-static int __init init(void)
+static int __init virtio_console_init(void)
{
int err;
@@ -2269,7 +2276,7 @@
return err;
}
-static void __exit fini(void)
+static void __exit virtio_console_fini(void)
{
reclaim_dma_bufs();
@@ -2279,8 +2286,8 @@
class_destroy(pdrvdata.class);
debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_console_init);
+module_exit(virtio_console_fini);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c
index a2f34d1..6ea7da1 100644
--- a/drivers/clk/actions/owl-s700.c
+++ b/drivers/clk/actions/owl-s700.c
@@ -162,6 +162,7 @@
static struct clk_div_table rmii_div_table[] = {
{0, 4}, {1, 10},
+ {0, 0}
};
/* divider clocks */
diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c
index 7908909..5144ada 100644
--- a/drivers/clk/actions/owl-s900.c
+++ b/drivers/clk/actions/owl-s900.c
@@ -140,7 +140,7 @@
static struct clk_div_table usb3_mac_div_table[] = {
{ 1, 2 }, { 2, 3 }, { 3, 4 },
- { 0, 8 },
+ { 0, 0 }
};
static struct clk_div_table i2s_div_table[] = {
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index b656d25..fe772ba 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -106,6 +106,10 @@
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;
+
+ if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
+ return;
+
tmp_diff = abs(req->rate - tmp_rate);
if (*best_diff < 0 || *best_diff >= tmp_diff) {
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index a092a94..9d25b23 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -606,16 +606,16 @@
{ .n = "pdmc0_gclk",
.id = 68,
.r = { .max = 50000000 },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
- .pp_mux_table = { 5, 8, },
+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
{ .n = "pdmc1_gclk",
.id = 69,
.r = { .max = 50000000, },
- .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
- .pp_mux_table = { 5, 8, },
+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+ .pp_mux_table = { 5, 9, },
.pp_count = 2,
.pp_chg_id = INT_MIN, },
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 4062092..a6642f3 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -34,6 +34,7 @@
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
+#define CCU_DIV_CTL_GATE_REF_BUF BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
#define CCU_DIV_RST_DELAY_US 1
@@ -170,6 +171,40 @@
return !!(val & CCU_DIV_CTL_EN);
}
+static int ccu_div_buf_enable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_GATE_REF_BUF, 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static void ccu_div_buf_disable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
+ spin_unlock_irqrestore(&div->lock, flags);
+}
+
+static int ccu_div_buf_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+
+ return !(val & CCU_DIV_CTL_GATE_REF_BUF);
+}
+
static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -323,6 +358,7 @@
CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
+ CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};
@@ -441,6 +477,9 @@
continue;
}
+ if (!strcmp("div_buf", name))
+ continue;
+
bits[didx] = ccu_div_bits[bidx];
bits[didx].div = div;
@@ -477,6 +516,21 @@
&ccu_div_dbgfs_fixed_clkdiv_fops);
}
+static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bit;
+
+ bit = kmalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return;
+
+ *bit = ccu_div_bits[3];
+ bit->div = div;
+ debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
+ &ccu_div_dbgfs_bit_fops);
+}
+
static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
struct ccu_div *div = to_ccu_div(hw);
@@ -489,6 +543,7 @@
#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
+#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL
#endif /* !CONFIG_DEBUG_FS */
@@ -520,6 +575,13 @@
.debug_init = ccu_div_gate_debug_init
};
+static const struct clk_ops ccu_div_buf_ops = {
+ .enable = ccu_div_buf_enable,
+ .disable = ccu_div_buf_disable,
+ .is_enabled = ccu_div_buf_is_enabled,
+ .debug_init = ccu_div_buf_debug_init
+};
+
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
.round_rate = ccu_div_fixed_round_rate,
@@ -566,6 +628,8 @@
} else if (div_init->type == CCU_DIV_GATE) {
hw_init.ops = &ccu_div_gate_ops;
div->divider = div_init->divider;
+ } else if (div_init->type == CCU_DIV_BUF) {
+ hw_init.ops = &ccu_div_buf_ops;
} else if (div_init->type == CCU_DIV_FIXED) {
hw_init.ops = &ccu_div_fixed_ops;
div->divider = div_init->divider;
@@ -579,6 +643,7 @@
goto err_free_div;
}
parent_data.fw_name = div_init->parent_name;
+ parent_data.name = div_init->parent_name;
hw_init.parent_data = &parent_data;
hw_init.num_parents = 1;
diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h
index 795665c..4eb49ff 100644
--- a/drivers/clk/baikal-t1/ccu-div.h
+++ b/drivers/clk/baikal-t1/ccu-div.h
@@ -14,6 +14,14 @@
#include <linux/of.h>
/*
+ * CCU Divider private clock IDs
+ * @CCU_SYS_SATA_CLK: CCU SATA internal clock
+ * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock
+ */
+#define CCU_SYS_SATA_CLK -1
+#define CCU_SYS_XGMAC_CLK -2
+
+/*
* CCU Divider private flags
* @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1.
* It can be 0 though, which is functionally the same.
@@ -31,11 +39,13 @@
* enum ccu_div_type - CCU Divider types
* @CCU_DIV_VAR: Clocks gate with variable divider.
* @CCU_DIV_GATE: Clocks gate with fixed divider.
+ * @CCU_DIV_BUF: Clock gate with no divider.
* @CCU_DIV_FIXED: Ungateable clock with fixed divider.
*/
enum ccu_div_type {
CCU_DIV_VAR,
CCU_DIV_GATE,
+ CCU_DIV_BUF,
CCU_DIV_FIXED
};
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index f141fda..90f4fda 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -76,6 +76,16 @@
.divider = _divider \
}
+#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_BUF, \
+ .flags = _flags \
+ }
+
#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \
{ \
.id = _id, \
@@ -188,11 +198,14 @@
* for the SoC devices registers IO-operations.
*/
static const struct ccu_div_info sys_info[] = {
- CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk",
"sata_clk", CCU_SYS_SATA_REF_BASE, 4,
CLK_SET_RATE_GATE,
CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED |
CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ "sys_sata_clk", CCU_SYS_SATA_REF_BASE,
+ CLK_SET_RATE_PARENT),
CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk",
"pcie_clk", CCU_SYS_APB_BASE, 5,
CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
@@ -204,10 +217,12 @@
"eth_clk", CCU_SYS_GMAC1_BASE, 5),
CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk",
"eth_clk", 10),
- CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
- "eth_clk", CCU_SYS_XGMAC_BASE, 8),
+ CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk",
+ "eth_clk", CCU_SYS_XGMAC_BASE, 1),
+ CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
+ "sys_xgmac_clk", 8),
CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk",
- "eth_clk", 10),
+ "sys_xgmac_clk", 8),
CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk",
"eth_clk", CCU_SYS_USB_BASE, 10),
CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk",
@@ -396,6 +411,9 @@
init.base = info->base;
init.sys_regs = data->sys_regs;
init.divider = info->divider;
+ } else if (init.type == CCU_DIV_BUF) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
} else {
init.divider = info->divider;
}
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 1788868..b7f8987 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -968,9 +968,9 @@
return div;
}
-static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
- unsigned long parent_rate,
- u32 div)
+static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
+ unsigned long parent_rate,
+ u32 div)
{
const struct bcm2835_clock_data *data = clock->data;
u64 temp;
@@ -1786,7 +1786,7 @@
.load_mask = CM_PLLC_LOADPER,
.hold_mask = CM_PLLC_HOLDPER,
.fixed_divider = 1,
- .flags = CLK_SET_RATE_PARENT),
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
/*
* PLLD is the display PLL, used to drive DSI display panels.
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 274441e..8f0619f 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -736,6 +736,7 @@
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@@ -783,7 +784,12 @@
iclk = &iclk_array[0];
iclk->pll = pll;
- init.name = node->name;
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
init.ops = &iproc_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
@@ -803,13 +809,11 @@
goto err_pll_register;
clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
- const char *clk_name;
-
memset(&init, 0, sizeof(init));
- parent_name = node->name;
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index f89b9cf..969227e 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -139,7 +139,7 @@
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
if (ret)
- return ret;
+ return 0;
return val;
}
@@ -156,7 +156,7 @@
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
if (ret)
- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
clk_hw_get_name(hw), ret);
return ret;
@@ -208,7 +208,7 @@
RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
&min_rate);
if (ret) {
- dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
id, ret);
return ERR_PTR(ret);
}
@@ -251,8 +251,13 @@
struct rpi_firmware_get_clocks_response *clks;
int ret;
+ /*
+ * The firmware doesn't guarantee that the last element of
+ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
+ * zero element as sentinel.
+ */
clks = devm_kcalloc(rpi->dev,
- sizeof(*clks), RPI_FIRMWARE_NUM_CLK_ID,
+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index bccdfa0..67a9edb 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -500,12 +500,15 @@
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!gbase)
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index e9518d3..dd2784b 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -286,19 +286,23 @@
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase) {
+ of_node_put(parent_np);
pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
+ of_node_put(parent_np);
if (!cpupll_base) {
pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 24dab23..9c3305b 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -622,7 +622,7 @@
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
/* P-Bus (BCLK) clock divider */
- hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
+ hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index a2c6486..f8417ee 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -28,11 +28,13 @@
{ .val = 1, .div = 8, },
{ .val = 2, .div = 2, },
{ .val = 3, .div = 1, },
+ { /* sentinel */ }
};
static const struct clk_div_table timer_div_table[] = {
{ .val = 0, .div = 256, },
{ .val = 1, .div = 1, },
+ { /* sentinel */ }
};
struct clps711x_clk {
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index 78d5ea6..2fe36f5 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -207,7 +207,7 @@
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *parent_np;
const struct oxnas_stdclk_data *data;
const struct of_device_id *id;
struct regmap *regmap;
@@ -219,7 +219,9 @@
return -ENODEV;
data = id->data;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 46101c6..585b9ac 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1038,8 +1038,13 @@
*/
static void __init legacy_init_clockgen(struct device_node *np)
{
- if (!clockgen.node)
- _clockgen_init(of_get_parent(np), true);
+ if (!clockgen.node) {
+ struct device_node *parent_np;
+
+ parent_np = of_get_parent(np);
+ _clockgen_init(parent_np, true);
+ of_node_put(parent_np);
+ }
}
/* Legacy node */
@@ -1134,6 +1139,7 @@
sysclk = of_get_child_by_name(clockgen.node, "sysclk");
if (sysclk) {
clk = sysclk_from_fixed(sysclk, name);
+ of_node_put(sysclk);
if (!IS_ERR(clk))
return clk;
}
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 772b48a..382a061 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -789,6 +789,15 @@
u32 r_divider;
u8 r[3];
+ err = regmap_read(output->data->regmap,
+ SI5341_OUT_CONFIG(output), &val);
+ if (err < 0)
+ return err;
+
+ /* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */
+ if (val & SI5341_OUT_CFG_RDIV_FORCE2)
+ return parent_rate / 2;
+
err = regmap_bulk_read(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3);
if (err < 0)
@@ -805,13 +814,6 @@
r_divider += 1;
r_divider <<= 1;
- err = regmap_read(output->data->regmap,
- SI5341_OUT_CONFIG(output), &val);
- if (err < 0)
- return err;
-
- if (val & SI5341_OUT_CFG_RDIV_FORCE2)
- r_divider = 2;
return parent_rate / r_divider;
}
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 4e741f9..eb597ea 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -1116,7 +1116,7 @@
.model = IDT_VC6_5P49V6901,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
- .flags = VC5_HAS_PFD_FREQ_DBL,
+ .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
};
static const struct vc5_chip_info idt_5p49v6965_info = {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b8a0e3d..b355d3d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -631,6 +631,24 @@
*max_rate = min(*max_rate, clk_user->max_rate);
}
+static bool clk_core_check_boundaries(struct clk_core *core,
+ unsigned long min_rate,
+ unsigned long max_rate)
+{
+ struct clk *user;
+
+ lockdep_assert_held(&prepare_lock);
+
+ if (min_rate > core->max_rate || max_rate < core->min_rate)
+ return false;
+
+ hlist_for_each_entry(user, &core->clks, clks_node)
+ if (min_rate > user->max_rate || max_rate < user->min_rate)
+ return false;
+
+ return true;
+}
+
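
clk_core_check_boundaries() is what now rejects a rate-range request that cannot intersect the ranges already imposed on the clock or on its other consumers. Seen from a consumer driver, the visible effect is simply that clk_set_rate_range() may return -EINVAL; a hypothetical example:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical consumer: constrain a previously acquired clock. */
static int foo_limit_clock(struct device *dev, struct clk *clk)
{
	int ret;

	/* Fails with -EINVAL if [50 MHz, 100 MHz] cannot be reconciled
	 * with the boundaries set by the provider or other users. */
	ret = clk_set_rate_range(clk, 50 * 1000 * 1000, 100 * 1000 * 1000);
	if (ret)
		dev_err(dev, "cannot constrain clock: %d\n", ret);
	return ret;
}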
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate)
{
@@ -828,10 +846,9 @@
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
@@ -2332,6 +2349,11 @@
clk->min_rate = min;
clk->max_rate = max;
+ if (!clk_core_check_boundaries(clk->core, min, max)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
rate = clk_core_get_rate_nolock(clk->core);
if (rate < min || rate > max) {
/*
@@ -2360,6 +2382,7 @@
}
}
+out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
@@ -3384,6 +3407,19 @@
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, 0);
+
+ /*
+ * __clk_init_parent() will set the initial req_rate to
+ * 0 if the clock doesn't have clk_ops::recalc_rate and
+ * is an orphan when it's registered.
+ *
+ * 'req_rate' is used by clk_set_rate_range() and
+ * clk_put() to trigger a clk_set_rate() call whenever
+ * the boundaries are modified. Let's make sure
+ * 'req_rate' is set to something non-zero so that
+ * clk_set_rate_range() doesn't drop the frequency.
+ */
+ orphan->req_rate = orphan->rate;
}
}
}
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index fc1bd23..598f3cf 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -280,13 +280,13 @@
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index c4e0f1c..3f6fd7e 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -849,7 +849,6 @@
hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0);
hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0);
hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
- hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0);
hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 0391f5b..36e8d61 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -691,7 +691,7 @@
hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
- hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
+ hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0);
hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 9382dc3..1999c11 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -100,15 +100,11 @@
bool enabled = false;
/*
- * If the SoC has no global TCU clock, we must ungate the channel's
- * clock to be able to access its registers.
- * If we have a TCU clock, it will be enabled automatically as it has
- * been attached to the regmap.
+ * According to the programming manual, a timer channel's registers can
+ * only be accessed when the channel's stop bit is clear.
*/
- if (!tcu->clk) {
- enabled = !!ingenic_tcu_is_enabled(hw);
- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
- }
+ enabled = !!ingenic_tcu_is_enabled(hw);
+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
return enabled;
}
@@ -119,8 +115,7 @@
const struct ingenic_tcu_clk_info *info = tcu_clk->info;
struct ingenic_tcu *tcu = tcu_clk->tcu;
- if (!tcu->clk)
- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
}
static u8 ingenic_tcu_get_parent(struct clk_hw *hw)
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
index 703f876..1ebf740 100644
--- a/drivers/clk/loongson1/clk-loongson1c.c
+++ b/drivers/clk/loongson1/clk-loongson1c.c
@@ -37,6 +37,7 @@
[1] = { .val = 1, .div = 4 },
[2] = { .val = 2, .div = 3 },
[3] = { .val = 3, .div = 3 },
+ [4] = { /* sentinel */ }
};
void __init ls1x_clk_init(void)
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index 37b4162..3a33014 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -18,9 +18,9 @@
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
- &mtk_clk_gate_ops_setclr)
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index cb939c0..89916ac 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -25,7 +25,7 @@
struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
unsigned int reg = data->regofs + ((id / 32) << 4);
- return regmap_write(data->regmap, reg, 1);
+ return regmap_write(data->regmap, reg, BIT(id % 32));
}
static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
@@ -34,7 +34,7 @@
struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4;
- return regmap_write(data->regmap, reg, 1);
+ return regmap_write(data->regmap, reg, BIT(id % 32));
}
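
The fix writes the bit corresponding to the reset line rather than a constant 1: the register offset selects the 32-line bank and the bit index is the position within that bank. Worked through with hypothetical numbers (line id 40, banks starting at regofs 0x30):

#include <stdio.h>

int main(void)
{
	unsigned int id = 40, regofs = 0x30;		  /* illustrative values */
	unsigned int set_reg = regofs + ((id / 32) << 4); /* 0x40: bank 1 set reg */
	unsigned int clr_reg = set_reg + 0x4;		  /* 0x44: bank 1 clear reg */
	unsigned int mask = 1u << (id % 32);		  /* bit 8 -> 0x100 */

	printf("set=0x%x clr=0x%x mask=0x%x\n", set_reg, clr_reg, mask);
	return 0;
}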
static int mtk_reset_assert(struct reset_controller_dev *rcdev,
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 3a6d84c..67d8a0d 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -36,6 +36,7 @@
struct meson_aoclk_reset_controller *rstc;
struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *regmap;
int ret, clkid;
@@ -47,7 +48,9 @@
if (!rstc)
return -ENOMEM;
- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index a7cb1e7..18ae387 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -17,6 +17,7 @@
{
const struct meson_eeclkc_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *map;
int ret, i;
@@ -25,7 +26,9 @@
return -EINVAL;
/* Get the hhi system controller node */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 862f075..1da9d21 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -3735,13 +3735,16 @@
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
+ struct device_node *parent_np;
const char *notifier_clk_name;
struct clk *notifier_clk;
void __iomem *clk_base;
struct regmap *map;
int i, ret;
- map = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(map)) {
pr_info("failed to get HHI regmap - Trying obsolete regs\n");
diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c
index d78ff2f..b5d9365 100644
--- a/drivers/clk/qcom/apss-ipq6018.c
+++ b/drivers/clk/qcom/apss-ipq6018.c
@@ -57,7 +57,7 @@
.parent_hws = (const struct clk_hw *[]){
&apcs_alias0_clk_src.clkr.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 1b2cefe..a8a2cfa 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1521,6 +1521,8 @@
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.pd = {
@@ -1554,6 +1556,7 @@
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1563,6 +1566,7 @@
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 1a571c0..cf265ab 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1379,7 +1379,7 @@
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
/**
- * clk_lucid_pll_configure - configure the lucid pll
+ * clk_trion_pll_configure - configure the trion pll
*
* @pll: clk alpha pll
* @regmap: register map
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
index 59f1af4..9004642 100644
--- a/drivers/clk/qcom/clk-krait.c
+++ b/drivers/clk/qcom/clk-krait.c
@@ -32,11 +32,16 @@
regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
}
krait_set_l2_indirect_reg(mux->offset, regval);
- spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
/* Wait for switch to complete. */
mb();
udelay(1);
+
+ /*
+ * Unlock now to make sure the mux register is not
+ * modified while switching to the new parent.
+ */
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
}
static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 59a5a0f..71a0d30 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -264,7 +264,7 @@
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
- u32 cfg, mask;
+ u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw;
int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
@@ -283,8 +283,17 @@
if (ret)
return ret;
+ /* Calculate 2d value */
+ d_val = f->n;
+
+ n_minus_m = f->n - f->m;
+ n_minus_m *= 2;
+
+ d_val = clamp_t(u32, d_val, f->m, n_minus_m);
+ not2d_val = ~d_val & mask;
+
ret = regmap_update_bits(rcg->clkr.regmap,
- RCG_D_OFFSET(rcg), mask, ~f->n);
+ RCG_D_OFFSET(rcg), mask, not2d_val);
if (ret)
return ret;
}
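
The clamp keeps the duty-cycle (D) register inside the window an MND divider allows: no smaller than M and no larger than 2*(N-M). Worked through for the { 2, 3 } entry added to the frac table below, assuming an 8-bit D field purely for illustration:

#include <stdio.h>

int main(void)
{
	/* New frac entry { 2, 3 }, i.e. m = 2, n = 3; mask = 0xff assumed. */
	unsigned int m = 2, n = 3, mask = 0xff;
	unsigned int d = n;			/* start from n */
	unsigned int hi = 2 * (n - m);		/* upper clamp: 2 */

	if (d < m)
		d = m;
	if (d > hi)
		d = hi;				/* clamped to 2 */

	printf("d=%u not2d=0x%x\n", d, ~d & mask); /* 0xfd written to RCG_D */
	return 0;
}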
@@ -639,6 +648,7 @@
{ 2, 9 },
{ 4, 9 },
{ 1, 1 },
+ { 2, 3 },
{ }
};
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 108fe27..d6d5def 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -60,11 +60,6 @@
{ P_GPLL0_DIV2, 4 },
};
-static const char * const gcc_xo_gpll0[] = {
- "xo",
- "gpll0",
-};
-
static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
@@ -667,6 +662,7 @@
},
.num_parents = 1,
.ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -956,6 +952,11 @@
},
};
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+};
+
static const struct freq_tbl ftbl_pcie_axi_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
@@ -969,7 +970,7 @@
.parent_map = gcc_xo_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie0_axi_clk_src",
- .parent_names = gcc_xo_gpll0,
+ .parent_data = gcc_xo_gpll0,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -1016,7 +1017,7 @@
.parent_map = gcc_xo_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie1_axi_clk_src",
- .parent_names = gcc_xo_gpll0,
+ .parent_data = gcc_xo_gpll0,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -1074,7 +1075,7 @@
.name = "sdcc1_apps_clk_src",
.parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1330,7 +1331,7 @@
.parent_map = gcc_xo_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_ce_clk_src",
- .parent_names = gcc_xo_gpll0,
+ .parent_data = gcc_xo_gpll0,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -1788,8 +1789,10 @@
static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
F(78125000, P_UNIPHY1_RX, 4, 0, 0),
F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
F(156250000, P_UNIPHY1_RX, 2, 0, 0),
F(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
@@ -1828,8 +1831,10 @@
static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
F(78125000, P_UNIPHY1_TX, 4, 0, 0),
F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
F(156250000, P_UNIPHY1_TX, 2, 0, 0),
F(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
@@ -1867,8 +1872,10 @@
static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_RX, 5, 0, 0),
F(25000000, P_UNIPHY2_RX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_RX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_RX, 1, 0, 0),
F(125000000, P_UNIPHY2_RX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_RX, 2, 0, 0),
F(312500000, P_UNIPHY2_RX, 1, 0, 0),
@@ -1907,8 +1914,10 @@
static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_TX, 5, 0, 0),
F(25000000, P_UNIPHY2_TX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_TX, 1, 0, 0),
F(125000000, P_UNIPHY2_TX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_TX, 2, 0, 0),
F(312500000, P_UNIPHY2_TX, 1, 0, 0),
@@ -3346,6 +3355,7 @@
static struct clk_branch gcc_ubi0_ahb_clk = {
.halt_reg = 0x6820c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6820c,
.enable_mask = BIT(0),
@@ -3363,6 +3373,7 @@
static struct clk_branch gcc_ubi0_axi_clk = {
.halt_reg = 0x68200,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68200,
.enable_mask = BIT(0),
@@ -3380,6 +3391,7 @@
static struct clk_branch gcc_ubi0_nc_axi_clk = {
.halt_reg = 0x68204,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68204,
.enable_mask = BIT(0),
@@ -3397,6 +3409,7 @@
static struct clk_branch gcc_ubi0_core_clk = {
.halt_reg = 0x68210,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68210,
.enable_mask = BIT(0),
@@ -3414,6 +3427,7 @@
static struct clk_branch gcc_ubi0_mpt_clk = {
.halt_reg = 0x68208,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68208,
.enable_mask = BIT(0),
@@ -3431,6 +3445,7 @@
static struct clk_branch gcc_ubi1_ahb_clk = {
.halt_reg = 0x6822c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6822c,
.enable_mask = BIT(0),
@@ -3448,6 +3463,7 @@
static struct clk_branch gcc_ubi1_axi_clk = {
.halt_reg = 0x68220,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68220,
.enable_mask = BIT(0),
@@ -3465,6 +3481,7 @@
static struct clk_branch gcc_ubi1_nc_axi_clk = {
.halt_reg = 0x68224,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68224,
.enable_mask = BIT(0),
@@ -3482,6 +3499,7 @@
static struct clk_branch gcc_ubi1_core_clk = {
.halt_reg = 0x68230,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68230,
.enable_mask = BIT(0),
@@ -3499,6 +3517,7 @@
static struct clk_branch gcc_ubi1_mpt_clk = {
.halt_reg = 0x68228,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68228,
.enable_mask = BIT(0),
@@ -4329,8 +4348,7 @@
.parent_map = gcc_xo_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie0_rchng_clk_src",
- .parent_hws = (const struct clk_hw *[]) {
- &gpll0.clkr.hw },
+ .parent_data = gcc_xo_gpll0,
.num_parents = 2,
.ops = &clk_rcg2_ops,
},
@@ -4372,6 +4390,33 @@
},
};
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x4e,
+ .config_ctl_val = 0x200d4aa8,
+ .config_ctl_hi_val = 0x3c2,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x0,
+ .alpha_hi = 0x80,
+ .config_ctl_val = 0x4001055b,
+ .main_output_mask = BIT(0),
+ .pre_div_val = 0x0,
+ .pre_div_mask = GENMASK(14, 12),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .vco_mask = GENMASK(21, 20),
+ .vco_val = 0x0,
+ .alpha_en_mask = BIT(24),
+};
+
static struct clk_hw *gcc_ipq8074_hws[] = {
&gpll0_out_main_div2.hw,
&gpll6_out_main_div2.hw,
@@ -4773,7 +4818,20 @@
static int gcc_ipq8074_probe(struct platform_device *pdev)
{
- return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_ipq8074_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* SW Workaround for UBI32 Huayra PLL */
+ regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+ clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+ &nss_crypto_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap);
}
static struct platform_driver gcc_ipq8074_driver = {
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 144d2ba..463a444 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -108,6 +108,7 @@
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x1dc0,
+ .width = 4,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data)
{
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 892e91b..245150a 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -286,8 +286,8 @@
.name = "uart_group_012",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_UART,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
- .dual.sel = ((0xec / 4) << 5) | 24,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
.dual.group = 0,
},
{
@@ -295,8 +295,8 @@
.name = "uart_group_34567",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_P2_PG,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
- .dual.sel = ((0x34 / 4) << 5) | 30,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
.dual.group = 1,
},
D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index d620bbb..ce81e40 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -41,7 +41,7 @@
{
void __iomem *base;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
+ struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
if (of_find_property(node, "sprd,syscon", NULL)) {
@@ -50,9 +50,10 @@
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
- } else if (of_device_is_compatible(of_get_parent(dev->of_node),
- "syscon")) {
- regmap = device_node_to_regmap(of_get_parent(dev->of_node));
+ } else if (of_device_is_compatible(np = of_get_parent(node), "syscon") ||
+ (of_node_put(np), 0)) {
+ regmap = device_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap from its parent.\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 542b31d..636bcf2 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -109,6 +109,8 @@
spin_lock_init(&data->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
/* one clock/reset pair per word */
count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH);
data->membase = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index bc9e47a..4e2b26e 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1317,6 +1317,7 @@
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
index 745f9fa..733a962 100644
--- a/drivers/clk/tegra/clk-tegra124-emc.c
+++ b/drivers/clk/tegra/clk-tegra124-emc.c
@@ -191,6 +191,7 @@
tegra->emc = platform_get_drvdata(pdev);
if (!tegra->emc) {
+ put_device(&pdev->dev);
pr_err("%s: cannot find EMC driver\n", __func__);
return NULL;
}
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 3efc651..d60ee6e 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1128,6 +1128,7 @@
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
BUG();
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 68cbb98..1a0016d 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3697,6 +3697,7 @@
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 8d4c08b..e2e59d7 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -251,14 +251,16 @@
if (rc) {
pr_err("%s: failed to lookup atl clock %d\n", __func__,
i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto pm_put;
}
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto pm_put;
}
cdesc = to_atl_desc(__clk_get_hw(clk));
@@ -291,8 +293,9 @@
if (cdesc->enabled)
atl_clk_enable(__clk_get_hw(clk));
}
- pm_runtime_put_sync(cinfo->dev);
+pm_put:
+ pm_runtime_put_sync(cinfo->dev);
return ret;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 3da33c7..29eafab 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -131,7 +131,7 @@
void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
{
struct ti_dt_clk *c;
- struct device_node *node, *parent;
+ struct device_node *node, *parent, *child;
struct clk *clk;
struct of_phandle_args clkspec;
char buf[64];
@@ -171,10 +171,13 @@
node = of_find_node_by_name(NULL, buf);
if (num_args && compat_mode) {
parent = node;
- node = of_get_child_by_name(parent, "clock");
- if (!node)
- node = of_get_child_by_name(parent, "clk");
- of_node_put(parent);
+ child = of_get_child_by_name(parent, "clock");
+ if (!child)
+ child = of_get_child_by_name(parent, "clk");
+ if (child) {
+ of_node_put(parent);
+ node = child;
+ }
}
clkspec.np = node;
diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
index 5319cd3..3bc55ab 100644
--- a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
+++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c
@@ -24,6 +24,7 @@
init.name = name;
init.ops = &clk_fixed_rate_ops;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index db8d0d7..9c82ae2 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -687,6 +687,13 @@
FIELD_PREP(CLK_ATTR_NODE_INDEX, i);
zynqmp_pm_clock_get_name(clock[i].clk_id, &name);
+
+ /*
+ * Terminate with a NULL character in case the name provided by firmware
+ * is longer and truncated due to the size limit.
+ */
+ name.name[sizeof(name.name) - 1] = '\0';
+
if (!strcmp(name.name, RESERVED_CLK_NAME))
continue;
strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index abe6afb..2ae7f91 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -99,26 +99,25 @@
unsigned long *prate)
{
u32 fbdiv;
- long rate_div, f;
+ u32 mult, div;
- /* Enable the fractional mode if needed */
- rate_div = (rate * FRAC_DIV) / *prate;
- f = rate_div % FRAC_DIV;
- if (f) {
- if (rate > PS_PLL_VCO_MAX) {
- fbdiv = rate / PS_PLL_VCO_MAX;
- rate = rate / (fbdiv + 1);
- }
- if (rate < PS_PLL_VCO_MIN) {
- fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * fbdiv;
- }
- return rate;
+ /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
+ if (rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
+ rate = rate / div;
+ }
+ if (rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
+ rate = rate * mult;
}
fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
- fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- return *prate * fbdiv;
+ if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ rate = *prate * fbdiv;
+ }
+
+ return rate;
}
/**
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index eb596ff..279ddff 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -229,8 +229,10 @@
int ret;
ret = kstrtouint(arg, 16, &base);
- if (ret)
- return ret;
+ if (ret) {
+ pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg);
+ return 1;
+ }
pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
base);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index fabad79..df194b0 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -494,11 +494,14 @@
return 0;
}
-static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np)
{
- int err, cpu;
struct clk *mct_clk, *tick_clk;
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: unable to ioremap mct address space\n", __func__);
+
tick_clk = of_clk_get_by_name(np, "fin_pll");
if (IS_ERR(tick_clk))
panic("%s: unable to determine tick clock rate\n", __func__);
@@ -509,9 +512,32 @@
panic("%s: unable to retrieve mct clock instance\n", __func__);
clk_prepare_enable(mct_clk);
- reg_base = base;
- if (!reg_base)
- panic("%s: unable to ioremap mct address space\n", __func__);
+ return 0;
+}
+
+static int __init exynos4_timer_interrupts(struct device_node *np,
+ unsigned int int_type)
+{
+ int nr_irqs, i, err, cpu;
+
+ mct_int_type = int_type;
+
+ /* This driver uses only one global timer interrupt */
+ mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
+
+ /*
+ * Find out the number of local irqs specified. The local
+ * timer irqs are specified after the four global timer
+ * irqs are specified.
+ */
+ nr_irqs = of_irq_count(np);
+ if (nr_irqs > ARRAY_SIZE(mct_irqs)) {
+ pr_err("exynos-mct: too many (%d) interrupts configured in DT\n",
+ nr_irqs);
+ nr_irqs = ARRAY_SIZE(mct_irqs);
+ }
+ for (i = MCT_L0_IRQ; i < nr_irqs; i++)
+ mct_irqs[i] = irq_of_parse_and_map(np, i);
if (mct_int_type == MCT_INT_PPI) {
@@ -522,11 +548,14 @@
mct_irqs[MCT_L0_IRQ], err);
} else {
for_each_possible_cpu(cpu) {
- int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ int mct_irq;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
pcpu_mevt->evt.irq = -1;
+ if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
+ break;
+ mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
@@ -571,24 +600,13 @@
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
- u32 nr_irqs, i;
int ret;
- mct_int_type = int_type;
+ ret = exynos4_timer_resources(np);
+ if (ret)
+ return ret;
- /* This driver uses only one global timer interrupt */
- mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
-
- /*
- * Find out the number of local irqs specified. The local
- * timer irqs are specified after the four global timer
- * irqs are specified.
- */
- nr_irqs = of_irq_count(np);
- for (i = MCT_L0_IRQ; i < nr_irqs; i++)
- mct_irqs[i] = irq_of_parse_and_map(np, i);
-
- ret = exynos4_timer_resources(np, of_iomap(np, 0));
+ ret = exynos4_timer_interrupts(np, int_type);
if (ret)
return ret;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ba04cb3..7c617d8 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -472,4 +472,3 @@
hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
-EXPORT_SYMBOL_GPL(hv_init_clocksource);
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
index 9396745..ad904bb 100644
--- a/drivers/clocksource/timer-ixp4xx.c
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -258,7 +258,6 @@
}
ixp4xx_timer_register(base, timer_irq, timer_freq);
}
-EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
#ifdef CONFIG_OF
static __init int ixp4xx_of_timer_init(struct device_node *np)
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index 59e11ca..5c9485c 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -121,7 +121,7 @@
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
-static u64 mchp_pit64b_sched_read_clk(void)
+static u64 notrace mchp_pit64b_sched_read_clk(void)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 572da47..b965f20 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -157,9 +157,9 @@
of_base->base = of_base->name ?
of_io_request_and_map(np, of_base->index, of_base->name) :
of_iomap(np, of_base->index);
- if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", of_base->name);
- return PTR_ERR(of_base->base);
+ if (IS_ERR_OR_NULL(of_base->base)) {
+ pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name);
+ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
}
return 0;
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index 56c0cc3..d514b44 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -236,7 +236,7 @@
}
rps->irq = irq_of_parse_and_map(np, 0);
- if (rps->irq < 0) {
+ if (!rps->irq) {
ret = -EINVAL;
goto err_iomap;
}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c51c5ed..0e7748d 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -32,7 +32,7 @@
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
.rating = 100,
.set_next_event = riscv_clock_next_event,
};
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 6e8ad4a..bedd357 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -274,6 +274,11 @@
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
+ if (initialized) {
+ pr_debug("%pOF: skipping further SP804 timer device\n", np);
+ return 0;
+ }
+
base = of_iomap(np, 0);
if (!base)
return -ENXIO;
@@ -285,11 +290,6 @@
writel(0, timer1_base + timer->ctrl);
writel(0, timer2_base + timer->ctrl);
- if (initialized || !of_device_is_available(np)) {
- ret = -EINVAL;
- goto err;
- }
-
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 1fccb45..2737407 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -694,9 +694,9 @@
return 0;
}
- if (pa == 0x48034000) /* dra7 dmtimer3 */
+ if (pa == 0x4882c000) /* dra7 dmtimer15 */
return dmtimer_percpu_timer_init(np, 0);
- else if (pa == 0x48036000) /* dra7 dmtimer4 */
+ else if (pa == 0x4882e000) /* dra7 dmtimer16 */
return dmtimer_percpu_timer_init(np, 1);
return 0;
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 710acc0..85fbbac 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -29,7 +29,6 @@
int qdec_mode;
int num_channels;
int channel[2];
- bool trig_inverted;
};
enum mchp_tc_count_function {
@@ -163,7 +162,7 @@
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
- if (priv->trig_inverted)
+ if (signal->id == 1)
sigstatus = (sr & ATMEL_TC_MTIOB);
else
sigstatus = (sr & ATMEL_TC_MTIOA);
@@ -181,6 +180,17 @@
struct mchp_tc_data *const priv = counter->priv;
u32 cmr;
+ if (priv->qdec_mode) {
+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ }
+
+ /* Only TIOA signal is evaluated in non-QDEC mode */
+ if (synapse->signal->id != 0) {
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
+ return 0;
+ }
+
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
switch (cmr & ATMEL_TC_ETRGEDG) {
@@ -209,8 +219,8 @@
struct mchp_tc_data *const priv = counter->priv;
u32 edge = ATMEL_TC_ETRGEDG_NONE;
- /* QDEC mode is rising edge only */
- if (priv->qdec_mode)
+ /* QDEC mode is rising edge only; only TIOA is handled in non-QDEC mode */
+ if (priv->qdec_mode || synapse->signal->id != 0)
return -EINVAL;
switch (action) {
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index a310372..82f6592 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -44,6 +44,8 @@
bool need_voltage_tracking;
};
+static struct platform_device *cpufreq_pdev;
+
static LIST_HEAD(dvfs_info_list);
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
@@ -546,7 +548,6 @@
{
struct device_node *np;
const struct of_device_id *match;
- struct platform_device *pdev;
int err;
np = of_find_node_by_path("/");
@@ -570,15 +571,23 @@
* and the device registration codes are put here to handle defer
* probing.
*/
- pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ cpufreq_pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
+ if (IS_ERR(cpufreq_pdev)) {
pr_err("failed to register mtk-cpufreq platform device\n");
- return PTR_ERR(pdev);
+ platform_driver_unregister(&mtk_cpufreq_platdrv);
+ return PTR_ERR(cpufreq_pdev);
}
return 0;
}
-device_initcall(mtk_cpufreq_driver_init);
+module_init(mtk_cpufreq_driver_init)
+
+static void __exit mtk_cpufreq_driver_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&mtk_cpufreq_platdrv);
+}
+module_exit(mtk_cpufreq_driver_exit)
MODULE_DESCRIPTION("MediaTek CPUFreq driver");
MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 73621bc..3704476 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -471,6 +471,10 @@
if (slew_done_gpio_np)
slew_done_gpio = read_gpio(slew_done_gpio_np);
+ of_node_put(volt_gpio_np);
+ of_node_put(freq_gpio_np);
+ of_node_put(slew_done_gpio_np);
+
/* If we use the frequency GPIOs, calculate the min/max speeds based
* on the bus frequencies
*/
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index fba9937..9b3d247 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -130,7 +130,7 @@
}
/* Check PVS_BLOW_STATUS */
- pte_efuse = *(((u32 *)buf) + 4);
+ pte_efuse = *(((u32 *)buf) + 1);
pte_efuse &= BIT(21);
if (pte_efuse) {
dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
@@ -215,6 +215,7 @@
int speed = 0, pvs = 0, pvs_ver = 0;
u8 *speedbin;
size_t len;
+ int ret = 0;
speedbin = nvmem_cell_read(speedbin_nvmem, &len);
@@ -232,7 +233,8 @@
break;
default:
dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto len_error;
}
snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
@@ -240,8 +242,9 @@
drv->versions = (1 << speed);
+len_error:
kfree(speedbin);
- return 0;
+ return ret;
}
static const struct qcom_cpufreq_match_data match_data_kryo = {
@@ -264,7 +267,8 @@
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
struct device *cpu_dev;
- char *pvs_name = "speedXX-pvsXX-vXX";
+ char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+ char *pvs_name = pvs_name_buffer;
unsigned cpu;
const struct of_device_id *match;
int ret;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 6b6b20d..573b417 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,6 +275,7 @@
np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
if (np) {
+ of_node_put(np);
dev_info(&pdev->dev, "Disabling due to erratum A-008083");
return -ENODEV;
}
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 2deed8d..75e1bf3 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -98,8 +98,10 @@
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret)
+ if (ret) {
+ kfree(opp_tables);
return ret;
+ }
snprintf(name, MAX_NAME_LEN, "speed%d", speed);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 33707a2..64133d4 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -280,7 +281,9 @@
flow = rctx->flow;
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 4c5a2c1..62c07a7 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -412,6 +413,8 @@
theend:
kfree(buf);
kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 7c355bc..d095499 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -11,6 +11,7 @@
* You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -92,6 +93,69 @@
return err;
}
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ss_dev *ss = op->ss;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct scatterlist *sg = areq->src;
+ unsigned int todo, offset;
+ unsigned int len = areq->cryptlen;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+ int i = 0;
+ u32 a;
+ int err;
+
+ rctx->ivlen = ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ offset = areq->cryptlen - ivsize;
+ scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+ ivsize, 0);
+ }
+
+ /* we need to copy all IVs from source in case DMA is bidirectional */
+ while (sg && len) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
+ if (i == 0)
+ memcpy(sf->iv[0], areq->iv, ivsize);
+ a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, a)) {
+ memzero_explicit(sf->iv[i], ivsize);
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
+ goto dma_iv_error;
+ }
+ rctx->p_iv[i] = a;
+ /* we only need to set up all the other IVs in the decrypt path */
+ if (rctx->op_dir & SS_ENCRYPTION)
+ return 0;
+ todo = min(len, sg_dma_len(sg));
+ len -= todo;
+ i++;
+ if (i < MAX_SG) {
+ offset = sg->length - ivsize;
+ scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+ }
+ rctx->niv = i;
+ sg = sg_next(sg);
+ }
+
+ return 0;
+dma_iv_error:
+ i--;
+ while (i >= 0) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ i--;
+ }
+ return err;
+}
+
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -100,9 +164,9 @@
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
- void *backup_iv = NULL;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
@@ -133,30 +197,9 @@
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
- rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!rctx->biv) {
- err = -ENOMEM;
+ err = sun8i_ss_setup_ivs(areq);
+ if (err)
goto theend_key;
- }
- if (rctx->op_dir & SS_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv) {
- err = -ENOMEM;
- goto theend_key;
- }
- offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(backup_iv, areq->src, offset,
- ivsize, 0);
- }
- memcpy(rctx->biv, areq->iv, ivsize);
- rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ss->dev, rctx->p_iv)) {
- dev_err(ss->dev, "Cannot DMA MAP IV\n");
- err = -ENOMEM;
- goto theend_iv;
- }
}
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
@@ -239,21 +282,19 @@
}
theend_iv:
- if (rctx->p_iv)
- dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
- DMA_TO_DEVICE);
-
if (areq->iv && ivsize > 0) {
- if (rctx->biv) {
- offset = areq->cryptlen - ivsize;
- if (rctx->op_dir & SS_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
- } else {
- scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
- ivsize, 0);
- }
- kfree(rctx->biv);
+ for (i = 0; i < rctx->niv; i++) {
+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+ memzero_explicit(sf->iv[i], ivsize);
+ }
+
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & SS_DECRYPTION) {
+ memcpy(areq->iv, sf->biv, ivsize);
+ memzero_explicit(sf->biv, ivsize);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
}
}
@@ -271,7 +312,9 @@
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = sun8i_ss_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 80e8906..47b5828 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -30,6 +30,8 @@
static const struct ss_variant ss_a80_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
@@ -64,6 +66,7 @@
const char *name)
{
int flow = rctx->flow;
+ unsigned int ivlen = rctx->ivlen;
u32 v = SS_START;
int i;
@@ -102,15 +105,14 @@
mutex_lock(&ss->mlock);
writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
- if (i == 0) {
- if (rctx->p_iv)
- writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
- } else {
- if (rctx->biv) {
- if (rctx->op_dir == SS_ENCRYPTION)
- writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ if (ivlen) {
+ if (rctx->op_dir == SS_ENCRYPTION) {
+ if (i == 0)
+ writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
else
- writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
+ } else {
+ writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
}
}
@@ -462,7 +464,7 @@
*/
static int allocate_flows(struct sun8i_ss_dev *ss)
{
- int i, err;
+ int i, j, err;
ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
GFP_KERNEL);
@@ -472,6 +474,36 @@
for (i = 0; i < MAXFLOW; i++) {
init_completion(&ss->flows[i].complete);
+ ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].biv) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+
+ for (j = 0; j < MAX_SG; j++) {
+ ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].iv[j]) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ }
+
+ /* the padding could be up to two blocks. */
+ ss->flows[i].pad = devm_kmalloc(ss->dev, SHA256_BLOCK_SIZE * 2,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].pad) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+ ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (!ss->flows[i].result) {
+ err = -ENOMEM;
+ goto error_engine;
+ }
+
ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
if (!ss->flows[i].engine) {
dev_err(ss->dev, "Cannot allocate engine\n");
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 756d5a7..9804079 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,6 +9,7 @@
*
* You could find the datasheet in Documentation/arm/sunxi.rst
*/
+#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -340,18 +341,11 @@
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
- /* the padding could be up to two block. */
- pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
- if (!pad)
- return -ENOMEM;
+ result = ss->flows[rctx->flow].result;
+ pad = ss->flows[rctx->flow].pad;
+ memset(pad, 0, algt->alg.hash.halg.base.cra_blocksize * 2);
bf = (__le32 *)pad;
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- kfree(pad);
- return -ENOMEM;
- }
-
for (i = 0; i < MAX_SG; i++) {
rctx->t_dst[i].addr = 0;
rctx->t_dst[i].len = 0;
@@ -378,13 +372,21 @@
}
len = areq->nbytes;
- for_each_sg(areq->src, sg, nr_sgs, i) {
+ sg = areq->src;
+ i = 0;
+ while (len > 0 && sg) {
+ if (sg_dma_len(sg) == 0) {
+ sg = sg_next(sg);
+ continue;
+ }
rctx->t_src[i].addr = sg_dma_address(sg);
todo = min(len, sg_dma_len(sg));
rctx->t_src[i].len = todo / 4;
len -= todo;
rctx->t_dst[i].addr = addr_res;
rctx->t_dst[i].len = digestsize / 4;
+ sg = sg_next(sg);
+ i++;
}
if (len > 0) {
dev_err(ss->dev, "remaining len %d\n", len);
@@ -438,8 +440,8 @@
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
- kfree(pad);
- kfree(result);
+ local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 1a66457..a97a790 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -120,11 +120,19 @@
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
* @stat_req: number of request done by this flow
+ * @iv: list of IVs to use for each step
+ * @biv: buffer which contains the backed-up IV
+ * @pad: padding buffer for hash operations
+ * @result: buffer for storing the result of hash operations
*/
struct sun8i_ss_flow {
struct crypto_engine *engine;
struct completion complete;
int status;
+ u8 *iv[MAX_SG];
+ u8 *biv;
+ void *pad;
+ void *result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
#endif
@@ -163,28 +171,28 @@
* @t_src: list of mapped SGs with their size
* @t_dst: list of mapped SGs with their size
* @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
+ * @p_iv: DMA address of the IVs
+ * @niv: Number of IVs DMA mapped
* @method: current algorithm for this request
* @op_mode: op_mode for this request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of biv
+ * @ivlen: size of IVs
* @keylen: keylen for this request
- * @biv: buffer which contain the IV
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
struct sginfo t_src[MAX_SG];
struct sginfo t_dst[MAX_SG];
u32 p_key;
- u32 p_iv;
+ u32 p_iv[MAX_SG];
+ int niv;
u32 method;
u32 op_mode;
u32 op_dir;
int flow;
unsigned int ivlen;
unsigned int keylen;
- void *biv;
struct skcipher_request fallback_req; // keep at the end
};
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 8b5e073..652e72d 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -265,7 +265,9 @@
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
err = meson_cipher(breq);
+ local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index ca0361b..f87aa21 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -609,6 +609,13 @@
}
#endif
+static bool needs_entropy_delay_adjustment(void)
+{
+ if (of_machine_is_compatible("fsl,imx6sx"))
+ return true;
+ return false;
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -855,6 +862,8 @@
* Also, if a handle was instantiated, do not change
* the TRNG parameters.
*/
+ if (needs_entropy_delay_adjustment())
+ ent_delay = 12000;
if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
dev_info(dev,
"Entropy delay = %u\n",
@@ -871,6 +880,15 @@
*/
ret = instantiate_rng(dev, inst_handles,
gen_sk);
+ /*
+ * Entropy delay is determined via TRNG characterization.
+ * TRNG characterization is run across different voltages
+ * and temperatures.
+ * If the worst-case value for ent_dly is identified,
+ * the loop can be skipped for that platform.
+ */
+ if (needs_entropy_delay_adjustment())
+ break;
if (ret == -EAGAIN)
/*
* if here, the loop will rerun,
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 7819490..d936219 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -254,6 +254,7 @@
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -264,11 +265,12 @@
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 0770a83..b9299de 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -633,6 +633,24 @@
return 0;
}
+static void ccp_dma_release(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+
+ tasklet_kill(&chan->cleanup_tasklet);
+ list_del_rcu(&dma_chan->device_node);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -737,6 +755,7 @@
return 0;
err_reg:
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
err_cache:
@@ -752,6 +771,7 @@
if (!dmaengine)
return;
+ ccp_dma_release(ccp);
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 57b57d4..ed39a22 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -278,7 +278,7 @@
struct sev_device *sev = psp_master->sev_data;
int ret;
- if (sev->state == SEV_STATE_UNINIT)
+ if (!sev || sev->state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a5e041d..6140e49 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -258,6 +258,13 @@
{
int ret = 0;
+ if (!nbytes) {
+ *mapped_nents = 0;
+ *lbytes = 0;
+ *nents = 0;
+ return 0;
+ }
+
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
@@ -349,12 +356,14 @@
req_ctx->mlli_params.mlli_dma_addr);
}
- dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
if (src != dst) {
- dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ } else {
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
}
}
@@ -370,6 +379,7 @@
u32 dummy = 0;
int rc = 0;
u32 mapped_nents = 0;
+ int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
mlli_params->curr_pool = NULL;
@@ -392,7 +402,7 @@
}
/* Map the src SGL */
- rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (rc)
goto cipher_exit;
@@ -409,7 +419,7 @@
}
} else {
/* Map the dst sg */
- rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
&req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents);
if (rc)
@@ -449,6 +459,7 @@
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -507,13 +518,11 @@
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -836,7 +845,7 @@
else
size_for_map -= authsize;
- rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
&areq_ctx->dst.mapped_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
@@ -1049,7 +1058,8 @@
size_to_map += authsize;
}
- rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ rc = cc_map_sg(dev, req->src, size_to_map,
+ (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
&areq_ctx->src.mapped_nents,
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index dafa657..c289e4d 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -254,8 +254,8 @@
&ctx_p->user.key_dma_addr);
/* Free key buffer in context */
- kfree_sensitive(ctx_p->user.key);
dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ kfree_sensitive(ctx_p->user.key);
}
struct tdes_keys {
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a87f990..90c13eb 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -210,7 +210,7 @@
if (unlikely(shift < 0))
return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
if (unlikely(!ptr))
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 8ca945a..2066f8d 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -449,7 +449,7 @@
*/
}
- mutex_lock(&ctx->queue->queuelock);
+ spin_lock_bh(&ctx->queue->queuelock);
/* Put the IV in place for chained cases */
switch (ctx->cipher_alg) {
case SEC_C_AES_CBC_128:
@@ -509,7 +509,7 @@
list_del(&backlog_req->backlog_head);
}
}
- mutex_unlock(&ctx->queue->queuelock);
+ spin_unlock_bh(&ctx->queue->queuelock);
mutex_lock(&sec_req->lock);
list_del(&sec_req_el->head);
@@ -798,7 +798,7 @@
*/
/* Grab a big lock for a long time to avoid concurrency issues */
- mutex_lock(&queue->queuelock);
+ spin_lock_bh(&queue->queuelock);
/*
* Can go on to queue if we have space in either:
@@ -814,15 +814,15 @@
ret = -EBUSY;
if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
list_add_tail(&sec_req->backlog_head, &ctx->backlog);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto out;
}
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto err_free_elements;
}
ret = sec_send_request(sec_req, queue);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
if (ret)
goto err_free_elements;
@@ -881,7 +881,7 @@
if (IS_ERR(ctx->queue))
return PTR_ERR(ctx->queue);
- mutex_init(&ctx->queue->queuelock);
+ spin_lock_init(&ctx->queue->queuelock);
ctx->queue->havesoftqueue = false;
return 0;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 4d9063a..0bf4d7c 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -347,7 +347,7 @@
DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
bool havesoftqueue;
- struct mutex queuelock;
+ spinlock_t queuelock;
void *shadow[SEC_QUEUE_LEN];
};
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 037762b..249735b 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -4,8 +4,6 @@
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H
-#include <linux/list.h>
-
#include "../qm.h"
#include "sec_crypto.h"
@@ -50,7 +48,7 @@
int err_type;
int req_id;
- int flag;
+ u32 flag;
/* Status of the SEC request */
bool fake_busy;
@@ -105,7 +103,7 @@
struct idr req_idr;
struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
- struct mutex req_lock;
+ spinlock_t req_lock;
struct list_head backlog;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
@@ -140,6 +138,7 @@
bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
+ struct device *dev;
};
enum sec_endian {
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 630dcb5..2dbec63 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -42,7 +42,6 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
-#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
@@ -89,13 +88,13 @@
{
int req_id;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
- dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ dev_err(req->ctx->dev, "alloc req id fail!\n");
return req_id;
}
@@ -110,16 +109,16 @@
int req_id = req->req_id;
if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
- dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
qp_ctx->req_list[req_id] = NULL;
req->qp_ctx = NULL;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
}
static int sec_aead_verify(struct sec_req *req)
@@ -136,7 +135,7 @@
aead_req->cryptlen + aead_req->assoclen -
authsize);
if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
- dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ dev_err(req->ctx->dev, "aead verify failure!\n");
return -EBADMSG;
}
@@ -175,7 +174,7 @@
if (unlikely(req->err_type || done != SEC_SQE_DONE ||
(ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
(ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
- dev_err(SEC_CTX_DEV(ctx),
+ dev_err_ratelimited(ctx->dev,
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
@@ -202,7 +201,7 @@
!(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
if (ctx->fake_req_limit <=
@@ -210,10 +209,10 @@
list_add_tail(&req->backlog_head, &qp_ctx->backlog);
atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
return -EBUSY;
}
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(ret == -EBUSY))
return -ENOBUFS;
@@ -323,8 +322,8 @@
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
struct sec_alg_res *res = qp_ctx->res;
+ struct device *dev = ctx->dev;
int ret;
ret = sec_alloc_civ_resource(dev, res);
@@ -357,7 +356,7 @@
static void sec_alg_resource_free(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
sec_free_civ_resource(dev, qp_ctx->res);
@@ -370,7 +369,7 @@
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
struct sec_qp_ctx *qp_ctx;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -383,7 +382,7 @@
qp_ctx->qp = qp;
qp_ctx->ctx = ctx;
- mutex_init(&qp_ctx->req_lock);
+ spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
INIT_LIST_HEAD(&qp_ctx->backlog);
@@ -426,7 +425,7 @@
static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
@@ -450,6 +449,7 @@
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
@@ -474,11 +474,9 @@
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
-
kfree(ctx->qp_ctx);
err_destroy_qps:
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
-
return ret;
}
@@ -497,7 +495,7 @@
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
if (!c_ctx->c_key)
return -ENOMEM;
@@ -510,7 +508,7 @@
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
}
@@ -518,7 +516,7 @@
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
&a_ctx->a_key_dma, GFP_KERNEL);
if (!a_ctx->a_key)
return -ENOMEM;
@@ -530,8 +528,8 @@
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
+ dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
}
@@ -631,12 +629,13 @@
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
int ret;
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ dev_err(dev, "xts mode key err!\n");
return ret;
}
}
@@ -657,7 +656,7 @@
}
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ dev_err(dev, "set sec key err!\n");
return ret;
}
@@ -689,7 +688,7 @@
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -699,9 +698,8 @@
copy_size = c_req->c_len;
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf,
- copy_size);
-
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
@@ -725,7 +723,7 @@
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -737,7 +735,6 @@
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
qp_ctx->res[req_id].pbuf,
copy_size);
-
if (unlikely(pbuf_length != copy_size))
dev_err(dev, "copy pbuf data to dst error!\n");
@@ -750,7 +747,7 @@
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int ret;
if (req->use_pbuf) {
@@ -805,7 +802,7 @@
struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
@@ -889,6 +886,7 @@
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
int ret;
@@ -902,13 +900,13 @@
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ dev_err(dev, "set sec cipher key err!\n");
goto bad_key;
}
ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ dev_err(dev, "set sec auth key err!\n");
goto bad_key;
}
@@ -1061,7 +1059,7 @@
sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
cryptlen - iv_size);
if (unlikely(sz != iv_size))
- dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+ dev_err(req->ctx->dev, "copy output iv error!\n");
}
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1069,7 +1067,7 @@
{
struct sec_req *backlog_req = NULL;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
if (ctx->fake_req_limit >=
atomic_read(&qp_ctx->qp->qp_status.used) &&
!list_empty(&qp_ctx->backlog)) {
@@ -1077,7 +1075,7 @@
typeof(*backlog_req), backlog_head);
list_del(&backlog_req->backlog_head);
}
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
return backlog_req;
}
@@ -1160,7 +1158,7 @@
ret = sec_skcipher_bd_fill(ctx, req);
if (unlikely(ret)) {
- dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ dev_err(ctx->dev, "skcipher bd fill is error!\n");
return ret;
}
@@ -1194,7 +1192,7 @@
a_req->assoclen);
if (unlikely(sz != authsize)) {
- dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
}
}
@@ -1259,7 +1257,7 @@
ret = ctx->req_op->bd_send(ctx, req);
if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
- dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1326,7 +1324,7 @@
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ dev_err(ctx->dev, "get error aead iv size!\n");
return -EINVAL;
}
@@ -1376,7 +1374,7 @@
auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(auth_ctx->hash_tfm)) {
- dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
return PTR_ERR(auth_ctx->hash_tfm);
}
@@ -1410,7 +1408,7 @@
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!sk_req->src || !sk_req->dst)) {
@@ -1533,14 +1531,15 @@
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
- u8 c_alg = ctx->c_ctx.c_alg;
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
+ struct device *dev = ctx->dev;
+ u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!req->src || !req->dst || !req->cryptlen ||
req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ dev_err(dev, "aead input param error!\n");
return -EINVAL;
}
@@ -1552,7 +1551,7 @@
/* Support AES only */
if (unlikely(c_alg != SEC_CALG_AES)) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ dev_err(dev, "aead crypto alg error!\n");
return -EINVAL;
}
@@ -1562,7 +1561,7 @@
sreq->c_req.c_len = req->cryptlen - authsize;
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ dev_err(dev, "aead crypto length error!\n");
return -EINVAL;
}
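The req_lock conversion in the sec_crypto.c hunks above trades a mutex for a BH-disabling spinlock, which suggests the backlog is also drained from the QP completion path in softirq context, where a sleeping lock cannot be taken. A minimal sketch of the resulting dequeue pattern, with illustrative type and field names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head backlog_head;
};

struct demo_qp_ctx {
	spinlock_t req_lock;
	struct list_head backlog;
};

/* Pop one backlogged request; safe from process or softirq context
 * because only spin_lock_bh() is taken. */
static struct demo_req *demo_backlog_pop(struct demo_qp_ctx *qp_ctx)
{
	struct demo_req *req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (!list_empty(&qp_ctx->backlog)) {
		req = list_first_entry(&qp_ctx->backlog,
				       struct demo_req, backlog_head);
		list_del(&req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return req;
}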
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index b2786e1..20f11e5 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -6,6 +6,7 @@
#define SEC_IV_SIZE 24
#define SEC_MAX_KEY_SIZE 64
+#define SEC_MAX_AKEY_SIZE 128
#define SEC_COMM_SCENE 0
enum sec_calg {
@@ -64,7 +65,6 @@
};
struct sec_sqe_type2 {
-
/*
* mac_len: 0~4 bits
* a_key_len: 5~10 bits
@@ -120,7 +120,6 @@
/* c_pad_len_field: 0~1 bits */
__le16 c_pad_len_field;
-
__le64 long_a_data_len;
__le64 a_ivin_addr;
__le64 a_key_addr;
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 08b4660..5db7cde 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -107,12 +107,12 @@
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_ushort(val, kp);
}
static const struct kernel_param_ops sgl_sge_nr_ops = {
.set = sgl_sge_nr_set,
- .get = param_get_int,
+ .get = param_get_ushort,
};
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
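sgl_sge_nr is a u16, so the parameter ops above are switched to the ushort helpers; param_set_int()/param_get_int() would access four bytes behind a two-byte variable. A short sketch of a matching-width module parameter, with illustrative names:

#include <linux/moduleparam.h>

static u16 demo_sge_nr = 10;

static int demo_sge_nr_set(const char *val, const struct kernel_param *kp)
{
	/* range validation would go here before accepting the value */
	return param_set_ushort(val, kp);
}

static const struct kernel_param_ops demo_sge_nr_ops = {
	.set = demo_sge_nr_set,
	.get = param_get_ushort,
};

module_param_cb(sge_nr, &demo_sge_nr_ops, &demo_sge_nr, 0444);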
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 2e15621..fbcf52e 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1834,6 +1834,8 @@
{},
};
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
+
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
.remove = safexcel_remove,
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 56d5ccb..1c9af02 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -381,7 +381,7 @@
u32 x;
x = ipad[i] ^ ipad[i + 4];
- cache[i] ^= swab(x);
+ cache[i] ^= swab32(x);
}
}
cache_len = AES_BLOCK_SIZE;
@@ -819,7 +819,7 @@
u32 *result = (void *)areq->result;
/* K3 */
- result[i] = swab(ctx->base.ipad.word[i + 4]);
+ result[i] = swab32(ctx->base.ipad.word[i + 4]);
}
areq->result[0] ^= 0x80; // 10- padding
crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
@@ -2104,7 +2104,7 @@
crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->base.ipad.word[i] = swab(key_tmp[i]);
+ ctx->base.ipad.word[i] = swab32(key_tmp[i]);
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
@@ -2187,7 +2187,7 @@
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
+ ctx->base.ipad.word[i + 8] = swab32(aes.key_enc[i]);
/* precompute the CMAC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b4a6ff9..596a8c7 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -614,7 +614,6 @@
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b4821..a765eef 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -286,6 +286,7 @@
struct tar_ucode_info_t *tar_info;
struct otx_cpt_ucode_hdr *ucode_hdr;
int ucode_type, ucode_size;
+ unsigned int code_length;
/*
* If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@
if (get_ucode_type(ucode_hdr, &ucode_type))
return 0;
- ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Invalid code_length %u\n", code_length);
+ return -EINVAL;
+ }
+
+ ucode_size = code_length * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@
{
struct otx_cpt_ucode_hdr *ucode_hdr;
const struct firmware *fw;
+ unsigned int code_length;
int ret;
set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@
ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
ucode->ver_num = ucode_hdr->ver_num;
- ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ ucode->size = code_length * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 5edc91c..a9d3e67 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -330,7 +330,7 @@
memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
}
- for_each_sg(req->src, src, sg_nents(src), i) {
+ for_each_sg(req->src, src, sg_nents(req->src), i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 13c65de..8a4f10b 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -827,7 +827,7 @@
goto err_out;
vas_init_rx_win_attr(&rxattr, coproc->ct);
- rxattr.rx_fifo = (void *)rx_fifo;
+ rxattr.rx_fifo = rx_fifo;
rxattr.rx_fifo_size = fifo_size;
rxattr.lnotify_lpid = lpid;
rxattr.lnotify_pid = pid;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 06abe1e..5b71768 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -34,19 +34,6 @@
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-struct qat_alg_buf {
- u32 len;
- u32 resrvd;
- u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
/* Common content descriptor */
struct qat_alg_cd {
union {
@@ -637,14 +624,20 @@
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
size_t sz_out = qat_req->buf.sz_out;
+ int bl_dma_dir;
int i;
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
+ bl->bufers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- kfree(bl);
+
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bl);
+
if (blp != blpout) {
/* If out of place operation dma unmap only data */
int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -652,10 +645,12 @@
for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
- kfree(blout);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(blout);
}
}
@@ -669,26 +664,34 @@
int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
- dma_addr_t blp;
- dma_addr_t bloutp;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ size_t sz_out, sz = struct_size(bufl, bufers, n);
+ int node = dev_to_node(&GET_DEV(inst->accel_dev));
+ int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
- bufl = kzalloc_node(sz, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!bufl))
- return -ENOMEM;
+ qat_req->buf.sgl_src_valid = false;
+ qat_req->buf.sgl_dst_valid = false;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+ if (unlikely(!bufl))
+ return -ENOMEM;
+ } else {
+ bufl = &qat_req->buf.sgl_src.sgl_hdr;
+ memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_src_valid = true;
+ }
+
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, blp)))
- goto err_in;
-
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -697,13 +700,16 @@
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err_in;
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -712,20 +718,23 @@
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n + 1);
+ sz_out = struct_size(buflout, bufers, n);
sg_nctr = 0;
- buflout = kzalloc_node(sz_out, GFP_ATOMIC,
- dev_to_node(&GET_DEV(inst->accel_dev)));
- if (unlikely(!buflout))
- goto err_in;
+
+ if (n > QAT_MAX_BUFF_DESC) {
+ buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+ if (unlikely(!buflout))
+ goto err_in;
+ } else {
+ buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+ memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+ qat_req->buf.sgl_dst_valid = true;
+ }
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i)
bufers[i].addr = DMA_MAPPING_ERROR;
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err_out;
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -734,7 +743,7 @@
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err_out;
bufers[y].len = sg->length;
@@ -742,6 +751,9 @@
}
buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
qat_req->buf.sz_out = sz_out;
@@ -753,27 +765,32 @@
return 0;
err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
n = sg_nents(sglout);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
- kfree(buflout);
+ DMA_FROM_DEVICE);
+
+ if (!qat_req->buf.sgl_dst_valid)
+ kfree(buflout);
err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
- if (!dma_mapping_error(dev, blp))
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- kfree(bufl);
+ if (!qat_req->buf.sgl_src_valid)
+ kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index 12682d1..5f93282 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -20,6 +20,26 @@
atomic_t refctr;
};
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+ u32 len;
+ u32 resrvd;
+ u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+ struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
struct qat_crypto_request_buffs {
struct qat_alg_buf_list *bl;
dma_addr_t blp;
@@ -27,6 +47,10 @@
dma_addr_t bloutp;
size_t sz;
size_t sz_out;
+ bool sgl_src_valid;
+ bool sgl_dst_valid;
+ struct qat_alg_fixed_buf_list sgl_src;
+ struct qat_alg_fixed_buf_list sgl_dst;
};
struct qat_crypto_request;
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 11f30fd..031b5f7 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -65,6 +65,7 @@
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
+ break;
}
} while (currsize < max);
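The added break ends the read loop once only a partial word remains; without it currsize never advances past that point and the loop spins forever. A condensed sketch of the loop shape, where read_word() is a hypothetical stand-in for the hardware FIFO read:

#include <linux/string.h>
#include <linux/types.h>

static u32 read_word(void)
{
	return 0;	/* hypothetical FIFO read */
}

static void demo_fill(u8 *data, size_t max)
{
	size_t currsize = 0;

	while (currsize < max) {
		u32 val = read_word();

		if (max - currsize >= sizeof(val)) {
			memcpy(data + currsize, &val, sizeof(val));
			currsize += sizeof(val);
		} else {
			/* copy only the remaining bytes, then stop */
			memcpy(data + currsize, &val, max - currsize);
			break;
		}
	}
}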
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 1cece1a..5bbf0d2 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -506,7 +506,6 @@
.exit = rk_ablk_exit_tfm,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index d60679c..2043dd0 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -25,10 +25,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -195,7 +195,7 @@
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- struct mutex queue_mutex;
+ spinlock_t queue_spinlock;
struct task_struct *kthread;
struct completion dma_completion;
@@ -641,9 +641,9 @@
rctx->mode = mode;
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
err = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1042,10 +1042,10 @@
do {
__set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -1091,9 +1091,9 @@
rctx->first = 1;
}
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
ret = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1454,7 +1454,7 @@
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- mutex_init(&dev->queue_mutex);
+ spin_lock_init(&dev->queue_spinlock);
dev_ptr = dev;
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index be1bf39..90a920e 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -384,8 +384,10 @@
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(crc->dev);
return ret;
+ }
spin_lock(&crc_list.lock);
list_del(&crc->list);
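pm_runtime_get_sync() raises the usage count even when it fails, so the new error branch above drops that reference with pm_runtime_put_noidle() before returning; pm_runtime_resume_and_get() (used in the mtk-uart-apdma hunk further down) wraps the same pattern. A minimal sketch:

#include <linux/pm_runtime.h>

static int demo_resume(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		/* balance the usage count taken by the failed get */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}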
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index c85fab7..b2c28b8 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -2,7 +2,11 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_GHASH
+ select CRYPTO_XTS
default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cb6401c..acf31cc 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -15,6 +15,7 @@
.start = r->start,
.end = r->end,
.flags = IORESOURCE_MEM,
+ .desc = IORES_DESC_SOFT_RESERVED,
};
struct platform_device *pdev;
struct memregion_info info;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index cadbd0a..260a247 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -723,6 +723,7 @@
static void dax_fs_exit(void)
{
kern_unmount(dax_mnt);
+ rcu_barrier();
kmem_cache_destroy(dax_cache);
}
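The rcu_barrier() added above waits for any call_rcu()/kfree_rcu() callbacks that still free objects from the cache to finish before kmem_cache_destroy() tears the cache down. A minimal sketch of the ordering, with an illustrative cache name:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void demo_cache_exit(struct kmem_cache *demo_cache)
{
	rcu_barrier();			/* flush pending RCU-delayed frees */
	kmem_cache_destroy(demo_cache);
}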
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 17ed980..d6da9c3 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -514,15 +514,19 @@
count = of_get_child_count(events_np);
desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
- if (!desc)
+ if (!desc) {
+ of_node_put(events_np);
return -ENOMEM;
+ }
info->num_events = count;
of_id = of_match_device(exynos_ppmu_id_match, dev);
if (of_id)
info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
- else
+ else {
+ of_node_put(events_np);
return -EINVAL;
+ }
j = 0;
for_each_child_of_node(events_np, node) {
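The two added of_node_put() calls balance the reference taken when the events node was looked up, on error returns that previously leaked it. A condensed sketch of the pattern, with illustrative node and property names:

#include <linux/errno.h>
#include <linux/of.h>

static int demo_parse_events(struct device_node *parent)
{
	struct device_node *events_np;

	events_np = of_get_child_by_name(parent, "events");
	if (!events_np)
		return -EINVAL;

	if (!of_get_child_count(events_np)) {
		of_node_put(events_np);	/* keep the refcount balanced */
		return -EINVAL;
	}

	/* ... iterate the children and register each event ... */

	of_node_put(events_np);
	return 0;
}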
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e91216..7e52375 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -485,6 +485,8 @@
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
+ devfreq_event_disable_edev(dmcfreq->edev);
+
/*
* Before remove the opp table we need to unregister the opp notifier.
*/
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 798f86f..dcbb023 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -209,18 +209,6 @@
return ERR_PTR(-EINVAL);
}
- /* check the name is unique */
- mutex_lock(&heap_list_lock);
- list_for_each_entry(h, &heap_list, list) {
- if (!strcmp(h->name, exp_info->name)) {
- mutex_unlock(&heap_list_lock);
- pr_err("dma_heap: Already registered heap named %s\n",
- exp_info->name);
- return ERR_PTR(-EINVAL);
- }
- }
- mutex_unlock(&heap_list_lock);
-
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
@@ -259,13 +247,27 @@
err_ret = ERR_CAST(dev_ret);
goto err2;
}
- /* Add heap to the list */
+
mutex_lock(&heap_list_lock);
+ /* check the name is unique */
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, exp_info->name)) {
+ mutex_unlock(&heap_list_lock);
+ pr_err("dma_heap: Already registered heap named %s\n",
+ exp_info->name);
+ err_ret = ERR_PTR(-EINVAL);
+ goto err3;
+ }
+ }
+
+ /* Add heap to the list */
list_add(&heap->list, &heap_list);
mutex_unlock(&heap_list_lock);
return heap;
+err3:
+ device_destroy(dma_heap_class, heap->heap_devt);
err2:
cdev_del(&heap->heap_cdev);
err1:
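Moving the duplicate-name scan so it runs under heap_list_lock together with the list_add() closes the window in which two racing registrations of the same name could both pass the check. A condensed sketch of check-then-insert in one critical section, with illustrative names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct demo_heap {
	const char *name;
	struct list_head list;
};

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_list_lock);

static int demo_add_unique(struct demo_heap *new)
{
	struct demo_heap *h;

	mutex_lock(&demo_list_lock);
	list_for_each_entry(h, &demo_list, list) {
		if (!strcmp(h->name, new->name)) {
			mutex_unlock(&demo_list_lock);
			return -EEXIST;	/* caller unwinds device/cdev */
		}
	}
	list_add(&new->list, &demo_list);
	mutex_unlock(&demo_list_lock);

	return 0;
}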
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f7..e359c5c 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -26,8 +26,11 @@
{
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
+ pgoff_t pgoff = vmf->pgoff;
- vmf->page = ubuf->pages[vmf->pgoff];
+ if (pgoff >= ubuf->pagecount)
+ return VM_FAULT_SIGBUS;
+ vmf->page = ubuf->pages[pgoff];
get_page(vmf->page);
return 0;
}
@@ -115,17 +118,20 @@
{
struct udmabuf *ubuf = buf->priv;
struct device *dev = ubuf->device->this_device;
+ int ret = 0;
if (!ubuf->sg) {
ubuf->sg = get_sg_table(dev, buf, direction);
- if (IS_ERR(ubuf->sg))
- return PTR_ERR(ubuf->sg);
+ if (IS_ERR(ubuf->sg)) {
+ ret = PTR_ERR(ubuf->sg);
+ ubuf->sg = NULL;
+ }
} else {
dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
direction);
}
- return 0;
+ return ret;
}
static int end_cpu_udmabuf(struct dma_buf *buf,
@@ -181,6 +187,10 @@
if (ubuf->pagecount > pglimit)
goto err;
}
+
+ if (!ubuf->pagecount)
+ goto err;
+
ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
GFP_KERNEL);
if (!ubuf->pages) {
@@ -320,7 +330,23 @@
static int __init udmabuf_dev_init(void)
{
- return misc_register(&udmabuf_misc);
+ int ret;
+
+ ret = misc_register(&udmabuf_misc);
+ if (ret < 0) {
+ pr_err("Could not initialize udmabuf device\n");
+ return ret;
+ }
+
+ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+ DMA_BIT_MASK(64));
+ if (ret < 0) {
+ pr_err("Could not setup DMA mask for udmabuf device\n");
+ misc_deregister(&udmabuf_misc);
+ return ret;
+ }
+
+ return 0;
}
static void __exit udmabuf_dev_exit(void)
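The fault-handler hunk above bounds-checks vmf->pgoff before indexing the page array, since an mmap() offset beyond the buffer would otherwise walk past it. A minimal sketch with illustrative structure names:

#include <linux/mm.h>
#include <linux/types.h>

struct demo_buf {
	pgoff_t pagecount;
	struct page **pages;
};

static vm_fault_t demo_vm_fault(struct vm_fault *vmf)
{
	struct demo_buf *buf = vmf->vma->vm_private_data;

	if (vmf->pgoff >= buf->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buf->pages[vmf->pgoff];
	get_page(vmf->page);	/* reference dropped when the mapping goes away */
	return 0;
}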
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7eaee5b..6a4f969 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -237,6 +237,8 @@
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
+ /* Don't allow CPU to reorder channel enable. */
+ wmb();
dma_writel(atdma, CHER, atchan->mask);
vdbg_dump_regs(atchan);
@@ -297,7 +299,8 @@
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
- u32 ctrla, dscr, trials;
+ u32 ctrla, dscr;
+ unsigned int i;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -367,7 +370,7 @@
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
- for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+ for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
@@ -393,7 +396,7 @@
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
- if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+ if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
@@ -443,18 +446,6 @@
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
- /* If the transfer was a memset, free our temporary buffer */
- if (desc->memset_buffer) {
- dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
- desc->memset_paddr);
- desc->memset_buffer = false;
- }
-
- /* move children to free_list */
- list_splice_init(&desc->tx_list, &atchan->free_list);
- /* move myself to free_list */
- list_move(&desc->desc_node, &atchan->free_list);
-
spin_unlock_irqrestore(&atchan->lock, flags);
dma_descriptor_unmap(txd);
@@ -464,42 +455,20 @@
dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
- struct at_desc *desc, *_desc;
- LIST_HEAD(list);
- unsigned long flags;
-
- dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
spin_lock_irqsave(&atchan->lock, flags);
-
- /*
- * Submit queued descriptors ASAP, i.e. before we go through
- * the completed ones.
- */
- if (!list_empty(&atchan->queue))
- atc_dostart(atchan, atc_first_queued(atchan));
- /* empty active_list now it is completed */
- list_splice_init(&atchan->active_list, &list);
- /* empty queue list by moving descriptors (if any) to active_list */
- list_splice_init(&atchan->queue, &atchan->active_list);
-
+ /* move children to free_list */
+ list_splice_init(&desc->tx_list, &atchan->free_list);
+ /* add myself to free_list */
+ list_add(&desc->desc_node, &atchan->free_list);
spin_unlock_irqrestore(&atchan->lock, flags);
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ /* If the transfer was a memset, free our temporary buffer */
+ if (desc->memset_buffer) {
+ dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+ desc->memset_paddr);
+ desc->memset_buffer = false;
+ }
}
/**
@@ -508,26 +477,28 @@
*/
static void atc_advance_work(struct at_dma_chan *atchan)
{
+ struct at_desc *desc;
unsigned long flags;
- int ret;
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
spin_lock_irqsave(&atchan->lock, flags);
- ret = atc_chan_is_enabled(atchan);
+ if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+ return spin_unlock_irqrestore(&atchan->lock, flags);
+
+ desc = atc_first_active(atchan);
+ /* Remove the transfer node from the active list. */
+ list_del_init(&desc->desc_node);
spin_unlock_irqrestore(&atchan->lock, flags);
- if (ret)
- return;
-
- if (list_empty(&atchan->active_list) ||
- list_is_singular(&atchan->active_list))
- return atc_complete_all(atchan);
-
- atc_chain_complete(atchan, atc_first_active(atchan));
+ atc_chain_complete(atchan, desc);
/* advance work */
spin_lock_irqsave(&atchan->lock, flags);
- atc_dostart(atchan, atc_first_active(atchan));
+ if (!list_empty(&atchan->active_list)) {
+ desc = atc_first_queued(atchan);
+ list_move_tail(&desc->desc_node, &atchan->active_list);
+ atc_dostart(atchan, desc);
+ }
spin_unlock_irqrestore(&atchan->lock, flags);
}
@@ -539,6 +510,7 @@
static void atc_handle_error(struct at_dma_chan *atchan)
{
struct at_desc *bad_desc;
+ struct at_desc *desc;
struct at_desc *child;
unsigned long flags;
@@ -551,13 +523,12 @@
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);
- /* As we are stopped, take advantage to push queued descriptors
- * in active_list */
- list_splice_init(&atchan->queue, atchan->active_list.prev);
-
/* Try to restart the controller */
- if (!list_empty(&atchan->active_list))
- atc_dostart(atchan, atc_first_active(atchan));
+ if (!list_empty(&atchan->active_list)) {
+ desc = atc_first_queued(atchan);
+ list_move_tail(&desc->desc_node, &atchan->active_list);
+ atc_dostart(atchan, desc);
+ }
/*
* KERN_CRITICAL may seem harsh, but since this only happens
@@ -672,19 +643,11 @@
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);
- if (list_empty(&atchan->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
- desc->txd.cookie);
- atc_dostart(atchan, desc);
- list_add_tail(&desc->desc_node, &atchan->active_list);
- } else {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &atchan->queue);
- }
-
+ list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+ desc->txd.cookie);
return cookie;
}
@@ -1418,11 +1381,8 @@
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
- struct at_desc *desc, *_desc;
unsigned long flags;
- LIST_HEAD(list);
-
dev_vdbg(chan2dev(chan), "%s\n", __func__);
/*
@@ -1441,19 +1401,15 @@
cpu_relax();
/* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
-
- spin_unlock_irqrestore(&atchan->lock, flags);
-
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ list_splice_tail_init(&atchan->queue, &atchan->free_list);
+ list_splice_tail_init(&atchan->active_list, &atchan->free_list);
clear_bit(ATC_IS_PAUSED, &atchan->status);
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
return 0;
}
@@ -1508,20 +1464,26 @@
}
/**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
* @chan: target DMA channel
*/
static void atc_issue_pending(struct dma_chan *chan)
{
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_desc *desc;
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
- /* Not needed for cyclic transfers */
- if (atc_chan_is_cyclic(atchan))
- return;
+ spin_lock_irqsave(&atchan->lock, flags);
+ if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+ return spin_unlock_irqrestore(&atchan->lock, flags);
- atc_advance_work(atchan);
+ desc = atc_first_queued(atchan);
+ list_move_tail(&desc->desc_node, &atchan->active_list);
+ atc_dostart(atchan, desc);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
@@ -1939,7 +1901,11 @@
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);
- dma_async_device_register(&atdma->dma_common);
+ err = dma_async_device_register(&atdma->dma_common);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+ goto err_dma_async_device_register;
+ }
/*
* Do not return an error if the dmac node is not present in order to
@@ -1959,6 +1925,7 @@
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);
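The at_hdmac rework above stops starting transfers from tx_submit(): descriptors are only queued there, and the hardware is kicked from issue_pending() or from the completion path when the channel is idle, matching the dmaengine submission model. A minimal sketch of that split, with illustrative types (dma_cookie_assign() comes from the private drivers/dma/dmaengine.h header):

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_chan {
	struct dma_chan common;
	spinlock_t lock;
	struct list_head queue;		/* submitted, not yet issued */
};

struct demo_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

static dma_cookie_t demo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct demo_chan *chan = container_of(tx->chan, struct demo_chan, common);
	struct demo_desc *desc = container_of(tx, struct demo_desc, txd);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);		/* see drivers/dma/dmaengine.h */
	list_add_tail(&desc->node, &chan->queue);
	spin_unlock_irqrestore(&chan->lock, flags);

	/* hardware is started later, from issue_pending() */
	return cookie;
}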
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 80fc2fe..8dc82c7 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -164,13 +164,13 @@
/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
/* values that are not changed by hardware */
- dma_addr_t saddr;
- dma_addr_t daddr;
+ u32 saddr;
+ u32 daddr;
/* value that may get written back: */
- u32 ctrla;
+ u32 ctrla;
/* more values that are not changed by hardware */
- u32 ctrlb;
- dma_addr_t dscr; /* chain to next lli */
+ u32 ctrlb;
+ u32 dscr; /* chain to next lli */
};
/**
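The at_lli fields above are narrowed to explicit u32 because the descriptor is read by the controller itself; dma_addr_t grows to 64 bits with CONFIG_ARCH_DMA_ADDR_T_64BIT and would change the in-memory layout the hardware expects. An illustrative mirror of such a hardware-visible descriptor:

#include <linux/types.h>

struct demo_hw_lli {
	u32 saddr;	/* source bus address */
	u32 daddr;	/* destination bus address */
	u32 ctrla;
	u32 ctrlb;
	u32 dscr;	/* 32-bit bus address of the next LLI */
};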
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 90afba0..b5d691a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1390,7 +1390,7 @@
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
- struct at_xdmac_desc *desc, *_desc;
+ struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
int residue, retry;
@@ -1505,11 +1505,13 @@
* microblock.
*/
descs_list = &desc->descs_list;
- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+ desc = iter;
break;
+ }
}
residue += cur_ubc << dwidth;
@@ -1836,6 +1838,11 @@
for (i = 0; i < init_nr_desc_per_channel; i++) {
desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
if (!desc) {
+ if (i == 0) {
+ dev_warn(chan2dev(chan),
+ "can't allocate any descriptors\n");
+ return -EIO;
+ }
dev_warn(chan2dev(chan),
"only %d descriptors have been allocated\n", i);
break;
diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c
index 2fd87f8..e169f18 100644
--- a/drivers/dma/bestcomm/ata.c
+++ b/drivers/dma/bestcomm/ata.c
@@ -133,7 +133,7 @@
struct bcom_ata_var *var;
/* Reset all BD */
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
tsk->index = 0;
tsk->outdex = 0;
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index d91cbbe..8c42e5c 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -95,7 +95,7 @@
tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
if (!tsk->bd)
goto error;
- memset(tsk->bd, 0x00, bd_count * bd_size);
+ memset_io(tsk->bd, 0x00, bd_count * bd_size);
tsk->num_bd = bd_count;
tsk->bd_size = bd_size;
@@ -186,16 +186,16 @@
inc = bcom_task_inc(task);
/* Clear & copy */
- memset(var, 0x00, BCOM_VAR_SIZE);
- memset(inc, 0x00, BCOM_INC_SIZE);
+ memset_io(var, 0x00, BCOM_VAR_SIZE);
+ memset_io(inc, 0x00, BCOM_INC_SIZE);
desc_src = (u32 *)(hdr + 1);
var_src = desc_src + hdr->desc_size;
inc_src = var_src + hdr->var_size;
- memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
- memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
- memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+ memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32));
+ memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+ memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32));
return 0;
}
@@ -302,13 +302,13 @@
return -ENOMEM;
}
- memset(bcom_eng->tdt, 0x00, tdt_size);
- memset(bcom_eng->ctx, 0x00, ctx_size);
- memset(bcom_eng->var, 0x00, var_size);
- memset(bcom_eng->fdt, 0x00, fdt_size);
+ memset_io(bcom_eng->tdt, 0x00, tdt_size);
+ memset_io(bcom_eng->ctx, 0x00, ctx_size);
+ memset_io(bcom_eng->var, 0x00, var_size);
+ memset_io(bcom_eng->fdt, 0x00, fdt_size);
/* Copy the FDT for the EU#3 */
- memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+ memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
/* Initialize Task base structure */
for (task=0; task<BCOM_MAX_TASKS; task++)
diff --git a/drivers/dma/bestcomm/fec.c b/drivers/dma/bestcomm/fec.c
index 7f1fb1c..d203618 100644
--- a/drivers/dma/bestcomm/fec.c
+++ b/drivers/dma/bestcomm/fec.c
@@ -140,7 +140,7 @@
tsk->index = 0;
tsk->outdex = 0;
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
/* Configure some stuff */
bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
@@ -241,7 +241,7 @@
tsk->index = 0;
tsk->outdex = 0;
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
/* Configure some stuff */
bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c
index 906ddba..8a24a5c 100644
--- a/drivers/dma/bestcomm/gen_bd.c
+++ b/drivers/dma/bestcomm/gen_bd.c
@@ -142,7 +142,7 @@
tsk->index = 0;
tsk->outdex = 0;
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
/* Configure some stuff */
bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
@@ -226,7 +226,7 @@
tsk->index = 0;
tsk->outdex = 0;
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
/* Configure some stuff */
bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
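The bestcomm hunks above switch to memset_io()/memcpy_toio() because the task descriptors, variables and FDT live in device SRAM mapped as __iomem, which plain memset()/memcpy() must not touch. A minimal sketch:

#include <linux/io.h>
#include <linux/types.h>

static void demo_clear_bds(void __iomem *bd, unsigned int num_bd, size_t bd_size)
{
	/* MMIO-safe clear of the buffer descriptor ring */
	memset_io(bd, 0x00, num_bd * bd_size);
}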
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 58c8cc8..d7ed50f 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -400,7 +400,7 @@
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (chan->dir == EDMA_DIR_WRITE) {
+ if (dir == DMA_DEV_TO_MEM) {
burst->sar = src_addr;
if (xfer->cyclic) {
burst->dar = xfer->xfer.cyclic.paddr;
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index e1a958a..8f16513 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -30,7 +30,7 @@
#define HISI_DMA_MODE 0x217c
#define HISI_DMA_OFFSET 0x100
-#define HISI_DMA_MSI_NUM 30
+#define HISI_DMA_MSI_NUM 32
#define HISI_DMA_CHAN_NUM 30
#define HISI_DMA_Q_DEPTH_VAL 1024
@@ -185,7 +185,8 @@
hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
}
-static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan,
+ bool disable)
{
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
u32 index = chan->qp_num, tmp;
@@ -206,8 +207,11 @@
hisi_dma_do_reset(hdma_dev, index);
hisi_dma_reset_qp_point(hdma_dev, index);
hisi_dma_pause_dma(hdma_dev, index, false);
- hisi_dma_enable_dma(hdma_dev, index, true);
- hisi_dma_unmask_irq(hdma_dev, index);
+
+ if (!disable) {
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+ }
ret = readl_relaxed_poll_timeout(hdma_dev->base +
HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
@@ -223,7 +227,7 @@
struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
- hisi_dma_reset_hw_chan(chan);
+ hisi_dma_reset_or_disable_hw_chan(chan, false);
vchan_free_chan_resources(&chan->vc);
memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
@@ -272,7 +276,6 @@
vd = vchan_next_desc(&chan->vc);
if (!vd) {
- dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
chan->desc = NULL;
return;
}
@@ -304,7 +307,7 @@
spin_lock_irqsave(&chan->vc.lock, flags);
- if (vchan_issue_pending(&chan->vc))
+ if (vchan_issue_pending(&chan->vc) && !chan->desc)
hisi_dma_start_transfer(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -399,7 +402,7 @@
static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+ hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true);
}
static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
@@ -438,18 +441,15 @@
desc = chan->desc;
cqe = chan->cq + chan->cq_head;
if (desc) {
+ chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR,
+ chan->qp_num, chan->cq_head);
if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
- chan->cq_head = (chan->cq_head + 1) %
- hdma_dev->chan_depth;
- hisi_dma_chan_write(hdma_dev->base,
- HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
- chan->cq_head);
vchan_cookie_complete(&desc->vd);
+ hisi_dma_start_transfer(chan);
} else {
dev_err(&hdma_dev->pdev->dev, "task error!\n");
}
-
- chan->desc = NULL;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 4da8857..ae65eb9 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -266,10 +266,16 @@
rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
ictx[i].name);
if (rc)
- return rc;
+ goto err_free_chrdev_region;
}
return 0;
+
+err_free_chrdev_region:
+ for (i--; i >= 0; i--)
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+
+ return rc;
}
void idxd_cdev_remove(void)
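The new err_free_chrdev_region label unregisters, in reverse order, every region that was successfully allocated before the failing one. A condensed sketch of that unwind pattern, with illustrative array names:

#include <linux/fs.h>
#include <linux/kdev_t.h>

static int demo_register_regions(dev_t *devts, const char * const *names, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = alloc_chrdev_region(&devts[i], 0, MINORMASK, names[i]);
		if (rc)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (--i >= 0)
		unregister_chrdev_region(devts[i], MINORMASK);
	return rc;
}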
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index aa74355..09ad37b 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -83,6 +83,27 @@
}
static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+ 0, 0, 0, desc->compl_dma, desc_flags);
+ desc->txd.flags = flags;
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags)
{
@@ -188,10 +209,12 @@
INIT_LIST_HEAD(&dma->channels);
dma->dev = dev;
+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
+ dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7b41cdf..51af0df 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1098,6 +1098,9 @@
u64 xfer_size;
int rc;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -1132,6 +1135,9 @@
u64 batch_size;
int rc;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 306f93e..2283dcd 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1789,7 +1789,7 @@
u32 reg, val, shift, num_map, i;
int ret = 0;
- if (IS_ERR(np) || IS_ERR(gpr_np))
+ if (IS_ERR(np) || !gpr_np)
goto out;
event_remap = of_find_property(np, propname, NULL);
@@ -1837,7 +1837,7 @@
}
out:
- if (!IS_ERR(gpr_np))
+ if (gpr_np)
of_node_put(gpr_np);
return ret;
@@ -2212,7 +2212,7 @@
#if IS_ENABLED(CONFIG_SOC_IMX6Q)
MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
#endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 37ff4ec..e2070df 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -656,7 +656,7 @@
if (active - i == 0) {
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
__func__);
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* microsecond delay by sysfs variable per pending descriptor */
@@ -682,7 +682,7 @@
if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
}
@@ -879,7 +879,7 @@
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
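mod_timer_pending() only rearms a timer that is still pending, so, unlike mod_timer(), it cannot re-activate a timer that a concurrent shutdown path has already deleted, which is what the three replacements above rely on. A minimal sketch:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void demo_extend_idle_timer(struct timer_list *t, unsigned long timeout)
{
	/* no-op if the timer has already been deleted */
	mod_timer_pending(t, jiffies + timeout);
}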
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 375e7e6..a1517ef 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -274,7 +274,7 @@
unsigned int status;
int ret;
- ret = pm_runtime_get_sync(mtkd->ddev.dev);
+ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
if (ret < 0) {
pm_runtime_put_noidle(chan->device->dev);
return ret;
@@ -288,18 +288,21 @@
ret = readx_poll_timeout(readl, c->base + VFF_EN,
status, !status, 10, 100);
if (ret)
- return ret;
+ goto err_pm;
ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
if (ret < 0) {
dev_err(chan->device->dev, "Can't request dma IRQ\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_pm;
}
if (mtkd->support_33bits)
mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+err_pm:
+ pm_runtime_put_noidle(mtkd->ddev.dev);
return ret;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 9b0d463..4800c59 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -899,6 +899,7 @@
tasklet_kill(&xor_dev->irq_tasklet);
clk_disable_unprepare(xor_dev->clk);
+ clk_disable_unprepare(xor_dev->reg_clk);
return 0;
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 65f816b..dc147cc 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -167,29 +167,11 @@
}
};
-static const struct platform_device_id mxs_dma_ids[] = {
- {
- .name = "imx23-dma-apbh",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
- }, {
- .name = "imx23-dma-apbx",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
- }, {
- .name = "imx28-dma-apbh",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
- }, {
- .name = "imx28-dma-apbx",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
- }, {
- /* end of list */
- }
-};
-
static const struct of_device_id mxs_dma_dt_ids[] = {
- { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
- { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
- { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
- { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+ { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
+ { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
+ { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
+ { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
@@ -688,7 +670,7 @@
return mxs_chan->status;
}
-static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
@@ -759,11 +741,9 @@
ofdma->of_node);
}
-static int __init mxs_dma_probe(struct platform_device *pdev)
+static int mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct platform_device_id *id_entry;
- const struct of_device_id *of_id;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
@@ -779,13 +759,7 @@
return ret;
}
- of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
- if (of_id)
- id_entry = of_id->data;
- else
- id_entry = platform_get_device_id(pdev);
-
- dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+ dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
@@ -865,11 +839,7 @@
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
- .id_table = mxs_dma_ids,
+ .probe = mxs_dma_probe,
};
-static int __init mxs_dma_module_init(void)
-{
- return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
-}
-subsys_initcall(mxs_dma_module_init);
+builtin_platform_driver(mxs_dma_driver);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index dfbf514..5bbae99 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2591,7 +2591,7 @@
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- DEFINE_SPINLOCK(lock);
+ static DEFINE_SPINLOCK(lock);
LIST_HEAD(pool);
if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
@@ -3199,7 +3199,7 @@
return ret;
}
-static int pl330_remove(struct amba_device *adev)
+static void pl330_remove(struct amba_device *adev)
{
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
@@ -3239,7 +3239,6 @@
if (pl330->rstc)
reset_control_assert(pl330->rstc);
- return 0;
}
static const struct amba_id pl330_ids[] = {
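Without the added static, DEFINE_SPINLOCK() inside the function declares a fresh lock on the stack for every call, so concurrent callers are never actually serialised; the static variant gives them a single shared lock. A minimal sketch:

#include <linux/spinlock.h>

static void demo_emergency_alloc(void)
{
	static DEFINE_SPINLOCK(lock);	/* one lock shared by all callers */

	spin_lock(&lock);
	/* ... refill the descriptor pool ... */
	spin_unlock(&lock);
}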
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b4ef4f1..68d9d60 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1249,14 +1249,14 @@
return -ENOMEM;
for (i = 0; i < nb_phy_chans; i++)
- if (platform_get_irq(op, i) > 0)
+ if (platform_get_irq_optional(op, i) > 0)
nr_irq++;
for (i = 0; i < nb_phy_chans; i++) {
phy = &pdev->phys[i];
phy->base = pdev->base;
phy->idx = i;
- irq = platform_get_irq(op, i);
+ irq = platform_get_irq_optional(op, i);
if ((nr_irq > 1) && (irq > 0))
ret = devm_request_irq(&op->dev, irq,
pxad_chan_handler,
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 528deb5..5c615a8 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -52,16 +52,6 @@
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->desc && !chan->desc->in_use) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return chan->desc;
- }
-
- spin_unlock_irqrestore(&chan->lock, flags);
desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
@@ -94,6 +84,7 @@
{
struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
struct sf_pdma_desc *desc;
+ unsigned long iflags;
if (chan && (!len || !dest || !src)) {
dev_err(chan->pdma->dma_dev.dev,
@@ -109,10 +100,9 @@
desc->dirn = DMA_MEM_TO_MEM;
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
- spin_lock_irqsave(&chan->vchan.lock, flags);
- chan->desc = desc;
+ spin_lock_irqsave(&chan->vchan.lock, iflags);
sf_pdma_fill_desc(desc, dest, src, len);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock_irqrestore(&chan->vchan.lock, iflags);
return desc->async_tx;
}
@@ -169,11 +159,17 @@
unsigned long flags;
u64 residue = 0;
struct sf_pdma_desc *desc;
- struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *tx = NULL;
spin_lock_irqsave(&chan->vchan.lock, flags);
- tx = &chan->desc->vdesc.tx;
+ list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
+ if (vd->tx.cookie == cookie)
+ tx = &vd->tx;
+
+ if (!tx)
+ goto out;
+
if (cookie == tx->chan->completed_cookie)
goto out;
@@ -240,6 +236,19 @@
writel(v, regs->ctrl);
}
+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
+{
+ struct virt_dma_chan *vchan = &chan->vchan;
+ struct virt_dma_desc *vdesc;
+
+ if (list_empty(&vchan->desc_issued))
+ return NULL;
+
+ vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
+
+ return container_of(vdesc, struct sf_pdma_desc, vdesc);
+}
+
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc = chan->desc;
@@ -267,8 +276,11 @@
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
+ /* vchan_issue_pending has made a check that desc is not NULL */
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
sf_pdma_xfer_desc(chan);
+ }
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
@@ -297,6 +309,11 @@
spin_lock_irqsave(&chan->vchan.lock, flags);
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
+ if (chan->desc)
+ sf_pdma_xfer_desc(chan);
+
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 19ac95c..7f72b3f 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -115,10 +115,8 @@
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
- if (ret < 0) {
+ if (ret < 0)
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
- pm_runtime_put(schan->dev);
- }
pm_runtime_barrier(schan->dev);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 4357d23..60115d8 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1236,11 +1236,8 @@
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
- int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index fe36738..9d54746 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -40,7 +40,6 @@
STM32_MDMA_SHIFT(mask))
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
-#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
@@ -196,7 +195,7 @@
#define STM32_MDMA_MAX_BUF_LEN 128
#define STM32_MDMA_MAX_BLOCK_LEN 65536
-#define STM32_MDMA_MAX_CHANNELS 63
+#define STM32_MDMA_MAX_CHANNELS 32
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
@@ -1345,90 +1344,84 @@
static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
struct stm32_mdma_device *dmadev = devid;
- struct stm32_mdma_chan *chan = devid;
- u32 reg, id, ien, status, flag;
+ struct stm32_mdma_chan *chan;
+ u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
- if (status) {
- id = __ffs(status);
- } else {
- status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
- if (!status) {
- dev_dbg(mdma2dev(dmadev), "spurious it\n");
- return IRQ_NONE;
- }
- id = __ffs(status);
- /*
- * As GISR0 provides status for channel id from 0 to 31,
- * so GISR1 provides status for channel id from 32 to 62
- */
- id += 32;
+ if (!status) {
+ dev_dbg(mdma2dev(dmadev), "spurious it\n");
+ return IRQ_NONE;
}
+ id = __ffs(status);
chan = &dmadev->chan[id];
if (!chan) {
- dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
- goto exit;
+ dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
+ return IRQ_NONE;
}
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
- status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
- ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
- ien &= STM32_MDMA_CCR_IRQ_MASK;
- ien >>= 1;
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+ /* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */
+ status &= ~STM32_MDMA_CISR_CRQA;
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+ ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1;
if (!(status & ien)) {
spin_unlock(&chan->vchan.lock);
- dev_dbg(chan2dev(chan),
- "spurious it (status=0x%04x, ien=0x%04x)\n",
- status, ien);
+ dev_warn(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n",
+ status, ien);
return IRQ_NONE;
}
- flag = __ffs(status & ien);
- reg = STM32_MDMA_CIFCR(chan->id);
+ reg = STM32_MDMA_CIFCR(id);
- switch (1 << flag) {
- case STM32_MDMA_CISR_TEIF:
- id = chan->id;
- status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
- dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
+ if (status & STM32_MDMA_CISR_TEIF) {
+ dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n",
+ readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)));
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
- break;
+ status &= ~STM32_MDMA_CISR_TEIF;
+ }
- case STM32_MDMA_CISR_CTCIF:
+ if (status & STM32_MDMA_CISR_CTCIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
+ status &= ~STM32_MDMA_CISR_CTCIF;
stm32_mdma_xfer_end(chan);
- break;
+ }
- case STM32_MDMA_CISR_BRTIF:
+ if (status & STM32_MDMA_CISR_BRTIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
- break;
+ status &= ~STM32_MDMA_CISR_BRTIF;
+ }
- case STM32_MDMA_CISR_BTIF:
+ if (status & STM32_MDMA_CISR_BTIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
+ status &= ~STM32_MDMA_CISR_BTIF;
chan->curr_hwdesc++;
if (chan->desc && chan->desc->cyclic) {
if (chan->curr_hwdesc == chan->desc->count)
chan->curr_hwdesc = 0;
vchan_cyclic_callback(&chan->desc->vdesc);
}
- break;
+ }
- case STM32_MDMA_CISR_TCIF:
+ if (status & STM32_MDMA_CISR_TCIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
- break;
+ status &= ~STM32_MDMA_CISR_TCIF;
+ }
- default:
- dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
- 1 << flag, status);
+ if (status) {
+ stm32_mdma_set_bits(dmadev, reg, status);
+ dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+ if (!(ccr & STM32_MDMA_CCR_EN))
+ dev_err(chan2dev(chan), "chan disabled by HW\n");
}
spin_unlock(&chan->vchan.lock);
-exit:
return IRQ_HANDLED;
}
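The reworked handler above walks every asserted-and-enabled status bit instead of servicing only the lowest one, so events that arrive together in a single interrupt are no longer dropped. A condensed, self-contained sketch of that shape, with hypothetical status bits:

#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_IRQ_ERROR		BIT(0)	/* hypothetical status bits */
#define DEMO_IRQ_XFER_DONE	BIT(1)

static void demo_handle_status(u32 status, u32 ien)
{
	status &= ien;				/* only enabled sources */

	if (status & DEMO_IRQ_ERROR) {
		status &= ~DEMO_IRQ_ERROR;
		pr_err("transfer error\n");	/* ack + error handling */
	}
	if (status & DEMO_IRQ_XFER_DONE) {
		status &= ~DEMO_IRQ_XFER_DONE;
		/* ack + complete the current descriptor */
	}
	if (status)
		pr_warn("unhandled status 0x%08x\n", status);
}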
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 4ba8fa5..86ced7f 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -245,6 +245,7 @@
if (dma_spec->args[0] >= xbar->xbar_requests) {
dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
dma_spec->args[0]);
+ put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
}
@@ -252,12 +253,14 @@
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
+ put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
+ put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
@@ -268,6 +271,8 @@
mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
+ of_node_put(dma_spec->np);
+ put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 8563a39..dadab2f 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -31,14 +31,14 @@
}
pdev = of_find_device_by_node(udma_node);
+ if (np != udma_node)
+ of_node_put(udma_node);
+
if (!pdev) {
pr_debug("UDMA device not found\n");
return ERR_PTR(-EPROBE_DEFER);
}
- if (np != udma_node)
- of_node_put(udma_node);
-
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cab4719..e76adc3 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3020,9 +3020,10 @@
/* Request and map I/O memory */
xdev->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3050,7 +3051,7 @@
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3070,7 +3071,11 @@
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3115,7 +3120,7 @@
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
- goto disable_clks;
+ goto error;
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3150,12 +3155,12 @@
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 5fecf5a..7e6be07 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -232,7 +232,7 @@
bool is_dmacoherent;
struct tasklet_struct tasklet;
bool idle;
- u32 desc_size;
+ size_t desc_size;
bool err;
u32 bus_width;
u32 src_burst_len;
@@ -490,7 +490,8 @@
}
chan->desc_pool_v = dma_alloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+ ZYNQMP_DMA_NUM_DESCS),
&chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index b8a7d95..1fa5ca5 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -489,7 +489,7 @@
dev = &pdev->dev;
for (idx = 0; idx < NUMBER_OF_IRQS; idx++) {
- irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name);
+ irq = platform_get_irq_byname_optional(pdev, dmc520_irq_configs[idx].name);
irqs[idx] = irq;
masks[idx] = dmc520_irq_configs[idx].mask;
if (irq >= 0) {
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index a918ca9..df5897c 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -101,9 +101,14 @@
dmi_memdev_name(handle, &bank, &device);
- /* both strings must be non-zero */
- if (bank && *bank && device && *device)
- snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
+ /*
+ * Set to a NULL string when both bank and device are zero. In this case,
+ * the label assigned by default will be preserved.
+ */
+ snprintf(dimm->label, sizeof(dimm->label), "%s%s%s",
+ (bank && *bank) ? bank : "",
+ (bank && *bank && device && *device) ? " " : "",
+ (device && *device) ? device : "");
}
static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 92906b5..fea44dc 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -163,6 +163,11 @@
#define ECC_STAT_CECNT_SHIFT 8
#define ECC_STAT_BITNUM_MASK 0x7F
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT 16
+#define ECC_ERRCNT_CECNT_MASK 0xFFFF
+
/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST 0x20200
#define DDR_QOSUE_MASK 0x4
@@ -418,15 +423,16 @@
base = priv->baseaddr;
p = &priv->stat;
+ regval = readl(base + ECC_ERRCNT_OFST);
+ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+ if (!p->ce_cnt)
+ goto ue_err;
+
regval = readl(base + ECC_STAT_OFST);
if (!regval)
return 1;
- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
- if (!p->ce_cnt)
- goto ue_err;
-
p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
regval = readl(base + ECC_CEADDR0_OFST);
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 5b9a3cf..2a78741 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -194,6 +194,13 @@
return 0;
}
+static void ptn5150_work_sync_and_put(void *data)
+{
+ struct ptn5150_info *info = data;
+
+ cancel_work_sync(&info->irq_work);
+}
+
static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
@@ -284,6 +291,10 @@
if (ret)
return -EINVAL;
+ ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
+ if (ret)
+ return ret;
+
/*
* Update current extcon state if for example OTG connection was there
* before the probe
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e7a9561..3566104 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1230,19 +1230,14 @@
edev->dev.type = &edev->extcon_dev_type;
}
- ret = device_register(&edev->dev);
- if (ret) {
- put_device(&edev->dev);
- goto err_dev;
- }
-
spin_lock_init(&edev->lock);
- edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
- sizeof(*edev->nh), GFP_KERNEL);
- if (!edev->nh) {
- ret = -ENOMEM;
- device_unregister(&edev->dev);
- goto err_dev;
+ if (edev->max_supported) {
+ edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
+ GFP_KERNEL);
+ if (!edev->nh) {
+ ret = -ENOMEM;
+ goto err_alloc_nh;
+ }
}
for (index = 0; index < edev->max_supported; index++)
@@ -1253,6 +1248,12 @@
dev_set_drvdata(&edev->dev, edev);
edev->state = 0;
+ ret = device_register(&edev->dev);
+ if (ret) {
+ put_device(&edev->dev);
+ goto err_dev;
+ }
+
mutex_lock(&extcon_dev_list_lock);
list_add(&edev->entry, &extcon_dev_list);
mutex_unlock(&extcon_dev_list_lock);
@@ -1261,6 +1262,9 @@
err_dev:
if (edev->max_supported)
+ kfree(edev->nh);
+err_alloc_nh:
+ if (edev->max_supported)
kfree(edev->extcon_dev_type.groups);
err_alloc_groups:
if (edev->max_supported && edev->mutually_exclusive) {
@@ -1320,6 +1324,7 @@
if (edev->max_supported) {
kfree(edev->extcon_dev_type.groups);
kfree(edev->cables);
+ kfree(edev->nh);
}
put_device(&edev->dev);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 54be881..f3b3953 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -668,6 +668,7 @@
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
+ unsigned long flags;
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -682,7 +683,9 @@
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
+ spin_lock_irqsave(&card->lock, flags);
fw_destroy_nodes(card);
+ spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index fb6c651..b0cc3f1 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1480,6 +1480,7 @@
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
+ struct client *e_client;
switch (status) {
/* expected: */
@@ -1496,9 +1497,10 @@
}
e->phy_packet.data[0] = packet->timestamp;
+ e_client = e->client;
queue_event(e->client, &e->event, &e->phy_packet,
sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
- client_put(e->client);
+ client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index ec68ed2..4cdbfef 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -374,16 +374,13 @@
card->bm_retries = 0;
}
+/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
card->local_node = NULL;
- spin_unlock_irqrestore(&card->lock, flags);
}
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
@@ -509,6 +506,8 @@
struct fw_node *local_node;
unsigned long flags;
+ spin_lock_irqsave(&card->lock, flags);
+
/*
* If the selfID buffer is not the immediate successor of the
* previously processed one, we cannot reliably compare the
@@ -520,8 +519,6 @@
card->bm_retries = 0;
}
- spin_lock_irqsave(&card->lock, flags);
-
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ac487c9..6c20815 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -73,24 +73,25 @@
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
- struct fw_transaction *t;
+ struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(t, &card->transaction_list, link) {
- if (t == transaction) {
- if (!try_cancel_split_timeout(t)) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter == transaction) {
+ if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
- list_del_init(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&t->link != &card->transaction_list) {
+ if (t) {
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@@ -935,7 +936,7 @@
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
- struct fw_transaction *t;
+ struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
@@ -947,20 +948,21 @@
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(t, &card->transaction_list, link) {
- if (t->node_id == source && t->tlabel == tlabel) {
- if (!try_cancel_split_timeout(t)) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter->node_id == source && iter->tlabel == tlabel) {
+ if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
- list_del_init(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&t->link == &card->transaction_list) {
+ if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 4d50542..2ceed92 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -408,7 +408,7 @@
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
- struct sbp2_orb *orb;
+ struct sbp2_orb *orb = NULL, *iter;
struct sbp2_status status;
unsigned long flags;
@@ -433,17 +433,18 @@
/* Lookup the orb corresponding to this status write. */
spin_lock_irqsave(&lu->tgt->lock, flags);
- list_for_each_entry(orb, &lu->orb_list, link) {
+ list_for_each_entry(iter, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
- STATUS_GET_ORB_LOW(status) == orb->request_bus) {
- orb->rcode = RCODE_COMPLETE;
- list_del(&orb->link);
+ STATUS_GET_ORB_LOW(status) == iter->request_bus) {
+ iter->rcode = RCODE_COMPLETE;
+ list_del(&iter->link);
+ orb = iter;
break;
}
}
spin_unlock_irqrestore(&lu->tgt->lock, flags);
- if (&orb->link != &lu->orb_list) {
+ if (orb) {
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 017e5d8..e51d28b 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -188,7 +188,7 @@
break;
loop_num_ret = le32_to_cpu(*num_ret);
- if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
+ if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) {
dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
break;
}
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4645677..a45678c 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -202,7 +202,8 @@
if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
- sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+ rate_cmp_func, NULL);
}
clk->rate_discrete = rate_discrete;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 745b7f9..8d24082 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -711,8 +711,12 @@
{
int ret = scmi_chan_setup(info, dev, prot_id, true);
- if (!ret) /* Rx is optional, hence no error check */
- scmi_chan_setup(info, dev, prot_id, false);
+ if (!ret) {
+ /* Rx is optional, report only memory errors */
+ ret = scmi_chan_setup(info, dev, prot_id, false);
+ if (ret && ret != -ENOMEM)
+ ret = 0;
+ }
return ret;
}
@@ -942,6 +946,7 @@
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
+ .suppress_bind_attrs = true,
.of_match_table = scmi_of_match,
.dev_groups = versions_groups,
},
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index a4e4aa9..af74e52 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -106,9 +106,28 @@
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ dev_set_drvdata(dev, scmi_pd_data);
+
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER, "genpd" },
{ },
@@ -118,6 +137,7 @@
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 4ceba5e..36391cb 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -815,7 +815,7 @@
info->firmware_version = le32_to_cpu(caps.platform_version);
}
/* Ignore error if not implemented */
- if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ if (info->is_legacy && ret == -EOPNOTSUPP)
return 0;
return ret;
@@ -905,13 +905,14 @@
struct resource res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
- scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
- if (!scpi_info)
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
- scpi_info->is_legacy = true;
+ scpi_drvinfo->is_legacy = true;
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
@@ -919,19 +920,19 @@
return -ENODEV;
}
- scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
- GFP_KERNEL);
- if (!scpi_info->channels)
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
return -ENOMEM;
- ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
if (ret)
return ret;
- for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
resource_size_t size;
- int idx = scpi_info->num_chans;
- struct scpi_chan *pchan = scpi_info->channels + idx;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -975,45 +976,53 @@
return ret;
}
- scpi_info->commands = scpi_std_commands;
+ scpi_drvinfo->commands = scpi_std_commands;
- platform_set_drvdata(pdev, scpi_info);
+ platform_set_drvdata(pdev, scpi_drvinfo);
- if (scpi_info->is_legacy) {
+ if (scpi_drvinfo->is_legacy) {
/* Replace with legacy variants */
scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
- scpi_info->commands = scpi_legacy_commands;
+ scpi_drvinfo->commands = scpi_legacy_commands;
/* Fill priority bitmap */
for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
set_bit(legacy_hpriority_cmds[idx],
- scpi_info->cmd_priority);
+ scpi_drvinfo->cmd_priority);
}
- ret = scpi_init_versions(scpi_info);
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
return ret;
}
- if (scpi_info->is_legacy && !scpi_info->protocol_version &&
- !scpi_info->firmware_version)
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
- scpi_info->protocol_version),
+ scpi_drvinfo->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
- scpi_info->firmware_version),
+ scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
- scpi_info->firmware_version));
- scpi_info->scpi_ops = &scpi_ops;
+ scpi_drvinfo->firmware_version));
- return devm_of_platform_populate(dev);
+ scpi_drvinfo->scpi_ops = &scpi_ops;
+
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
}
static const struct of_device_id scpi_of_match[] = {
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8b8127f..4a93fb4 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -603,7 +603,7 @@
"%d-%d", dh->type, entry->instance);
if (*ret) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return;
}
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 4dde8ed..3e8d4b5 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -243,29 +243,6 @@
}
/**
- * efi_capsule_flush - called by file close or file flush
- * @file: file pointer
- * @id: not used
- *
- * If a capsule is being partially uploaded then calling this function
- * will be treated as upload termination and will free those completed
- * buffer pages and -ECANCELED will be returned.
- **/
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
-{
- int ret = 0;
- struct capsule_info *cap_info = file->private_data;
-
- if (cap_info->index > 0) {
- pr_err("capsule upload not complete\n");
- efi_free_all_buff_pages(cap_info);
- ret = -ECANCELED;
- }
-
- return ret;
-}
-
-/**
* efi_capsule_release - called by file close
* @inode: not used
* @file: file pointer
@@ -277,6 +254,13 @@
{
struct capsule_info *cap_info = file->private_data;
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
kfree(cap_info->pages);
kfree(cap_info->phys);
kfree(file->private_data);
@@ -324,7 +308,6 @@
.owner = THIS_MODULE,
.open = efi_capsule_open,
.write = efi_capsule_write,
- .flush = efi_capsule_flush,
.release = efi_capsule_release,
.llseek = no_llseek,
};
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 0ef086e..7e771c5 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -266,7 +266,7 @@
efi_name[i] = name[i];
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
- preemptible(), record->size, record->psi->buf);
+ false, record->size, record->psi->buf);
if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
if (!schedule_work(&efivar_work))
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e3df82d..70be9c8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -590,7 +590,7 @@
seed = early_memremap(efi_rng_seed, sizeof(*seed));
if (seed != NULL) {
- size = READ_ONCE(seed->size);
+ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index a2ae9c3..433e11d 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -37,6 +37,13 @@
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 415a971..7f4bafc 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -24,7 +24,7 @@
return EFI_SUCCESS;
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
- if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+ if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 368cd60..d48b0de 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -281,14 +281,6 @@
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for exit_boot_services().
- */
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt((void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
initrd_addr, initrd_size);
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 24aa375..33ab567 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -75,7 +75,12 @@
if (status != EFI_SUCCESS)
return status;
- status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ /*
+ * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
+ * allocation will survive a kexec reboot (although we refresh the seed
+ * beforehand)
+ */
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
(void **)&seed);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 5efc524..a2be3a7 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -19,7 +19,7 @@
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
/*
* Determine whether we're in secure boot mode.
@@ -53,8 +53,8 @@
/*
* See if a user has put the shim into insecure mode. If so, and if the
- * variable doesn't have the runtime attribute set, we might as well
- * honor that.
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -63,7 +63,7 @@
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 3672539..5d0f1b1 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -414,6 +414,13 @@
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 8f66567..e8d69bd 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -97,7 +97,7 @@
goto out_calc;
}
- memblock_reserve((unsigned long)final_tbl,
+ memblock_reserve(efi.tpm_final_log,
tbl_size + sizeof(*final_tbl));
efi_tpm_final_log_size = tbl_size;
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 931544c..983e07d 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -21,7 +21,7 @@
config GOOGLE_COREBOOT_TABLE
tristate "Coreboot Table Access"
- depends on ACPI || OF
+ depends on HAS_IOMEM && (ACPI || OF)
help
This option enables the coreboot_table module, which provides other
firmware modules access to the coreboot table. The coreboot table
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 0205987..5680741 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -152,12 +152,8 @@
if (!ptr)
return -ENOMEM;
- ret = bus_register(&coreboot_bus_type);
- if (!ret) {
- ret = coreboot_table_populate(dev, ptr);
- if (ret)
- bus_unregister(&coreboot_bus_type);
- }
+ ret = coreboot_table_populate(dev, ptr);
+
memunmap(ptr);
return ret;
@@ -172,7 +168,6 @@
static int coreboot_table_remove(struct platform_device *pdev)
{
bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
- bus_unregister(&coreboot_bus_type);
return 0;
}
@@ -202,6 +197,32 @@
.of_match_table = of_match_ptr(coreboot_of_match),
},
};
-module_platform_driver(coreboot_table_driver);
+
+static int __init coreboot_table_driver_init(void)
+{
+ int ret;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&coreboot_table_driver);
+ if (ret) {
+ bus_unregister(&coreboot_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+ platform_driver_unregister(&coreboot_table_driver);
+ bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 7d9367b..c1cd5ca 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -680,6 +680,15 @@
static int gsmi_panic_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
+
+ /*
+ * Panic callbacks are executed with all other CPUs stopped,
+ * so we must not attempt to spin waiting for gsmi_dev.lock
+ * to be released.
+ */
+ if (spin_is_locked(&gsmi_dev.lock))
+ return NOTIFY_DONE;
+
gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
return NOTIFY_DONE;
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e10a998..d417199 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -749,12 +749,6 @@
};
int ret;
- desc.args[0] = addr;
- desc.args[1] = size;
- desc.args[2] = spare;
- desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
- QCOM_SCM_VAL);
-
ret = qcom_scm_call(__scm->dev, &desc, NULL);
/* the pg table has been initialized already, ignore the error */
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 2a76879..7dd0ac1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -477,7 +477,7 @@
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
cbdata->status = BIT(SVC_STATUS_ERROR);
- cbdata->kaddr1 = NULL;
+ cbdata->kaddr1 = &res.a1;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
@@ -941,17 +941,17 @@
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
- size_t size = 0;
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
- size = pmem->size;
- break;
+ gen_pool_free(chan->ctrl->genpool,
+ (unsigned long)kaddr, pmem->size);
+ pmem->vaddr = NULL;
+ list_del(&pmem->node);
+ return;
}
- gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
- pmem->vaddr = NULL;
- list_del(&pmem->node);
+ list_del(&svc_data_mem);
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 440d99c..fad97ec 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -429,7 +429,7 @@
mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
dentry = debugfs_create_file(name, mode, parent, bpmp,
&bpmp_debug_fops);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
err = -ENOMEM;
goto out;
}
@@ -680,7 +680,7 @@
if (t & DEBUGFS_S_ISDIR) {
dentry = debugfs_create_dir(name, parent);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
if (err < 0)
@@ -693,7 +693,7 @@
dentry = debugfs_create_file(name, mode,
parent, bpmp,
&debugfs_fops);
- if (!dentry)
+ if (IS_ERR(dentry))
return -ENOMEM;
}
}
@@ -743,11 +743,11 @@
return 0;
root = debugfs_create_dir("bpmp", NULL);
- if (!root)
+ if (IS_ERR(root))
return -ENOMEM;
bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror) {
+ if (IS_ERR(bpmp->debugfs_mirror)) {
err = -ENOMEM;
goto out;
}
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 2cf25fd..75b4b3e 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -108,7 +108,7 @@
u32 *buffer_32 = (u32 *)buf;
size_t i = 0;
- if (count <= 0)
+ if (!count)
return -EINVAL;
/* Write out the complete 32-bit chunks */
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index b450870..eb8a6e3 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1857,7 +1857,7 @@
return -EINVAL;
fds = memdup_user((void __user *)(arg + sizeof(hdr)),
- hdr.count * sizeof(s32));
+ array_size(hdr.count, sizeof(s32)));
if (IS_ERR(fds))
return PTR_ERR(fds);
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 59ddc9f..92e6eeb 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -1309,6 +1309,9 @@
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+ if (master->idx < 0)
+ return master->idx;
+
dev_set_name(&master->dev, "fsi%d", master->idx);
master->dev.class = &fsi_master_class;
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index dbad731..87edc77 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -525,7 +525,6 @@
static int fsi_master_aspeed_probe(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed;
- struct resource *res;
int rc, links, reg;
__be32 raw;
@@ -535,26 +534,28 @@
return rc;
}
- aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
+ aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL);
if (!aspeed)
return -ENOMEM;
aspeed->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- aspeed->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(aspeed->base))
- return PTR_ERR(aspeed->base);
+ aspeed->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(aspeed->base)) {
+ rc = PTR_ERR(aspeed->base);
+ goto err_free_aspeed;
+ }
aspeed->clk = devm_clk_get(aspeed->dev, NULL);
if (IS_ERR(aspeed->clk)) {
dev_err(aspeed->dev, "couldn't get clock\n");
- return PTR_ERR(aspeed->clk);
+ rc = PTR_ERR(aspeed->clk);
+ goto err_free_aspeed;
}
rc = clk_prepare_enable(aspeed->clk);
if (rc) {
dev_err(aspeed->dev, "couldn't enable clock\n");
- return rc;
+ goto err_free_aspeed;
}
rc = setup_cfam_reset(aspeed);
@@ -589,7 +590,7 @@
rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
if (rc) {
dev_err(&pdev->dev, "failed to read hub version\n");
- return rc;
+ goto err_release;
}
reg = be32_to_cpu(raw);
@@ -628,6 +629,8 @@
err_release:
clk_disable_unprepare(aspeed->clk);
+err_free_aspeed:
+ kfree(aspeed);
return rc;
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 4275c18..ea2e261 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -646,10 +646,9 @@
gpio->clks[1].id = "db";
err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
gpio->clks);
- if (err) {
- dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(gpio->dev, err,
+ "Cannot get APB/Debounce clocks\n");
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
if (err) {
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 67ed4f2..876027f 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -375,6 +375,13 @@
}
}
+static void gpio_mockup_debugfs_cleanup(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+
+ debugfs_remove_recursive(chip->dbg_dir);
+}
+
static void gpio_mockup_dispose_mappings(void *data)
{
struct gpio_mockup_chip *chip = data;
@@ -457,7 +464,7 @@
gpio_mockup_debugfs_setup(dev, chip);
- return 0;
+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
}
static struct platform_driver gpio_mockup_driver = {
@@ -597,9 +604,9 @@
static void __exit gpio_mockup_exit(void)
{
+ gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- gpio_mockup_unregister_pdevs();
}
module_init(gpio_mockup_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index d60d552..60c2533 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -169,6 +169,7 @@
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index ed7c5fc..2ab34a8 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -700,6 +700,9 @@
unsigned long flags;
unsigned int on, off;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index a78167b..3ad1a9e 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -350,6 +350,9 @@
.reg_bits = 8,
.val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+
.readable_reg = pca953x_readable_register,
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
@@ -761,11 +764,11 @@
bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
+
if (bitmap_empty(trigger, gc->ngpio))
return false;
- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
-
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
@@ -893,15 +896,18 @@
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
+ u8 regaddr;
int ret;
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
@@ -1107,20 +1113,21 @@
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
+ u8 regaddr;
/*
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1128,16 +1135,18 @@
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
- ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
- PCAL953X_IN_LATCH + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
return ret;
}
- ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
- PCAL953X_INT_MASK + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
@@ -1153,7 +1162,9 @@
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@@ -1176,13 +1187,17 @@
}
}
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&chip->i2c_lock);
return ret;
+ }
ret = regcache_sync(chip->regmap);
+ mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 58776f2..1ae612c 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -125,9 +125,13 @@
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
unsigned long mask = BIT(gpio);
+ u32 val;
- if (port->sdata && port->sdata->have_paddr)
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
+ if (port->sdata && port->sdata->have_paddr) {
+ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ val |= mask;
+ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+ }
vf610_gpio_set(chip, gpio, value);
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 98cd715..8d09b61 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -217,8 +217,6 @@
printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
maskl, pendl, maskh, pendh);
- atomic_inc(&irq_err_count);
-
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 7f8f5b0..4b61d97 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -385,12 +385,13 @@
unsigned long *base = gpiochip_get_data(gc);
const struct winbond_gpio_info *info;
bool val;
+ int ret;
winbond_gpio_get_info(&offset, &info);
- val = winbond_sio_enter(*base);
- if (val)
- return val;
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
winbond_sio_select_logical(*base, info->dev);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 55e4f40..44ee319 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -276,8 +276,8 @@
pin = agpio->pin_table[0];
if (pin <= 255) {
- char ev_name[5];
- sprintf(ev_name, "_%c%02hhX",
+ char ev_name[8];
+ sprintf(ev_name, "_%c%02X",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 2613881..381cfa2 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1769,7 +1769,6 @@
ret = -ENODEV;
goto out_free_le;
}
- le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -1783,7 +1782,7 @@
init_waitqueue_head(&le->wait);
/* Request a thread to read the events */
- ret = request_threaded_irq(le->irq,
+ ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
@@ -1792,6 +1791,8 @@
if (ret)
goto out_free_le;
+ le->irq = irq;
+
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 2f895a2..2e63274 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -863,7 +863,8 @@
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
- mm_gc->gc.of_node = np;
+ of_node_put(mm_gc->gc.of_node);
+ mm_gc->gc.of_node = of_node_get(np);
ret = gpiochip_add_data(gc, data);
if (ret)
@@ -871,6 +872,7 @@
return 0;
err2:
+ of_node_put(np);
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
@@ -912,7 +914,7 @@
i, &start);
of_property_read_u32_index(np, "gpio-reserved-ranges",
i + 1, &count);
- if (start >= chip->ngpio || start + count >= chip->ngpio)
+ if (start >= chip->ngpio || start + count > chip->ngpio)
continue;
bitmap_clear(chip->valid_mask, start, count);
@@ -933,6 +935,11 @@
if (!np)
return 0;
+ if (!of_property_read_bool(np, "gpio-ranges") &&
+ chip->of_gpio_ranges_fallback) {
+ return chip->of_gpio_ranges_fallback(chip, np);
+ }
+
group_names = of_find_property(np, group_names_propname, NULL);
for (;; index++) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 00526fd..59d8aff 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1411,6 +1411,16 @@
{
struct irq_domain *domain = gc->irq.domain;
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ /*
+ * Avoid race condition with other code, which tries to lookup
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+ if (!gc->irq.initialized)
+ return -EPROBE_DEFER;
+#endif
+
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -1602,6 +1612,15 @@
gpiochip_set_irq_hooks(gc);
+ /*
+ * Using barrier() here to prevent compiler from reordering
+ * gc->irq.initialized before initialization of above
+ * GPIO chip irq members.
+ */
+ barrier();
+
+ gc->irq.initialized = true;
+
acpi_gpiochip_request_interrupts(gc);
return 0;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ca86827..4e9b3a9 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -30,6 +30,7 @@
config DRM_MIPI_DBI
tristate
depends on DRM
+ select DRM_KMS_HELPER
config DRM_MIPI_DSI
bool
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 5b39362..a0f0a17 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d949d6c..ff55553 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -283,7 +283,7 @@
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fb6230c..d3a974d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -689,7 +689,8 @@
const uint32_t flush_type = 0;
bool all_hub = false;
- if (adev->family == AMDGPU_FAMILY_AI)
+ if (adev->family == AMDGPU_FAMILY_AI ||
+ adev->family == AMDGPU_FAMILY_RV)
all_hub = true;
return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 26f8a21..1b4c7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1024,11 +1024,15 @@
struct dma_fence **ef)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct drm_file *drm_priv = filp->private_data;
- struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
- struct amdgpu_vm *avm = &drv_priv->vm;
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
int ret;
+ ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index df1f9b8..98d3661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1671,10 +1671,12 @@
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
- if (amdgpu_audio != 0)
+ if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1796,6 +1798,7 @@
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1849,6 +1852,7 @@
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1899,6 +1903,7 @@
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 12598a4..ffd8f56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -116,7 +116,7 @@
int ret;
if (cs->in.num_chunks == 0)
- return 0;
+ return -EINVAL;
chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
@@ -1484,6 +1484,7 @@
return 0;
default:
+ dma_fence_put(fence);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f262c4e..bde0496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2047,6 +2047,11 @@
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
return r;
}
+
+ /*get pf2vf msg info at it's earliest time*/
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
}
}
@@ -2176,6 +2181,10 @@
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* Try to reserve bad pages early */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_exchange_data(adev);
+
r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ed13a2f..30659c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -632,7 +632,7 @@
* Maximum number of processes that HWS can schedule concurrently. The maximum is the
* number of VMIDs assigned to the HWS, which is also the default.
*/
-int hws_max_conc_proc = 8;
+int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e8c76bd..6aa9fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -341,11 +341,9 @@
if (r)
goto release_object;
- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
- r = amdgpu_mn_register(bo, args->addr);
- if (r)
- goto release_object;
- }
+ r = amdgpu_mn_register(bo, args->addr);
+ if (r)
+ goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9f9f55a..f84582b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -263,7 +263,7 @@
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
- while (queue_bit-- >= 0) {
+ while (--queue_bit >= 0) {
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ad9863b..6937f81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -905,6 +905,10 @@
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* Check domain to be pinned to against preferred domains */
+ if (bo->preferred_domains & domain)
+ domain = bo->preferred_domains & domain;
+
/* A shared bo cannot be migrated to VRAM */
if (bo->prime_shared_count) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
@@ -1338,7 +1342,8 @@
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
- dma_resv_lock(bo->base.resv, NULL);
+ if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+ return;
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 2f47f81..8a2abcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1921,7 +1921,7 @@
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
- if (!ucode->fw)
+ if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
@@ -2146,6 +2146,9 @@
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_unload(psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index eb22a19..3638f0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1979,15 +1979,12 @@
return 0;
}
-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
- if (adev->asic_type != CHIP_VEGA10 &&
- adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_SIENNA_CICHLID)
- return 1;
- else
- return 0;
+ return adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_SIENNA_CICHLID;
}
/*
@@ -2006,7 +2003,7 @@
*supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
- amdgpu_ras_check_asic_type(adev))
+ !amdgpu_ras_asic_supported(adev))
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index b313ce4..30005ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -625,8 +625,7 @@
void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
{
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
- amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
&adev->firmware.fw_buf_ptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index e7678ba..d6f2951 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -580,16 +580,34 @@
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
- uint64_t bp_block_offset = 0;
- uint32_t bp_block_size = 0;
- struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
-
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
if (adev->mman.fw_vram_usage_va != NULL) {
- adev->virt.vf2pf_update_interval_ms = 2000;
+ /* go through this logic in ip_init and reset to init workqueue*/
+ amdgpu_virt_exchange_data(adev);
+
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+ } else if (adev->bios != NULL) {
+ /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
+}
+
+
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+ uint64_t bp_block_offset = 0;
+ uint32_t bp_block_size = 0;
+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
@@ -616,13 +634,9 @@
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
}
-
- if (adev->virt.vf2pf_update_interval_ms != 0) {
- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
- schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
- }
}
+
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -656,6 +670,12 @@
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ /* VF MMIO access (except mailbox range) from CPU
+ * will be blocked during sriov runtime
+ */
+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
/* we have the ability to check now */
if (amdgpu_sriov_vf(adev)) {
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 8dd624c..aea49ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -31,6 +31,7 @@
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
/* all asic after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
@@ -241,6 +242,9 @@
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+
static inline bool is_virtual_machine(void)
{
#ifdef CONFIG_X86
@@ -271,6 +275,7 @@
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 635601d..45b1f00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3200,7 +3200,11 @@
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ /* For asic with VF MMIO access protection
+ * avoid using CPU for VM table updates
+ */
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !amdgpu_sriov_vf_mmio_access_protection(adev))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 042c85f..def0b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -622,7 +622,7 @@
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b19f7bd..38f4c74 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1248,6 +1248,8 @@
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};
@@ -2568,7 +2570,8 @@
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 150fa52..2aa9242 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -371,6 +371,7 @@
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -389,7 +390,7 @@
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3a86404..1673bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -839,6 +839,7 @@
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -878,7 +879,7 @@
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index f84701c..97441f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -178,6 +178,7 @@
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 2099f6e..bdb8e59 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1429,8 +1429,11 @@
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
+ vcn_v3_0_pause_dpg_mode(adev, 0, &state);
+
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 31d793e..86b4dad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -784,7 +784,7 @@
/* Fetch the CRAT table from ACPI */
status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
if (status == AE_NOT_FOUND) {
- pr_warn("CRAT table not found\n");
+ pr_info("CRAT table not found\n");
return -ENODATA;
} else if (ACPI_FAILURE(status)) {
const char *err = acpi_format_exception(status);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8431313..148e43d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -664,15 +664,10 @@
- kfd->vm_info.first_vmid_kfd + 1;
/* Verify module parameters regarding mapped process number*/
- if ((hws_max_conc_proc < 0)
- || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
- dev_err(kfd_device,
- "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
- hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
- kfd->vm_info.vmid_num_kfd);
+ if (hws_max_conc_proc >= 0)
+ kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+ else
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
- } else
- kfd->max_proc_per_quantum = hws_max_conc_proc;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2645ebc..195b7e0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -138,19 +138,33 @@
}
static void increment_queue_count(struct device_queue_manager *dqm,
- enum kfd_queue_type type)
+ struct qcm_process_device *qpd,
+ struct queue *q)
{
dqm->active_queue_count++;
- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;
+
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count++;
+ qpd->mapped_gws_queue = true;
+ }
}
static void decrement_queue_count(struct device_queue_manager *dqm,
- enum kfd_queue_type type)
+ struct qcm_process_device *qpd,
+ struct queue *q)
{
dqm->active_queue_count--;
- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;
+
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
}
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
@@ -377,7 +391,7 @@
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, qpd, q);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -502,13 +516,8 @@
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
- if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
- }
+ if (q->properties.is_active)
+ decrement_queue_count(dqm, qpd, q);
return retval;
}
@@ -598,12 +607,11 @@
* dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
- if (q->properties.is_active && !prev_active)
- increment_queue_count(dqm, q->properties.type);
- else if (!q->properties.is_active && prev_active)
- decrement_queue_count(dqm, q->properties.type);
-
- if (q->gws && !q->properties.is_gws) {
+ if (q->properties.is_active && !prev_active) {
+ increment_queue_count(dqm, &pdd->qpd, q);
+ } else if (!q->properties.is_active && prev_active) {
+ decrement_queue_count(dqm, &pdd->qpd, q);
+ } else if (q->gws && !q->properties.is_gws) {
if (q->properties.is_active) {
dqm->gws_queue_count++;
pdd->qpd.mapped_gws_queue = true;
@@ -665,11 +673,7 @@
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
+ decrement_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -713,7 +717,7 @@
continue;
q->properties.is_active = false;
- decrement_queue_count(dqm, q->properties.type);
+ decrement_queue_count(dqm, qpd, q);
}
pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
@@ -784,11 +788,7 @@
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- increment_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count++;
- qpd->mapped_gws_queue = true;
- }
+ increment_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -846,7 +846,7 @@
continue;
q->properties.is_active = true;
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, &pdd->qpd, q);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1247,7 +1247,7 @@
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- increment_queue_count(dqm, kq->queue->properties.type);
+ increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1261,7 +1261,7 @@
{
dqm_lock(dqm);
list_del(&kq->list);
- decrement_queue_count(dqm, kq->queue->properties.type);
+ decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1328,7 +1328,7 @@
qpd->queue_count++;
if (q->properties.is_active) {
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, qpd, q);
execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1513,15 +1513,11 @@
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
+ decrement_queue_count(dqm, qpd, q);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
}
/*
@@ -1732,7 +1728,7 @@
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- decrement_queue_count(dqm, kq->queue->properties.type);
+ decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1745,13 +1741,8 @@
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
- }
+ if (q->properties.is_active)
+ decrement_queue_count(dqm, qpd, q);
dqm->total_queue_count--;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index ba2c2ce..159be13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -531,6 +531,8 @@
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
+ if (!event_waiters)
+ return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
init_wait(&event_waiters[i].wait);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 17d1736..bd4caa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -270,15 +270,6 @@
return ret;
}
- ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
- O_RDWR);
- if (ret < 0) {
- kfifo_free(&client->fifo);
- kfree(client);
- return ret;
- }
- *fd = ret;
-
init_waitqueue_head(&client->wait_queue);
spin_lock_init(&client->lock);
client->events = 0;
@@ -288,5 +279,20 @@
list_add_rcu(&client->list, &dev->smi_clients);
spin_unlock(&dev->smi_lock);
+ ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
+ O_RDWR);
+ if (ret < 0) {
+ spin_lock(&dev->smi_lock);
+ list_del_rcu(&client->list);
+ spin_unlock(&dev->smi_lock);
+
+ synchronize_rcu();
+
+ kfifo_free(&client->fifo);
+ kfree(client);
+ return ret;
+ }
+ *fd = ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6c8f141..55ecc67 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -922,6 +922,37 @@
}
}
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1014,6 +1045,8 @@
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
@@ -2022,7 +2055,8 @@
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->mst_port)
+ if (aconnector->dc_link &&
+ aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -2140,7 +2174,7 @@
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
- u32 max_cll, min_cll, max, min, q, r;
+ u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
@@ -2163,7 +2197,7 @@
caps = &dm->backlight_caps;
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 /*||
@@ -2191,8 +2225,8 @@
* The results of the above expressions can be verified at
* pre_computed_values.
*/
- q = max_cll >> 5;
- r = max_cll % 32;
+ q = max_avg >> 5;
+ r = max_avg % 32;
max = (1 << q) * pre_computed_values[r];
// min luminance: maxLum * (CV/255)^2 / 100
@@ -6396,6 +6430,9 @@
mode = amdgpu_dm_create_common_mode(encoder,
common_modes[i].name, common_modes[i].w,
common_modes[i].h);
+ if (!mode)
+ continue;
+
drm_mode_probed_add(connector, mode);
amdgpu_dm_connector->num_modes++;
}
@@ -8612,10 +8649,13 @@
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
- struct drm_connector_state *conn_state;
+ struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;
- for_each_new_connector_in_state(state, connector, conn_state, i) {
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ if (!conn_state->crtc)
+ conn_state = old_conn_state;
+
if (conn_state->crtc != crtc)
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
index 6ca288f..2d46bc5 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
@@ -26,12 +26,12 @@
#include "bw_fixed.h"
-#define MIN_I64 \
- (int64_t)(-(1LL << 63))
-
#define MAX_I64 \
(int64_t)((1ULL << 63) - 1)
+#define MIN_I64 \
+ (-MAX_I64 - 1)
+
#define FRACTIONAL_PART_MASK \
((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 93f5229..99887bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2202,11 +2202,8 @@
if (update->abm_level)
stream->abm_level = *update->abm_level;
- if (update->periodic_interrupt0)
- stream->periodic_interrupt0 = *update->periodic_interrupt0;
-
- if (update->periodic_interrupt1)
- stream->periodic_interrupt1 = *update->periodic_interrupt1;
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
if (update->gamut_remap)
stream->gamut_remap_matrix = *update->gamut_remap;
@@ -2288,13 +2285,8 @@
if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
- if (stream_update->periodic_interrupt0 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
-
- if (stream_update->periodic_interrupt1 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 0e359a2..3f4403e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2822,7 +2822,7 @@
&dpcd_pattern_type.value,
sizeof(dpcd_pattern_type));
- channel_count = dpcd_test_mode.bits.channel_count + 1;
+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
// read pattern periods for requested channels when sawTooth pattern is requested
if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 5f4cdb0..1e47afc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1674,6 +1674,9 @@
if (is_timing_changed(stream_a, stream_b))
return false;
+ if (stream_a->signal != stream_b->signal)
+ return false;
+
if (stream_a->dpms_off != stream_b->dpms_off)
return false;
@@ -1698,8 +1701,8 @@
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
- // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
- if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+ /* compare audio info */
+ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 205bedd..0487c1b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -179,8 +179,7 @@
/* DMCU info */
unsigned int abm_level;
- struct periodic_interrupt_config periodic_interrupt0;
- struct periodic_interrupt_config periodic_interrupt1;
+ struct periodic_interrupt_config periodic_interrupt;
/* from core_stream struct */
struct dc_context *ctx;
@@ -244,8 +243,7 @@
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- struct periodic_interrupt_config *periodic_interrupt0;
- struct periodic_interrupt_config *periodic_interrupt1;
+ struct periodic_interrupt_config *periodic_interrupt;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index bae3a14..89cc852 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -546,9 +546,11 @@
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 3ac6c7b..e33fe02 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2047,7 +2047,8 @@
continue;
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
- if (pipe_ctx->stream_res.audio != NULL) {
+ if (pipe_ctx->stream_res.audio != NULL &&
+ pipe_ctx->stream_res.audio->enabled == false) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
@@ -2075,7 +2076,8 @@
if (!dc_is_dp_signal(pipe_ctx->stream->signal))
continue;
- if (pipe_ctx->stream_res.audio != NULL) {
+ if (pipe_ctx->stream_res.audio != NULL &&
+ pipe_ctx->stream_res.audio->enabled == false) {
struct audio_output audio_output;
build_audio_output(context, pipe_ctx, &audio_output);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 8f362e8..be6d43c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -361,7 +361,8 @@
audio_regs(2),
audio_regs(3),
audio_regs(4),
- audio_regs(5)
+ audio_regs(5),
+ audio_regs(6),
};
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 532f6a1..71a85c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2387,14 +2387,18 @@
&blnd_cfg.black_color);
}
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
@@ -3607,7 +3611,7 @@
{
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt0.lines_offset;
+ pipe_ctx->stream->periodic_interrupt.lines_offset;
int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
int start_position;
@@ -3632,18 +3636,10 @@
static void dcn10_cal_vline_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
- enum vline_select vline,
uint32_t *start_line,
uint32_t *end_line)
{
- enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
-
- if (vline == VLINE0)
- ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
- else if (vline == VLINE1)
- ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
-
- switch (ref_point) {
+ switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
case START_V_UPDATE:
dcn10_calc_vupdate_position(
dc,
@@ -3652,7 +3648,9 @@
end_line);
break;
case START_V_SYNC:
- // Suppose to do nothing because vsync is 0;
+ // vsync is line 0 so start_line is just the requested line offset
+ *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
+ *end_line = *start_line + 2;
break;
default:
ASSERT(0);
@@ -3662,24 +3660,15 @@
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline)
+ struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
- if (vline == VLINE0) {
- uint32_t start_line = 0;
- uint32_t end_line = 0;
+ dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
- dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
-
- tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
-
- } else if (vline == VLINE1) {
- pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
- tg,
- pipe_ctx->stream->periodic_interrupt1.lines_offset);
- }
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index e5691e4..81b5057 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -174,8 +174,7 @@
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 3fcd408..8556825 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -125,6 +125,12 @@
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 800be26..963d72f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -464,6 +464,11 @@
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+ // last chance to clear underflow; otherwise it will always be there because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 79a2b9c..c6c4888 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1481,6 +1481,7 @@
/* Any updates are handled in dc interface, just need
* to apply existing for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
@@ -1745,7 +1746,7 @@
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
- mdelay(1);
+ udelay(1);
}
}
@@ -2270,14 +2271,18 @@
pipe_ctx, &blnd_cfg.black_color);
}
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 99cc095..a701ea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -533,6 +533,12 @@
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7ed4d7c..01c4e87 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1267,6 +1267,7 @@
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index af462fe..b0fd885 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 2663f1b..e427f4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -6653,8 +6653,7 @@
return ret;
}
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 92280cc..dae8e48 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -53,8 +53,8 @@
*/
struct gpio_service *dal_gpio_service_create(
- enum dce_version dce_version_major,
- enum dce_version dce_version_minor,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment,
struct dc_context *ctx)
{
struct gpio_service *service;
@@ -67,14 +67,14 @@
return NULL;
}
- if (!dal_hw_translate_init(&service->translate, dce_version_major,
- dce_version_minor)) {
+ if (!dal_hw_translate_init(&service->translate, dce_version,
+ dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
- if (!dal_hw_factory_init(&service->factory, dce_version_major,
- dce_version_minor)) {
+ if (!dal_hw_factory_init(&service->factory, dce_version,
+ dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 64c1be8..3165a66 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,11 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum vline_select {
- VLINE0,
- VLINE1
-};
-
struct pipe_ctx;
struct dc_state;
struct dc_stream_status;
@@ -112,8 +107,7 @@
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*setup_periodic_interrupt)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
unsigned int vmin, unsigned int vmax,
unsigned int vmid, unsigned int vmid_frame_number);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 0e0f494..b037fd5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -227,14 +227,6 @@
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
- }
-
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -348,12 +340,6 @@
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 9c55d24..7e3240e 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -42,8 +42,8 @@
struct gpio **ptr);
struct gpio_service *dal_gpio_service_create(
- enum dce_version dce_version_major,
- enum dce_version dce_version_minor,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment,
struct dc_context *ctx);
struct gpio *dal_gpio_service_create_irq(
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 09bc2c2..3c4390d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1524,6 +1524,7 @@
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1552,6 +1553,9 @@
} else
hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 0fdf7a3..96e1805 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -100,7 +100,8 @@
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
-#define MASK_VTEM_MD0__RESERVED2 0x0C
+#define MASK_VTEM_MD0__QMS_EN 0x04
+#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@@ -109,7 +110,7 @@
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
-#define MASK_VTEM_MD2__RESERVED3 0xF8
+#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 4910961..5abb680 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2098,8 +2098,8 @@
}
}
- /* setting should not be allowed from VF */
- if (amdgpu_sriov_vf(adev)) {
+ /* setting should not be allowed from VF if not in one VF mode */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e6f40ee..9d97938 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -709,13 +709,13 @@
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
+ (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
+ data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
@@ -728,11 +728,11 @@
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
+ data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
+ data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
index 4b3faac..c8a5a56 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
@@ -1609,19 +1609,7 @@
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
- u8 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].clk >= 0) /* XXX */
- break;
- }
-
- if (i >= table->count)
- i = table->count - 1;
-
- return i;
+ return 0;
}
static void kv_update_acp_boot_level(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index a1e7ba5..d6544a6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -7250,17 +7250,15 @@
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
+ for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,
@@ -7282,8 +7280,8 @@
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ adev->pm.dpm.num_ps++;
}
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e589321..ee27970 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -115,7 +115,7 @@
uint32_t *min,
uint32_t *max)
{
- int ret = 0;
+ int ret = -ENOTSUPP;
if (!min && !max)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 1c526cb..3a31058 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -379,16 +379,31 @@
return 0;
}
-static int arcturus_check_powerplay_table(struct smu_context *smu)
+static void arcturus_check_bxco_support(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_powerplay_table *powerplay_table =
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
- smu_baco->platform_support = true;
+ powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ smu_baco->platform_support =
+ (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
+ false;
+ }
+}
+
+static int arcturus_check_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table =
+ table_context->power_play_table;
+
+ arcturus_check_bxco_support(smu);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -2131,13 +2146,11 @@
static bool arcturus_is_baco_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t val;
if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
return false;
- val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
- return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+ return true;
}
static int arcturus_set_df_cstate(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 2937784..a7773b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -338,19 +338,34 @@
return 0;
}
-static int navi10_check_powerplay_table(struct smu_context *smu)
+static void navi10_check_bxco_support(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_powerplay_table *powerplay_table =
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
+ powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ smu_baco->platform_support =
+ (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
+ false;
+ }
+}
+
+static int navi10_check_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table =
+ table_context->power_play_table;
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
- if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
- smu_baco->platform_support = true;
+ navi10_check_bxco_support(smu);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -1948,13 +1963,11 @@
static bool navi10_is_baco_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t val;
if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu)))
return false;
- val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
- return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+ return true;
}
static int navi10_set_default_od_settings(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 8556c22..45c8152 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -294,16 +294,47 @@
return 0;
}
-static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
+static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_7_powerplay_table *powerplay_table =
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
- smu_baco->platform_support = true;
+ powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ smu_baco->platform_support =
+ (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
+ false;
+
+ /*
+ * Disable BACO entry/exit completely on the SKUs below to
+ * avoid intermittent hardware failures.
+ */
+ if (((adev->pdev->device == 0x73A1) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73BF) &&
+ (adev->pdev->revision == 0xCF)) ||
+ ((adev->pdev->device == 0x7422) &&
+ (adev->pdev->revision == 0x00)))
+ smu_baco->platform_support = false;
+
+ }
+}
+
+static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_7_powerplay_table *powerplay_table =
+ table_context->power_play_table;
+
+ if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+ sienna_cichlid_check_bxco_support(smu);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -1736,13 +1767,11 @@
static bool sienna_cichlid_is_baco_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t val;
if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu)))
return false;
- val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
- return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
+ return true;
}
static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
@@ -2759,6 +2788,7 @@
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -2805,6 +2835,7 @@
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.run_btc = sienna_cichlid_run_btc,
+ .set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 98e915e..bc3f42e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -264,6 +264,10 @@
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
layer->layer_type, &n_formats);
+ if (!formats) {
+ kfree(kplane);
+ return -ENOMEM;
+ }
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
@@ -274,8 +278,10 @@
komeda_put_fourcc_list(formats);
- if (err)
- goto cleanup;
+ if (err) {
+ kfree(kplane);
+ return err;
+ }
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 587d947..af72909 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -483,7 +483,10 @@
if (crtc->state)
malidp_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index a9bb734..711061b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -169,6 +169,7 @@
#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
+#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6)
#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
@@ -385,10 +386,7 @@
#else
static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
- unsigned int offset = adv7511->type == ADV7533 ?
- ADV7533_REG_CEC_OFFSET : 0;
-
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index a20a45c..ddd1305 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -316,7 +316,7 @@
goto err_cec_alloc;
}
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
@@ -343,7 +343,7 @@
dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
ret);
err_cec_parse_dt:
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return ret == -EPROBE_DEFER ? ret : 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a0d392c..430c5e8 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -351,11 +351,17 @@
* from standby or are enabled. When the HPD goes low the adv7511 is
* reset and the outputs are disabled which might cause the monitor to
* go to standby again. To avoid this we ignore the HPD pin for the
- * first few seconds after enabling the output.
+ * first few seconds after enabling the output. On the other hand,
+ * the adv7535 requires the HPD Override bit to be enabled for proper HPD.
*/
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HPD_SRC_MASK,
- ADV7511_REG_POWER2_HPD_SRC_NONE);
+ if (adv7511->type == ADV7535)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7535_REG_POWER2_HPD_OVERRIDE,
+ ADV7535_REG_POWER2_HPD_OVERRIDE);
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
}
static void adv7511_power_on(struct adv7511 *adv7511)
@@ -375,6 +381,10 @@
static void __adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
+ if (adv7511->type == ADV7535)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7535_REG_POWER2_HPD_OVERRIDE, 0);
+
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
@@ -672,9 +682,14 @@
status = connector_status_disconnected;
} else {
/* Re-enable HPD sensing */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HPD_SRC_MASK,
- ADV7511_REG_POWER2_HPD_SRC_BOTH);
+ if (adv7511->type == ADV7535)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7535_REG_POWER2_HPD_OVERRIDE,
+ ADV7535_REG_POWER2_HPD_OVERRIDE);
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_BOTH);
}
adv7511->status = status;
@@ -1048,6 +1063,10 @@
ADV7511_CEC_I2C_ADDR_DEFAULT);
if (IS_ERR(adv->i2c_cec))
return PTR_ERR(adv->i2c_cec);
+
+ regmap_write(adv->regmap, ADV7511_REG_CEC_I2C_ADDR,
+ adv->i2c_cec->addr << 1);
+
i2c_set_clientdata(adv->i2c_cec, adv);
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1252,9 +1271,6 @@
if (ret)
goto err_i2c_unregister_packet;
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
- adv7511->i2c_cec->addr << 1);
-
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
if (i2c->irq) {
@@ -1291,6 +1307,7 @@
return 0;
err_unregister_cec:
+ cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
clk_disable_unprepare(adv7511->cec_clk);
@@ -1364,10 +1381,21 @@
static int __init adv7511_init(void)
{
- if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
- mipi_dsi_driver_register(&adv7533_dsi_driver);
+ int ret;
- return i2c_add_driver(&adv7511_driver);
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ ret = mipi_dsi_driver_register(&adv7533_dsi_driver);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_add_driver(&adv7511_driver);
+ if (ret) {
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
+ }
+
+ return ret;
}
module_init(adv7511_init);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index aa1bb86..e8baa07 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1269,6 +1269,25 @@
}
static
+struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
+ struct drm_atomic_state *state)
+{
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_old_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static
struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
struct drm_atomic_state *state)
{
@@ -1448,14 +1467,16 @@
{
struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_crtc *crtc;
+ struct drm_crtc *old_crtc, *new_crtc;
+ struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
- crtc = analogix_dp_get_new_crtc(dp, old_state);
- if (!crtc)
+ new_crtc = analogix_dp_get_new_crtc(dp, old_state);
+ if (!new_crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
if (!new_crtc_state)
goto out;
@@ -1464,6 +1485,19 @@
return;
out:
+ old_crtc = analogix_dp_get_old_crtc(dp, old_state);
+ if (old_crtc) {
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+ old_crtc);
+
+ /* When moving from PSR to fully disabled, exit PSR first. */
+ if (old_crtc_state && old_crtc_state->self_refresh_active) {
+ ret = analogix_dp_disable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to disable psr (%d)\n", ret);
+ }
+ }
+
analogix_dp_bridge_disable(bridge);
}
@@ -1639,8 +1673,19 @@
struct drm_dp_aux_msg *msg)
{
struct analogix_dp_device *dp = to_dp(aux);
+ int ret;
- return analogix_dp_transfer(dp, msg);
+ pm_runtime_get_sync(dp->dev);
+
+ ret = analogix_dp_detect_hpd(dp);
+ if (ret)
+ goto out;
+
+ ret = analogix_dp_transfer(dp, msg);
+out:
+ pm_runtime_put(dp->dev);
+
+ return ret;
}
struct analogix_dp_device *
@@ -1705,8 +1750,10 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dp->reg_base))
- return ERR_CAST(dp->reg_base);
+ if (IS_ERR(dp->reg_base)) {
+ ret = PTR_ERR(dp->reg_base);
+ goto err_disable_clk;
+ }
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1718,7 +1765,8 @@
if (IS_ERR(dp->hpd_gpiod)) {
dev_err(dev, "error getting HDP GPIO: %ld\n",
PTR_ERR(dp->hpd_gpiod));
- return ERR_CAST(dp->hpd_gpiod);
+ ret = PTR_ERR(dp->hpd_gpiod);
+ goto err_disable_clk;
}
if (dp->hpd_gpiod) {
@@ -1738,7 +1786,8 @@
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- return ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_disable_clk;
}
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1747,11 +1796,15 @@
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- return ERR_PTR(ret);
+ goto err_disable_clk;
}
disable_irq(dp->irq);
return dp;
+
+err_disable_clk:
+ clk_disable_unprepare(dp->clock);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(analogix_dp_probe);
@@ -1812,12 +1865,6 @@
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1832,13 +1879,6 @@
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index b31281f..0ced08d 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1286,6 +1286,7 @@
{ .compatible = "cdns,dsi" },
{ },
};
+MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
static struct platform_driver cdns_dsi_platform_driver = {
.probe = cdns_dsi_drm_probe,
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 29b1ce2..1dcc28a 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -816,13 +816,14 @@
drm_connector_helper_add(&lt9611->connector,
&lt9611_bridge_connector_helper_funcs);
- drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
+ drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
+
return 0;
}
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index cce98bf..72248a5 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -296,7 +296,9 @@
* This check is to avoid both the drivers
* removing the bridge in their remove() function
*/
- if (!ge_b850v3_lvds_ptr)
+ if (!ge_b850v3_lvds_ptr ||
+ !ge_b850v3_lvds_ptr->stdp2690_i2c ||
+ !ge_b850v3_lvds_ptr->stdp4028_i2c)
goto out;
drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 6cac2e5..b68d335 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1188,6 +1188,7 @@
ret = nwl_dsi_select_input(dsi);
if (ret < 0) {
+ pm_runtime_disable(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 7bd0aff..9248510 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -333,8 +333,8 @@
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);
- ps_bridge->supplies[0].supply = "vdd33";
- ps_bridge->supplies[1].supply = "vdd12";
+ ps_bridge->supplies[0].supply = "vdd12";
+ ps_bridge->supplies[1].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
ps_bridge->supplies);
if (ret)
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 843265d..ab0bce4 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -605,7 +605,7 @@
u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
int size = len + 2;
- if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ if (ctx->burst.tx_count + size >= ARRAY_SIZE(ctx->burst.tx_buf)) {
dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
ctx->error = -EINVAL;
return NULL;
@@ -622,7 +622,7 @@
u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
int size = len + 1;
- if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) {
+ if (ctx->burst.rx_count + size >= ARRAY_SIZE(ctx->burst.rx_buf)) {
dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
ctx->error = -EINVAL;
return NULL;
@@ -2120,7 +2120,7 @@
if (ret) {
dev_err(ctx->dev, "Failed to register RC device\n");
ctx->error = ret;
- rc_free_device(ctx->rc_dev);
+ rc_free_device(rc_dev);
return;
}
ctx->rc_dev = rc_dev;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 29c0eb4..356c7d0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2566,8 +2566,9 @@
if (!output_fmts)
return NULL;
- /* If dw-hdmi is the only bridge, avoid negociating with ourselves */
- if (list_is_singular(&bridge->encoder->bridge_chain)) {
+ /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */
+ if (list_is_singular(&bridge->encoder->bridge_chain) ||
+ list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) {
*num_output_fmts = 1;
output_fmts[0] = MEDIA_BUS_FMT_FIXED;
@@ -2983,6 +2984,7 @@
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
+ enum drm_connector_status status = connector_status_unknown;
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
@@ -3021,13 +3023,15 @@
cec_notifier_phys_addr_invalidate(hdmi->cec_notifier);
mutex_unlock(&hdmi->cec_notifier_mutex);
}
+
+ if (phy_stat & HDMI_PHY_HPD)
+ status = connector_status_connected;
+
+ if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE)))
+ status = connector_status_disconnected;
}
- if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
- enum drm_connector_status status = phy_int_pol & HDMI_PHY_HPD
- ? connector_status_connected
- : connector_status_disconnected;
-
+ if (status != connector_status_unknown) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
status == connector_status_connected ?
"plugin" : "plugout");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 6b268f9..376fa6e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1172,6 +1172,7 @@
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ pm_runtime_disable(dev);
dw_mipi_dsi_debugfs_remove(dsi);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 34a3e4e..b4f7e7a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1535,19 +1535,12 @@
return IRQ_HANDLED;
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
{
- struct device *dev = &client->dev;
+ struct device *dev = tc->dev;
struct drm_panel *panel;
- struct tc_data *tc;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
-
- tc->dev = dev;
-
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL);
if (ret && ret != -ENODEV)
@@ -1566,6 +1559,50 @@
tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
}
+ return 0;
+}
+
+static void tc_clk_disable(void *data)
+{
+ struct clk *refclk = data;
+
+ clk_disable_unprepare(refclk);
+}
+
+static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tc_data *tc;
+ int ret;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->dev = dev;
+
+ ret = tc_probe_edp_bridge_endpoint(tc);
+ if (ret)
+ return ret;
+
+ tc->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(tc->refclk)) {
+ ret = PTR_ERR(tc->refclk);
+ dev_err(dev, "Failed to get refclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tc->refclk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, tc_clk_disable, tc->refclk);
+ if (ret)
+ return ret;
+
+ /* tRSTW = 100 cycles , at 13 MHz that is ~7.69 us */
+ usleep_range(10, 15);
+
/* Shut down GPIO is optional */
tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
if (IS_ERR(tc->sd_gpio))
@@ -1586,13 +1623,6 @@
usleep_range(5000, 10000);
}
- tc->refclk = devm_clk_get(dev, "ref");
- if (IS_ERR(tc->refclk)) {
- ret = PTR_ERR(tc->refclk);
- dev_err(dev, "Failed to get refclk: %d\n", ret);
- return ret;
- }
-
tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
if (IS_ERR(tc->regmap)) {
ret = PTR_ERR(tc->regmap);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8a871e5..7fc8e70 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -996,9 +996,19 @@
return drm_atomic_crtc_effectively_active(old_state);
/*
- * We need to run through the crtc_funcs->disable() function if the CRTC
- * is currently on, if it's transitioning to self refresh mode, or if
- * it's in self refresh mode and needs to be fully disabled.
+ * We need to disable bridge(s) and CRTC if we're transitioning out of
+ * self-refresh and changing CRTCs at the same time, because the
+ * bridge tracks self-refresh status via CRTC state.
+ */
+ if (old_state->self_refresh_active &&
+ old_state->crtc != new_state->crtc)
+ return true;
+
+ /*
+ * We also need to run through the crtc_funcs->disable() function if
+ * the CRTC is currently on, if it's transitioning to self refresh
+ * mode, or if it's in self refresh mode and needs to be fully
+ * disabled.
*/
return old_state->active ||
(old_state->self_refresh_active && !new_state->enable) ||
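For reference, the disable decision added above reduces to a small predicate over the old and new CRTC state: leaving self-refresh while the connector moves to a different CRTC now also forces a full disable, because the bridge tracks self-refresh through the old CRTC's state. A minimal user-space sketch of that logic, with an illustrative struct standing in for drm_crtc_state (not the real kernel types), assuming the function's final condition is the new state's self_refresh_active flag:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant bits of struct drm_crtc_state. */
struct crtc_state {
	bool active;
	bool self_refresh_active;
	bool enable;
	int crtc;		/* identifies the bound CRTC */
};

/* Same decision as the helper above: disable when leaving self-refresh while
 * switching CRTCs, when the CRTC was on, when entering self-refresh, or when
 * in self-refresh and about to be fully disabled. */
static bool crtc_needs_disable(const struct crtc_state *old_state,
			       const struct crtc_state *new_state)
{
	if (old_state->self_refresh_active && old_state->crtc != new_state->crtc)
		return true;

	return old_state->active ||
	       (old_state->self_refresh_active && !new_state->enable) ||
	       new_state->self_refresh_active;
}

int main(void)
{
	struct crtc_state old = { .self_refresh_active = true, .crtc = 0 };
	struct crtc_state new = { .enable = true, .crtc = 1 };

	printf("%d\n", crtc_needs_disable(&old, &new));	/* 1: CRTC changed */
	return 0;
}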
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 044acd0..d799ec1 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -753,8 +753,8 @@
struct drm_connector_state *conn_state,
u32 out_bus_fmt)
{
+ unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- unsigned int num_in_bus_fmts, i;
struct drm_bridge *prev_bridge;
u32 *in_bus_fmts;
int ret;
@@ -875,7 +875,7 @@
struct drm_connector *conn = conn_state->connector;
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
- unsigned int i, num_out_bus_fmts;
+ unsigned int i, num_out_bus_fmts = 0;
struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 25ce42e..61e09f8 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -32,16 +32,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
- return 0;
-}
-#endif
-
/* drm_dp_aux_dev.c */
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 1c9ea9f..f2ff0bf 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -62,23 +62,45 @@
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size)
{
+ u8 zero = 0;
+ char *tmpbuf = NULL;
+ /*
+ * As sub-addressing is not supported by all adaptors,
+ * always explicitly read from the start and discard
+ * any bytes that come before the requested offset.
+ * This way, no matter whether the adaptor supports it
+ * or not, we'll end up reading the proper data.
+ */
struct i2c_msg msgs[] = {
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = 0,
.len = 1,
- .buf = &offset,
+ .buf = &zero,
},
{
.addr = DP_DUAL_MODE_SLAVE_ADDRESS,
.flags = I2C_M_RD,
- .len = size,
+ .len = size + offset,
.buf = buffer,
},
};
int ret;
+ if (offset) {
+ tmpbuf = kmalloc(size + offset, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ msgs[1].buf = tmpbuf;
+ }
+
ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (tmpbuf)
+ memcpy(buffer, tmpbuf + offset, size);
+
+ kfree(tmpbuf);
+
if (ret < 0)
return ret;
if (ret != ARRAY_SIZE(msgs))
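The hunk above changes the dual-mode read to always start at register 0 and discard the first `offset` bytes, so adaptors that ACK but ignore the sub-address still return the right data. A hedged user-space sketch of the same buffer handling, with the i2c transfer replaced by a fake adaptor that streams from offset 0 (all names here are illustrative, not the kernel API):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Fake "adaptor" register file; real adaptors with broken sub-addressing
 * always return data from offset 0, which is what the workaround targets. */
static const unsigned char regs[16] = "DP-DUAL-MODE\x44\x00\x00";

static int fake_i2c_read_from_zero(unsigned char *buf, size_t len)
{
	if (len > sizeof(regs))
		len = sizeof(regs);
	memcpy(buf, regs, len);
	return 0;
}

/* Read `size` bytes that logically live at `offset`: fetch offset+size bytes
 * from the start into a temporary buffer, then copy out only the tail. */
static int dual_mode_read(unsigned char offset, void *buffer, size_t size)
{
	unsigned char *tmp = malloc(size + offset);
	int ret;

	if (!tmp)
		return -1;

	ret = fake_i2c_read_from_zero(tmp, size + offset);
	if (!ret)
		memcpy(buffer, tmp + offset, size);

	free(tmp);
	return ret;
}

int main(void)
{
	unsigned char id;

	dual_mode_read(12, &id, 1);	/* byte at offset 12 above is 0x44 */
	printf("0x%02x\n", id);
	return 0;
}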
@@ -205,18 +227,6 @@
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
- /*
- * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
- * the offset but ignore it, and instead they just always return
- * data from the start of the HDMI ID buffer. So for a broken
- * type 1 HDMI adaptor a single byte read will always give us
- * 0x44, and for a type 1 DVI adaptor it should give 0x00
- * (assuming it implements any registers). Fortunately neither
- * of those values will match the type 2 signature of the
- * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
- * the type 2 adaptor detection safely even in the presence
- * of broken type 1 adaptors.
- */
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
@@ -231,11 +241,10 @@
return DRM_DP_DUAL_MODE_TYPE2_DVI;
}
/*
- * If neither a proper type 1 ID nor a broken type 1 adaptor
- * as described above, assume type 1, but let the user know
- * that we may have misdetected the type.
+ * If not a proper type 1 ID, still assume type 1, but let
+ * the user know that we may have misdetected the type.
*/
- if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+ if (!is_type1_adaptor(adaptor_id))
DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
adaptor_id);
@@ -339,10 +348,8 @@
* @enable: enable (as opposed to disable) the TMDS output buffers
*
* Set the state of the TMDS output buffers in the adaptor. For
- * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
- * some type 1 adaptors have problems with registers (see comments
- * in drm_dp_dual_mode_detect()) we avoid touching the register,
- * making this function a no-op on type 1 adaptors.
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register.
+ * Type1 adaptors do not support any register writes.
*
* Returns:
* 0 on success, negative error code on failure
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 3c55753..6ba16db 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -2172,17 +2172,8 @@
struct drm_dp_phy_test_params *data, u8 dp_rev)
{
int err, i;
- u8 link_config[2];
u8 test_pattern;
- link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
- link_config[1] = data->num_lanes;
- if (data->enhanced_frame_cap)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
- if (err < 0)
- return err;
-
test_pattern = data->phy_pattern;
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 1f54e94..4272cd3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4792,6 +4792,7 @@
mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
+ kfree(mst_edid);
}
/**
@@ -4855,14 +4856,14 @@
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret) {
+ if (ret != 2) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret) {
+ if (ret != 1) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
@@ -4870,7 +4871,7 @@
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret) {
+ if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 006e3b8..4ca995c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -610,7 +610,7 @@
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
- ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3d7593e..4334e46 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1995,9 +1995,6 @@
connector_bad_edid(connector, edid, edid[0x7e] + 1);
- edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
- edid[0x7e] = valid_extensions;
-
new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
GFP_KERNEL);
if (!new)
@@ -2014,6 +2011,9 @@
base += EDID_LENGTH;
}
+ new[EDID_LENGTH - 1] += new[0x7e] - valid_extensions;
+ new[0x7e] = valid_extensions;
+
kfree(edid);
edid = new;
}
@@ -4806,7 +4806,8 @@
if (!edid_ext)
goto end;
- has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+ has_audio = (edid_ext[0] == CEA_EXT &&
+ (edid_ext[3] & EDID_BASIC_AUDIO) != 0);
if (has_audio) {
DRM_DEBUG_KMS("Monitor has basic audio support\n");
@@ -4959,16 +4960,8 @@
connector->name, dc_bpc);
info->bpc = dc_bpc;
- /*
- * Deep color support mandates RGB444 support for all video
- * modes and forbids YCRCB422 support for all video modes per
- * HDMI 1.3 spec.
- */
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
-
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
connector->name);
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8033467..ac5d61e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2271,24 +2271,3 @@
drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char name[] = "fbcon";
- struct module *fbcon;
-
- mutex_lock(&module_mutex);
- fbcon = find_module(name);
- mutex_unlock(&module_mutex);
-
- if (!fbcon)
- request_module_nowait(name);
-#endif
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 69c2c07..8b30e8d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -166,21 +166,6 @@
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -254,7 +239,7 @@
else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1277,7 +1262,7 @@
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
@@ -1302,7 +1287,7 @@
goto retry;
}
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b65865c..41efe40 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -86,8 +86,8 @@
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
@@ -116,7 +116,8 @@
static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
{
- kthread_destroy_worker(vblank->worker);
+ if (vblank->worker)
+ kthread_destroy_worker(vblank->worker);
}
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 4606cc9..c160a45 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -473,7 +473,13 @@
*/
static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
{
- int len;
+ size_t len;
+
+ /* don't attempt to copy a NULL pointer */
+ if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) {
+ *buf_len = 0;
+ return 0;
+ }
/* don't overflow userbuf */
len = strlen(value);
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 221a852..f933da1 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,19 +64,18 @@
static int __init drm_kms_helper_init(void)
{
- int ret;
+ /*
+ * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable
+ * console.
+ */
+ if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+ IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ !IS_ENABLED(CONFIG_EXPERT))
+ request_module_nowait("fbcon");
- /* Call init functions from specific kms helpers here */
- ret = drm_fb_helper_modinit();
- if (ret < 0)
- goto out;
-
- ret = drm_dp_aux_dev_init();
- if (ret < 0)
- goto out;
-
-out:
- return ret;
+ return drm_dp_aux_dev_init();
}
static void __exit drm_kms_helper_exit(void)
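The rewritten init path folds the old drm_fb_helper_modinit() #ifdef block into a single runtime-looking condition built from compile-time config tests. A minimal sketch of that decision, using plain booleans as stand-ins for the IS_ENABLED()/IS_MODULE() macros (the stub names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the Kconfig tests; in the kernel these are compile-time
 * IS_ENABLED()/IS_MODULE() macros, not runtime variables. */
static const bool cfg_fbdev_emulation = true;
static const bool cfg_fbcon_is_module = true;
static const bool cfg_expert = false;

static void request_module_nowait_stub(const char *name)
{
	printf("would request module \"%s\"\n", name);
}

/* Only poke fbcon when fbdev emulation is enabled, fbcon is built as a
 * module, and CONFIG_EXPERT has not handed control back to the user. */
static int kms_helper_init_sketch(void)
{
	if (cfg_fbdev_emulation && cfg_fbcon_is_module && !cfg_expert)
		request_module_nowait_stub("fbcon");

	return 0;	/* the real code then calls drm_dp_aux_dev_init() */
}

int main(void)
{
	return kms_helper_init_sketch();
}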
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 230c4fd..9f13222 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -1137,6 +1137,13 @@
size_t chunk;
int ret;
+ /* In __spi_validate, there's a validation that no partial transfers
+ * are accepted (xfer->len % w_size must be zero).
+ * Here we align max_chunk to multiple of 2 (16bits),
+ * to prevent transfers from being rejected.
+ */
+ max_chunk = ALIGN_DOWN(max_chunk, 2);
+
spi_message_init_with_transfers(&m, &tr, 1);
while (len) {
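Rounding max_chunk down to a multiple of 2 keeps every SPI transfer length a whole number of 16-bit words, which is what __spi_validate requires. A standalone sketch of the chunking arithmetic, with ALIGN_DOWN reimplemented locally for power-of-two alignments (illustrative, not the kernel macro):

#include <stddef.h>
#include <stdio.h>

/* Local reimplementation of ALIGN_DOWN for a power-of-two alignment `a`. */
#define ALIGN_DOWN(x, a) ((x) & ~((size_t)(a) - 1))

static void plan_chunks(size_t len, size_t max_chunk)
{
	/* Keep each chunk a multiple of 2 bytes (16-bit words). */
	max_chunk = ALIGN_DOWN(max_chunk, 2);

	while (len) {
		size_t chunk = len < max_chunk ? len : max_chunk;

		printf("transfer %zu bytes\n", chunk);
		len -= chunk;
	}
}

int main(void)
{
	plan_chunks(4096, 4095);	/* max_chunk is rounded down to 4094 */
	return 0;
}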
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5dd475e..2c43d54 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -300,6 +300,7 @@
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 448c2f2..ca0fefe 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -128,6 +128,18 @@
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Acer Switch V 10 (SW5-017) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Anbernic Win600 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"),
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -166,6 +178,12 @@
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
},
.driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /* GPD Win Max */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* GPD Pocket, note that the the DMI data is less generic then
* it seems, devices with a board-vendor of "AMI Corporation"
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index affe1cf..24f6439 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -186,6 +186,13 @@
if (WARN_ON(config->num_total_plane >= 32))
return -EINVAL;
+ /*
+ * First driver to need more than 64 formats needs to fix this. Each
+ * format is encoded as a bit and the current code only supports a u64.
+ */
+ if (WARN_ON(format_count > 64))
+ return -EINVAL;
+
WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
(!funcs->atomic_destroy_state ||
!funcs->atomic_duplicate_state));
@@ -207,13 +214,6 @@
return -ENOMEM;
}
- /*
- * First driver to need more than 64 formats needs to fix this. Each
- * format is encoded as a bit and the current code only supports a u64.
- */
- if (WARN_ON(format_count > 64))
- return -EINVAL;
-
if (format_modifiers) {
const uint64_t *temp_modifiers = format_modifiers;
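The format-count check is hoisted before the allocation because each supported format is tracked as one bit in a u64 mask, so more than 64 formats cannot be represented. A minimal sketch of that encoding (generic C, not the DRM plane code):

#include <stdint.h>
#include <stdio.h>

/* Build a bitmask with one bit per supported format index; bail out the
 * same way the plane-init path does once 64 formats would be exceeded. */
static int build_format_mask(unsigned int format_count, uint64_t *mask)
{
	unsigned int i;

	if (format_count > 64)
		return -1;	/* would not fit in a u64 */

	*mask = 0;
	for (i = 0; i < format_count; i++)
		*mask |= 1ULL << i;

	return 0;
}

int main(void)
{
	uint64_t mask;

	if (!build_format_mask(10, &mask))
		printf("mask = 0x%llx\n", (unsigned long long)mask);
	return 0;
}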
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9f955f2..825499e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -187,29 +187,33 @@
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
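The lookup above now walks the handle-keyed rb-tree instead of the dma_buf-keyed one, taking the prime lock itself. The descent is an ordinary binary-search-tree walk; a self-contained sketch with a plain BST standing in for the kernel rb-tree (simplified types, illustrative names):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct drm_prime_member: one node per
 * (handle, dma_buf) pair, kept in a tree sorted by handle. */
struct member {
	uint32_t handle;
	struct member *left, *right;	/* rb_left / rb_right in the kernel */
};

/* Same descent as drm_prime_remove_buf_handle(): go right when the node's
 * handle is smaller than the key, left when it is larger, stop on a match. */
static struct member *lookup_by_handle(struct member *root, uint32_t handle)
{
	while (root) {
		if (root->handle == handle)
			return root;
		if (root->handle < handle)
			root = root->right;
		else
			root = root->left;
	}
	return NULL;
}

int main(void)
{
	struct member a = { 10, NULL, NULL };
	struct member c = { 30, NULL, NULL };
	struct member b = { 20, &a, &c };

	printf("%s\n", lookup_by_handle(&b, 30) ? "found" : "missing");
	return 0;
}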
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 984569a..9ba2fe4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -282,6 +282,12 @@
mutex_lock(&context->lock);
+ /* Bail if the mapping has been reaped by another thread */
+ if (!mapping->context) {
+ mutex_unlock(&context->lock);
+ return;
+ }
+
/* If the vram node is on the mm, unmap and remove the node */
if (mapping->vram_node.mm == &context->mm)
etnaviv_iommu_remove_mapping(context, mapping);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f2d87a7..1c04c23 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -800,31 +800,40 @@
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
ret);
- return ret;
+ goto err_pclk_enable;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
ret);
- return ret;
+ goto err_aclk_enable;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
ret);
- return ret;
+ goto err_eclk_enable;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
ret);
- return ret;
+ goto err_vclk_enable;
}
return 0;
+
+err_vclk_enable:
+ clk_disable_unprepare(ctx->eclk);
+err_eclk_enable:
+ clk_disable_unprepare(ctx->aclk);
+err_aclk_enable:
+ clk_disable_unprepare(ctx->pclk);
+err_pclk_enable:
+ return ret;
}
#endif
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 3df6d6e..70148ae 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -529,15 +529,18 @@
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c548..bd3df5a 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -536,14 +536,15 @@
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
- struct drm_crtc *crtc = NULL;
+ struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
if (gma_crtc->pipe == pipe)
- break;
+ return crtc;
}
- return crtc;
+ return NULL;
}
int gma_connector_clones(struct drm_device *dev, int type_mask)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 43943e9..4e41c14 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && ARM64
+ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 0bf31f9..e6780fc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -526,8 +526,8 @@
* reg for DC3CO debugging and validation,
* but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
*/
- seq_printf(m, "DC3CO count: %d\n",
- intel_de_read(dev_priv, DMC_DEBUG3));
+ seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ?
+ DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
} else {
dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
SKL_CSR_DC3_DC5_COUNT;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index f2c8b56..261a5e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -163,6 +163,28 @@
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on varius TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"Using LINK_BW_SET value %02x\n", link_bw);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index ecaa538..ef78781 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -790,6 +790,7 @@
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
+ drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index eed037e..8777d35 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -121,9 +121,25 @@
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9
-static enum port intel_dsi_seq_port_to_port(u8 port)
+static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
+ u8 seq_port)
{
- return port ? PORT_C : PORT_A;
+ /*
+ * If single link DSI is being used on any port, the VBT sequence block
+ * send packet apparently always has 0 for the port. Just use the port
+ * we have configured, and ignore the sequence block port.
+ */
+ if (hweight8(intel_dsi->ports) == 1)
+ return ffs(intel_dsi->ports) - 1;
+
+ if (seq_port) {
+ if (intel_dsi->ports & BIT(PORT_B))
+ return PORT_B;
+ else if (intel_dsi->ports & BIT(PORT_C))
+ return PORT_C;
+ }
+
+ return PORT_A;
}
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
@@ -145,15 +161,10 @@
seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
- /* For DSI single link on Port A & C, the seq_port value which is
- * parsed from Sequence Block#53 of VBT has been set to 0
- * Now, read/write of packets for the DSI single link on Port A and
- * Port C will based on the DVO port from VBT block 2.
- */
- if (intel_dsi->ports == (1 << PORT_C))
- port = PORT_C;
- else
- port = intel_dsi_seq_port_to_port(seq_port);
+ port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
+
+ if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
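The new port mapping trusts the configured port mask for single-link DSI (where the VBT always reports port 0) and only honours a non-zero sequence port on dual-link setups. A small sketch of that selection, using GCC builtins in place of hweight8()/ffs() and a simple bitmask where bit N means PORT_A + N is enabled (illustrative, not the i915 types):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C };

/* Single-link: return the only enabled port; dual-link: a non-zero sequence
 * port selects B or C if enabled, otherwise fall back to A. */
static enum port seq_port_to_port(unsigned int ports, unsigned int seq_port)
{
	if (__builtin_popcount(ports) == 1)
		return __builtin_ffs(ports) - 1;	/* the only enabled port */

	if (seq_port) {
		if (ports & (1 << PORT_B))
			return PORT_B;
		else if (ports & (1 << PORT_C))
			return PORT_C;
	}

	return PORT_A;
}

int main(void)
{
	printf("%d\n", seq_port_to_port(1 << PORT_C, 0));	/* -> 2 (PORT_C) */
	return 0;
}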
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 46beb15..8eb1842 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -156,6 +156,9 @@
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 4eaa4aa..58f8fb7 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2760,13 +2760,10 @@
if (!intel_sdvo_connector)
return false;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
- }
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
@@ -2821,7 +2818,6 @@
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- intel_sdvo->controlled_output |= type;
intel_sdvo_connector->output_flag = type;
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2862,13 +2858,10 @@
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
- }
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2898,13 +2891,10 @@
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ if (device == 0)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ else if (device == 1)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
- }
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
@@ -2937,16 +2927,39 @@
return false;
}
+static u16 intel_sdvo_filter_output_flags(u16 flags)
+{
+ flags &= SDVO_OUTPUT_MASK;
+
+ /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
+ if (!(flags & SDVO_OUTPUT_TMDS0))
+ flags &= ~SDVO_OUTPUT_TMDS1;
+
+ if (!(flags & SDVO_OUTPUT_RGB0))
+ flags &= ~SDVO_OUTPUT_RGB1;
+
+ if (!(flags & SDVO_OUTPUT_LVDS0))
+ flags &= ~SDVO_OUTPUT_LVDS1;
+
+ return flags;
+}
+
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
{
- /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+
+ flags = intel_sdvo_filter_output_flags(flags);
+
+ intel_sdvo->controlled_output = flags;
+
+ intel_sdvo_select_ddc_bus(i915, intel_sdvo);
if (flags & SDVO_OUTPUT_TMDS0)
if (!intel_sdvo_dvi_init(intel_sdvo, 0))
return false;
- if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (flags & SDVO_OUTPUT_TMDS1)
if (!intel_sdvo_dvi_init(intel_sdvo, 1))
return false;
@@ -2967,7 +2980,7 @@
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
- if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (flags & SDVO_OUTPUT_RGB1)
if (!intel_sdvo_analog_init(intel_sdvo, 1))
return false;
@@ -2975,14 +2988,13 @@
if (!intel_sdvo_lvds_init(intel_sdvo, 0))
return false;
- if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (flags & SDVO_OUTPUT_LVDS1)
if (!intel_sdvo_lvds_init(intel_sdvo, 1))
return false;
- if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ if (flags == 0) {
unsigned char bytes[2];
- intel_sdvo->controlled_output = 0;
memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(intel_sdvo),
@@ -3394,8 +3406,6 @@
*/
intel_sdvo->base.cloneable = 0;
- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
-
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
goto err_output;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 8dd295d..dd35d3d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -36,13 +36,13 @@
goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->mm.pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->mm.pages->nents; i++) {
+ for (i = 0; i < obj->mm.pages->orig_nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 5754bcc..92dd65b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -423,7 +423,7 @@
return -EACCES;
addr -= area->vm_start;
- if (addr >= obj->base.size)
+ if (range_overflows_t(u64, addr, len, obj->base.size))
return -EINVAL;
/* As this is primarily for debugging, let's focus on simplicity */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 6615eb5..5f86d9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -736,6 +736,24 @@
mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+
+ for_each_engine(engine, gt, id) {
+ struct reg_and_bit rb;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ if (INTEL_GEN(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS))
+ rb.bit = _MASKED_BIT_ENABLE(rb.bit);
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ }
+
+ spin_unlock_irq(&uncore->lock);
+
for_each_engine(engine, gt, id) {
/*
* HW architecture suggest typical invalidation time at 40us,
@@ -750,7 +768,6 @@
if (!i915_mmio_reg_offset(rb.reg))
continue;
- intel_uncore_write_fw(uncore, rb.reg, rb.bit);
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c0..35d55f9 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -4788,8 +4788,8 @@
continue;
hw = shmem_pin_map(engine->default_state);
- if (IS_ERR(hw)) {
- err = PTR_ERR(hw);
+ if (!hw) {
+ err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
@@ -4965,8 +4965,8 @@
continue;
hw = shmem_pin_map(engine->default_state);
- if (IS_ERR(hw)) {
- err = PTR_ERR(hw);
+ if (!hw) {
+ err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0b1ea29..606e6c3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -660,7 +660,7 @@
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 74e66de..5e670aa 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3964,8 +3964,8 @@
return ERR_PTR(err);
}
-static ssize_t show_dynamic_id(struct device *dev,
- struct device_attribute *attr,
+static ssize_t show_dynamic_id(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
struct i915_oa_config *oa_config =
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a36a455..534951f 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -54,7 +54,7 @@
struct attribute_group sysfs_metric;
struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
+ struct kobj_attribute sysfs_metric_id;
struct kref ref;
struct rcu_head rcu;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1248899..04157d8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7202,7 +7202,7 @@
#define _SEL_FETCH_PLANE_BASE_6_A 0x70940
#define _SEL_FETCH_PLANE_BASE_7_A 0x70960
#define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
-#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
+#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
#define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
_SEL_FETCH_PLANE_BASE_1_A, \
@@ -7546,7 +7546,8 @@
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
-#define DMC_DEBUG3 _MMIO(0x101090)
+#define TGL_DMC_DEBUG3 _MMIO(0x101090)
+#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 45d32ef..ac40a95 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -500,7 +500,14 @@
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
- ssize_t ret;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
@@ -512,8 +519,10 @@
const char *str = "No error state collected\n";
size_t len = strlen(str);
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
}
return ret;
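The error-state read now initialises ret to 0 and only copies when the requested offset still lies inside the fallback string, so a follow-up read past the end returns 0 (EOF) instead of copying with an underflowed length. A small sketch of that bounded copy in the shape of a sysfs bin-attribute read callback (generic C, illustrative names):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Serve `count` bytes of the string starting at `off`; return 0 once the
 * offset has reached the end, the way sysfs expects. */
static ssize_t read_string(char *buf, size_t off, size_t count)
{
	static const char str[] = "No error state collected\n";
	size_t len = strlen(str);
	ssize_t ret = 0;

	if (off < len) {
		ret = count < len - off ? count : len - off;
		memcpy(buf, str + off, ret);
	}

	return ret;
}

int main(void)
{
	char buf[64];
	ssize_t n = read_string(buf, 0, sizeof(buf));

	printf("first read: %zd bytes, second read: %zd bytes\n",
	       n, read_string(buf, n, sizeof(buf)));
	return 0;
}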
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 472aaea..1b5e8d3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2846,7 +2846,7 @@
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[8])
+ u16 wm[])
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -5145,10 +5145,14 @@
wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index c849533..3f5750c 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -207,6 +207,7 @@
ret = dcss_submodules_init(dcss);
if (ret) {
+ of_node_put(dcss->of_port);
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
@@ -237,6 +238,8 @@
dcss_clocks_disable(dcss);
}
+ of_node_put(dcss->of_port);
+
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index f54087a..46a188d 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -268,7 +268,6 @@
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
- u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
@@ -279,7 +278,6 @@
if (!fb || !state->crtc || !state->visible)
return;
- pixel_format = state->fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 75036aa..efd13e5 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -553,6 +553,8 @@
edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
+ if (!channel->edid)
+ return -ENOMEM;
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 2a8d2e3..9fe6a47 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -212,8 +212,9 @@
return ret;
}
-static int imx_tve_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+imx_tve_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index d412fc2..fd9d8e5 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -68,7 +68,7 @@
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 605ac88..b61bfa8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -70,8 +70,10 @@
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ drm_mode_destroy(connector->dev, mode);
return ret;
+ }
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index b6bb5fc..e34718c 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -68,6 +69,21 @@
bool panel_is_sharp;
bool no_vblank;
+
+ /*
+ * clk_mutex is used to synchronize the pixel clock rate update with
+ * the VBLANK. When the pixel clock's parent clock needs to be updated,
+ * clock_nb's notifier function will lock the mutex, then wait until the
+ * next VBLANK. At that point, the parent clock's rate can be updated,
+ * and the mutex is then unlocked. If an atomic commit happens in the
+ * meantime, it will lock on the mutex, effectively waiting until the
+ * clock update process finishes. Finally, the pixel clock's rate will
+ * be recomputed when the mutex has been released, in the pending atomic
+ * commit, or a future one.
+ */
+ struct mutex clk_mutex;
+ bool update_clk_rate;
+ struct notifier_block clock_nb;
};
static const u32 ingenic_drm_primary_formats[] = {
@@ -111,6 +127,29 @@
return container_of(crtc, struct ingenic_drm, crtc);
}
+static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb)
+{
+ return container_of(nb, struct ingenic_drm, clock_nb);
+}
+
+static int ingenic_drm_update_pixclk(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct ingenic_drm *priv = drm_nb_get_priv(nb);
+
+ switch (action) {
+ case PRE_RATE_CHANGE:
+ mutex_lock(&priv->clk_mutex);
+ priv->update_clk_rate = true;
+ drm_crtc_wait_one_vblank(&priv->crtc);
+ return NOTIFY_OK;
+ default:
+ mutex_unlock(&priv->clk_mutex);
+ return NOTIFY_OK;
+ }
+}
+
static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
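The clk_mutex scheme described in the comment above pairs a notifier that holds the mutex across the parent clock's rate change with a commit path that recomputes the pixel clock once the flag is set. A hedged user-space sketch of that protocol (pthread mutex standing in for the kernel mutex; all names are illustrative, and the real driver additionally waits for one vblank in the PRE step):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t clk_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool update_clk_rate;

/* PRE_RATE_CHANGE: take the mutex and ask the next commit to refresh the
 * pixel clock rate. */
static void on_pre_rate_change(void)
{
	pthread_mutex_lock(&clk_mutex);
	update_clk_rate = true;
}

/* POST/ABORT_RATE_CHANGE: the parent rate is settled, release the mutex. */
static void on_post_rate_change(void)
{
	pthread_mutex_unlock(&clk_mutex);
}

/* Atomic-commit path: if a refresh was requested, serialise against the
 * notifier and set the pixel clock from the now-stable parent rate. */
static void commit_tail(unsigned long pixclock_hz)
{
	if (update_clk_rate) {
		pthread_mutex_lock(&clk_mutex);
		printf("clk_set_rate(pix_clk, %lu)\n", pixclock_hz);
		update_clk_rate = false;
		pthread_mutex_unlock(&clk_mutex);
	}
}

int main(void)
{
	on_pre_rate_change();
	on_post_rate_change();
	commit_tail(71100000);
	return 0;
}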
@@ -276,8 +315,14 @@
if (drm_atomic_crtc_needs_modeset(state)) {
ingenic_drm_crtc_update_timings(priv, &state->mode);
+ priv->update_clk_rate = true;
+ }
+ if (priv->update_clk_rate) {
+ mutex_lock(&priv->clk_mutex);
clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000);
+ priv->update_clk_rate = false;
+ mutex_unlock(&priv->clk_mutex);
}
if (event) {
@@ -936,16 +981,28 @@
if (soc_info->has_osd)
regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
+ mutex_init(&priv->clk_mutex);
+ priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;
+
+ parent_clk = clk_get_parent(priv->pix_clk);
+ ret = clk_notifier_register(parent_clk, &priv->clock_nb);
+ if (ret) {
+ dev_err(dev, "Unable to register clock notifier\n");
+ goto err_devclk_disable;
+ }
+
ret = drm_dev_register(drm, 0);
if (ret) {
dev_err(dev, "Failed to register DRM driver\n");
- goto err_devclk_disable;
+ goto err_clk_notifier_unregister;
}
drm_fbdev_generic_setup(drm, 32);
return 0;
+err_clk_notifier_unregister:
+ clk_notifier_unregister(parent_clk, &priv->clock_nb);
err_devclk_disable:
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
@@ -967,7 +1024,9 @@
static void ingenic_drm_unbind(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
+ struct clk *parent_clk = clk_get_parent(priv->pix_clk);
+ clk_notifier_unregister(parent_clk, &priv->clock_nb);
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
clk_disable_unprepare(priv->pix_clk);
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 5275b27..64e6fb8 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1118,6 +1118,7 @@
bridge = of_drm_find_bridge(child);
if (!bridge) {
dev_err(dev, "failed to find bridge\n");
+ of_node_put(child);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index cb29b64..12bf937 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -84,7 +84,7 @@
u32 tmp = readl(cec->regs + offset) & ~mask;
tmp |= val & mask;
- writel(val, cec->regs + offset);
+ writel(tmp, cec->regs + offset);
}
void mtk_cec_set_hpd_event(struct device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 52f11a6..c1ae336 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -52,13 +52,7 @@
};
enum mtk_dpi_out_color_format {
- MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_RGB_FULL,
- MTK_DPI_COLOR_FORMAT_YCBCR_444,
- MTK_DPI_COLOR_FORMAT_YCBCR_422,
- MTK_DPI_COLOR_FORMAT_XV_YCC,
- MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL,
- MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL
+ MTK_DPI_COLOR_FORMAT_RGB
};
struct mtk_dpi {
@@ -358,24 +352,11 @@
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_444) ||
- (format == MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL)) {
- mtk_dpi_config_yuv422_enable(dpi, false);
- mtk_dpi_config_csc_enable(dpi, true);
- mtk_dpi_config_swap_input(dpi, false);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_BGR);
- } else if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_422) ||
- (format == MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL)) {
- mtk_dpi_config_yuv422_enable(dpi, true);
- mtk_dpi_config_csc_enable(dpi, true);
- mtk_dpi_config_swap_input(dpi, true);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
- } else {
- mtk_dpi_config_yuv422_enable(dpi, false);
- mtk_dpi_config_csc_enable(dpi, false);
- mtk_dpi_config_swap_input(dpi, false);
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
- }
+ /* only support RGB888 */
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, false);
+ mtk_dpi_config_swap_input(dpi, false);
+ mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
}
static void mtk_dpi_power_off(struct mtk_dpi *dpi)
@@ -416,7 +397,6 @@
if (dpi->pinctrl && dpi->pins_dpi)
pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
- mtk_dpi_enable(dpi);
return 0;
err_pixel:
@@ -553,6 +533,7 @@
mtk_dpi_power_on(dpi);
mtk_dpi_set_display_mode(dpi, &dpi->mode);
+ mtk_dpi_enable(dpi);
}
static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 65fd99c..146c4d0 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -202,6 +202,7 @@
struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
+ bool lanes_ready;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
const struct mtk_dsi_driver_data *driver_data;
@@ -644,18 +645,11 @@
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
- mtk_dsi_rxtx_control(dsi);
- usleep_range(30, 100);
- mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
mtk_dsi_set_interrupt_enable(dsi);
- mtk_dsi_clk_ulp_mode_leave(dsi);
- mtk_dsi_lane0_ulp_mode_leave(dsi);
- mtk_dsi_clk_hs_mode(dsi, 0);
-
return 0;
err_disable_engine_clk:
clk_disable_unprepare(dsi->engine_clk);
@@ -687,6 +681,8 @@
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
+ /* set the lane number as 0 to pull down mipi */
+ writel(0, dsi->regs + DSI_TXRX_CTRL);
mtk_dsi_disable(dsi);
@@ -694,21 +690,31 @@
clk_disable_unprepare(dsi->digital_clk);
phy_power_off(dsi->phy);
+
+ dsi->lanes_ready = false;
+}
+
+static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
+{
+ if (!dsi->lanes_ready) {
+ dsi->lanes_ready = true;
+ mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
+ mtk_dsi_clk_ulp_mode_leave(dsi);
+ mtk_dsi_lane0_ulp_mode_leave(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 0);
+ msleep(20);
+ /* The reaction time after pulling up the mipi signal for dsi_rx */
+ }
}
static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
- int ret;
-
if (dsi->enabled)
return;
- ret = mtk_dsi_poweron(dsi);
- if (ret < 0) {
- DRM_ERROR("failed to power on dsi\n");
- return;
- }
-
+ mtk_dsi_lane_ready(dsi);
mtk_dsi_set_mode(dsi);
mtk_dsi_clk_hs_mode(dsi, 1);
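mtk_dsi_lane_ready() brings the lanes up exactly once per power cycle, whether the first caller is the bridge enable path or a host transfer, and poweroff clears the flag so the sequence runs again next time. A tiny sketch of that one-shot guard (generic C, printfs standing in for the register programming):

#include <stdbool.h>
#include <stdio.h>

static bool lanes_ready;

/* Bring the data/clock lanes out of ULPS the first time anyone needs them;
 * later callers see the flag and skip the sequence. */
static void lane_ready(void)
{
	if (lanes_ready)
		return;

	lanes_ready = true;
	printf("rxtx control, leave ULPS, enter HS clock\n");
	/* the driver then sleeps ~20ms so the panel's RX side can settle */
}

static void poweroff(void)
{
	printf("pull lanes down\n");
	lanes_ready = false;	/* next poweron must redo the sequence */
}

int main(void)
{
	lane_ready();	/* performs the sequence */
	lane_ready();	/* no-op */
	poweroff();
	lane_ready();	/* performs it again after the next power cycle */
	return 0;
}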
@@ -722,8 +728,6 @@
if (!dsi->enabled)
return;
- mtk_dsi_poweroff(dsi);
-
dsi->enabled = false;
}
@@ -746,24 +750,53 @@
drm_display_mode_to_videomode(adjusted, &dsi->vm);
}
-static void mtk_dsi_bridge_disable(struct drm_bridge *bridge)
+static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
mtk_output_dsi_disable(dsi);
}
-static void mtk_dsi_bridge_enable(struct drm_bridge *bridge)
+static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+ if (dsi->refcount == 0)
+ return;
+
mtk_output_dsi_enable(dsi);
}
+static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ ret = mtk_dsi_poweron(dsi);
+ if (ret < 0)
+ DRM_ERROR("failed to power on dsi\n");
+}
+
+static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct mtk_dsi *dsi = bridge_to_dsi(bridge);
+
+ mtk_dsi_poweroff(dsi);
+}
+
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
- .disable = mtk_dsi_bridge_disable,
- .enable = mtk_dsi_bridge_enable,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_enable = mtk_dsi_bridge_atomic_enable,
+ .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
+ .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
@@ -891,24 +924,35 @@
u8 read_data[16];
void *src_addr;
u8 irq_flag = CMD_DONE_INT_FLAG;
+ u32 dsi_mode;
+ int ret;
- if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
- DRM_ERROR("dsi engine is not command mode\n");
- return -EINVAL;
+ dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
+ if (dsi_mode & MODE) {
+ mtk_dsi_stop(dsi);
+ ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
+ if (ret)
+ goto restore_dsi_mode;
}
if (MTK_DSI_HOST_IS_READ(msg->type))
irq_flag |= LPRX_RD_RDY_INT_FLAG;
- if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
- return -ETIME;
+ mtk_dsi_lane_ready(dsi);
- if (!MTK_DSI_HOST_IS_READ(msg->type))
- return 0;
+ ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
+ if (ret)
+ goto restore_dsi_mode;
+
+ if (!MTK_DSI_HOST_IS_READ(msg->type)) {
+ recv_cnt = 0;
+ goto restore_dsi_mode;
+ }
if (!msg->rx_buf) {
DRM_ERROR("dsi receive buffer size may be NULL\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto restore_dsi_mode;
}
for (i = 0; i < 16; i++)
@@ -933,7 +977,13 @@
DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
recv_cnt, *((u8 *)(msg->tx_buf)));
- return recv_cnt;
+restore_dsi_mode:
+ if (dsi_mode & MODE) {
+ mtk_dsi_set_mode(dsi);
+ mtk_dsi_start(dsi);
+ }
+
+ return ret < 0 ? ret : recv_cnt;
}
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2753067..b0bfe85 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -116,8 +116,11 @@
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
@@ -396,10 +399,8 @@
drm_irq_uninstall(drm);
drm_dev_put(drm);
- if (priv->afbcd.ops) {
- priv->afbcd.ops->reset(priv);
- meson_rdma_free(priv);
- }
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->exit(priv);
}
static const struct component_master_ops meson_drv_master_ops = {
@@ -527,6 +528,13 @@
return 0;
};
+static int meson_drv_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &meson_drv_master_ops);
+
+ return 0;
+}
+
static struct meson_drm_match_data meson_drm_gxbb_data = {
.compat = VPU_COMPATIBLE_GXBB,
};
@@ -564,6 +572,7 @@
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .remove = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c
index ffc6b58..0cdbe89 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.c
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c
@@ -79,11 +79,6 @@
return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
}
-static int meson_gxm_afbcd_init(struct meson_drm *priv)
-{
- return 0;
-}
-
static int meson_gxm_afbcd_reset(struct meson_drm *priv)
{
writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
@@ -93,6 +88,16 @@
return 0;
}
+static int meson_gxm_afbcd_init(struct meson_drm *priv)
+{
+ return 0;
+}
+
+static void meson_gxm_afbcd_exit(struct meson_drm *priv)
+{
+ meson_gxm_afbcd_reset(priv);
+}
+
static int meson_gxm_afbcd_enable(struct meson_drm *priv)
{
writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
@@ -172,6 +177,7 @@
struct meson_afbcd_ops meson_afbcd_gxm_ops = {
.init = meson_gxm_afbcd_init,
+ .exit = meson_gxm_afbcd_exit,
.reset = meson_gxm_afbcd_reset,
.enable = meson_gxm_afbcd_enable,
.disable = meson_gxm_afbcd_disable,
@@ -269,6 +275,18 @@
return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
}
+static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+{
+ meson_rdma_reset(priv);
+
+ meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
+ VIU_SW_RESET_G12A_OSD1_AFBCD,
+ VIU_SW_RESET);
+ meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
+
+ return 0;
+}
+
static int meson_g12a_afbcd_init(struct meson_drm *priv)
{
int ret;
@@ -286,16 +304,10 @@
return 0;
}
-static int meson_g12a_afbcd_reset(struct meson_drm *priv)
+static void meson_g12a_afbcd_exit(struct meson_drm *priv)
{
- meson_rdma_reset(priv);
-
- meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
- VIU_SW_RESET_G12A_OSD1_AFBCD,
- VIU_SW_RESET);
- meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
-
- return 0;
+ meson_g12a_afbcd_reset(priv);
+ meson_rdma_free(priv);
}
static int meson_g12a_afbcd_enable(struct meson_drm *priv)
@@ -380,6 +392,7 @@
struct meson_afbcd_ops meson_afbcd_g12a_ops = {
.init = meson_g12a_afbcd_init,
+ .exit = meson_g12a_afbcd_exit,
.reset = meson_g12a_afbcd_reset,
.enable = meson_g12a_afbcd_enable,
.disable = meson_g12a_afbcd_disable,
diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h
index 5e55233..e77ddeb 100644
--- a/drivers/gpu/drm/meson/meson_osd_afbcd.h
+++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h
@@ -14,6 +14,7 @@
struct meson_afbcd_ops {
int (*init)(struct meson_drm *priv);
+ void (*exit)(struct meson_drm *priv);
int (*reset)(struct meson_drm *priv);
int (*enable)(struct meson_drm *priv);
int (*disable)(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 35338ed..255c6b8 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -163,7 +163,7 @@
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 259f3e6..d4b9078 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
@@ -469,17 +469,17 @@
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
- VIU_OSD_BLEND_REORDER(1, 0) |
- VIU_OSD_BLEND_REORDER(2, 0) |
- VIU_OSD_BLEND_REORDER(3, 0) |
- VIU_OSD_BLEND_DIN_EN(1) |
- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
- VIU_OSD_BLEND_HOLD_LINES(4),
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
+ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
+ (u32)VIU_OSD_BLEND_DIN_EN(1) |
+ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
+ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 509968c..2a13e29 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1243,7 +1243,10 @@
WREG_GFX(3, 0x00);
WREG_GFX(4, 0x00);
WREG_GFX(5, 0x40);
- WREG_GFX(6, 0x05);
+ /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode),
+ * so that it doesn't hang when running kexec/kdump on G200_SE rev42.
+ */
+ WREG_GFX(6, 0x0d);
WREG_GFX(7, 0x0f);
WREG_GFX(8, 0x0f);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9e09805..dffc133 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1222,7 +1222,7 @@
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL, 0x1ffffffffULL);
+ "gpu", 0x100000000ULL, SZ_4G);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -1308,6 +1308,7 @@
BUG_ON(!node);
ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 458b5b2..de8cc25 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -960,7 +960,8 @@
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
- pm_runtime_disable(&priv->gpu_pdev->dev);
+ if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f7f5c25..a0274fc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1113,7 +1113,7 @@
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 6f0f545..108882b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -146,6 +146,7 @@
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
active_hctl = (active_h_end << 16) | active_h_start;
display_hctl = active_hctl;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 08e082d..b7841f7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -675,11 +675,11 @@
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
- for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
-
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
+ }
}
}
@@ -937,7 +937,9 @@
dpu_kms_parse_data_bus_icc_path(dpu_kms);
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
+ if (rc < 0)
+ goto error;
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -983,7 +985,7 @@
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 9b2b504..74a13cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -34,6 +34,14 @@
{
int i;
+ for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
+ struct dpu_hw_dspp *hw;
+
+ if (rm->dspp_blks[i]) {
+ hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
+ dpu_hw_dspp_destroy(hw);
+ }
+ }
for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
struct dpu_hw_pingpong *hw;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 5e8c3f3..fc86d34 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -11,6 +11,14 @@
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
@@ -148,20 +156,15 @@
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
- int ret, i;
+ int ret;
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
- vbif = dpu_kms->hw_vbif[i];
- }
-
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
@@ -204,7 +207,7 @@
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
@@ -216,13 +219,7 @@
}
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
- vbif = dpu_kms->hw_vbif[i];
- break;
- }
- }
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 913de59..b4d0bfc 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -221,6 +221,7 @@
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+ of_node_put(panel_node);
return PTR_ERR(encoder);
}
@@ -230,6 +231,7 @@
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+ of_node_put(panel_node);
return PTR_ERR(connector);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 7288041..7444b75 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -56,8 +56,9 @@
return ret;
}
-static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index a8fa084..ff4f207 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -608,9 +608,15 @@
if (ret)
return ret;
- mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
if (old_r_mixer) {
- mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
if (!need_right_mixer)
pipeline->r_mixer = NULL;
}
@@ -977,8 +983,10 @@
ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
- if (ret)
+ if (ret) {
+ drm_gem_object_put(cursor_bo);
return -EINVAL;
+ }
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index e193865..9baaaef 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -598,9 +598,9 @@
pdev = mdp5_kms->pdev;
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (irq < 0) {
- ret = irq;
- DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 954db68..2536def 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -116,21 +116,28 @@
return 0;
}
-void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
- struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
+ struct mdp5_hw_mixer_state *new_state;
if (!mixer)
- return;
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
}
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 43c9ba4..545ee22 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -30,7 +30,7 @@
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
-void mdp5_mixer_release(struct drm_atomic_state *s,
- struct mdp5_hw_mixer *mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ba66959..e4b8a78 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -119,18 +119,24 @@
return 0;
}
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct mdp5_global_state *state = mdp5_get_global_state(s);
- struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+ struct mdp5_global_state *state;
+ struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
- return;
+ return 0;
+
+ state = mdp5_get_global_state(s);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from plane %s", hwpipe->name,
new_state->hwpipe_to_plane[hwpipe->idx]->name);
@@ -141,6 +147,8 @@
}
new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
}
void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index 9b26d07..cca6793 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -37,7 +37,7 @@
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe);
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 8342309..0dc23c8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -179,7 +179,10 @@
drm_framebuffer_put(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
/* assign default blend parameters */
mdp5_state->alpha = 255;
@@ -390,12 +393,24 @@
mdp5_state->r_hwpipe = NULL;
- mdp5_pipe_release(state->state, old_hwpipe);
- mdp5_pipe_release(state->state, old_right_hwpipe);
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
}
} else {
- mdp5_pipe_release(state->state, mdp5_state->hwpipe);
- mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index aeca8b2..613348b 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -416,7 +416,7 @@
if (rate == link_rate_hbr3)
pixel_div = 6;
- else if (rate == 1620000 || rate == 270000)
+ else if (rate == 162000 || rate == 270000)
pixel_div = 2;
else if (rate == link_rate_hbr2)
pixel_div = 4;
@@ -572,7 +572,7 @@
dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
}
-u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog)
+u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 6d257db..176a902 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -97,7 +97,7 @@
void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
u32 intr_mask, bool en);
void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
-u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog);
+u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index c83a165..9fac55c 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1205,7 +1205,7 @@
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
@@ -1460,6 +1460,30 @@
return ret;
}
+static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret;
+
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+ }
+
+ phy_power_off(phy);
+ phy_exit(phy);
+
+ return 0;
+}
+
static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1640,8 +1664,7 @@
if (rc)
return rc;
- while (--link_train_max_retries &&
- !atomic_read(&ctrl->dp_ctrl.aborted)) {
+ while (--link_train_max_retries) {
rc = dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
@@ -1656,6 +1679,10 @@
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
+ if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+ break;
+ }
+
rc = dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
@@ -1675,6 +1702,10 @@
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed, lower lane rate */
+ if (!dp_catalog_link_is_connected(ctrl->catalog)) {
+ break;
+ }
+
rc = dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) {
/* end with failure */
@@ -1695,6 +1726,11 @@
*/
if (rc == 0) /* link train successfully */
dp_ctrl_push_idle(dp_ctrl);
+ else {
+ /* link training failed */
+ dp_ctrl_deinitialize_mainlink(ctrl);
+ rc = -ECONNRESET;
+ }
return rc;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 66f2ea3..5a152d5 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -45,7 +45,7 @@
ST_CONNECT_PENDING,
ST_CONNECTED,
ST_DISCONNECT_PENDING,
- ST_SUSPEND_PENDING,
+ ST_DISPLAY_OFF,
ST_SUSPENDED,
};
@@ -102,6 +102,8 @@
struct dp_display_mode dp_mode;
struct msm_dp dp_display;
+ bool encoder_mode_set;
+
/* wait for audio signaling */
struct completion audio_comp;
@@ -111,6 +113,7 @@
u32 hpd_state;
u32 event_pndx;
u32 event_gndx;
+ struct task_struct *ev_tsk;
struct dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
@@ -194,6 +197,8 @@
complete_all(&dp->audio_comp);
}
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
+
static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
@@ -234,9 +239,18 @@
}
rc = dp_register_audio_driver(dev, dp->audio);
- if (rc)
+ if (rc) {
DRM_ERROR("Audio registration Dp failed\n");
+ goto end;
+ }
+ rc = dp_hpd_event_thread_start(dp);
+ if (rc) {
+ DRM_ERROR("Event thread create failed\n");
+ goto end;
+ }
+
+ return 0;
end:
return rc;
}
@@ -255,6 +269,12 @@
return;
}
+ /* disable all HPD interrupts */
+ if (dp->core_initialized)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+
+ kthread_stop(dp->ev_tsk);
+
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
priv->dp = NULL;
@@ -288,13 +308,24 @@
drm_helper_hpd_irq_event(connector->dev);
}
-static int dp_display_send_hpd_notification(struct dp_display_private *dp,
- bool hpd)
+
+static void dp_display_set_encoder_mode(struct dp_display_private *dp)
{
- static bool encoder_mode_set;
struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
struct msm_kms *kms = priv->kms;
+ if (!dp->encoder_mode_set && dp->dp_display.encoder &&
+ kms->funcs->set_encoder_mode) {
+ kms->funcs->set_encoder_mode(kms,
+ dp->dp_display.encoder, false);
+
+ dp->encoder_mode_set = true;
+ }
+}
+
+static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+ bool hpd)
+{
if ((hpd && dp->dp_display.is_connected) ||
(!hpd && !dp->dp_display.is_connected)) {
DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
@@ -307,15 +338,6 @@
dp->dp_display.is_connected = hpd;
- if (dp->dp_display.is_connected && dp->dp_display.encoder
- && !encoder_mode_set
- && kms->funcs->set_encoder_mode) {
- kms->funcs->set_encoder_mode(kms,
- dp->dp_display.encoder, false);
- DRM_DEBUG_DP("set_encoder_mode() Completed\n");
- encoder_mode_set = true;
- }
-
dp_display_send_hpd_event(&dp->dp_display);
return 0;
@@ -351,7 +373,6 @@
dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
-
end:
return rc;
}
@@ -368,6 +389,8 @@
if (dp->usbpd->orientation == ORIENTATION_CC2)
flip = true;
+ dp_display_set_encoder_mode(dp);
+
dp_power_init(dp->power, flip);
dp_ctrl_host_init(dp->ctrl, flip);
dp_aux_init(dp->aux);
@@ -451,25 +474,42 @@
}
}
+static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
+{
+ int rc = 0;
+
+ if (dp_display_is_sink_count_zero(dp)) {
+ DRM_DEBUG_DP("sink count is zero, nothing to do\n");
+ if (dp->hpd_state != ST_DISCONNECTED) {
+ dp->hpd_state = ST_DISCONNECT_PENDING;
+ dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ }
+ } else {
+ if (dp->hpd_state == ST_DISCONNECTED) {
+ dp->hpd_state = ST_CONNECT_PENDING;
+ rc = dp_display_process_hpd_high(dp);
+ if (rc)
+ dp->hpd_state = ST_DISCONNECTED;
+ }
+ }
+
+ return rc;
+}
+
static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
{
- u32 sink_request;
+ u32 sink_request = dp->link->sink_request;
- sink_request = dp->link->sink_request;
-
- if (sink_request & DS_PORT_STATUS_CHANGED) {
- dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
- if (dp_display_is_sink_count_zero(dp)) {
- DRM_DEBUG_DP("sink count is zero, nothing to do\n");
- return 0;
+ if (dp->hpd_state == ST_DISCONNECTED) {
+ if (sink_request & DP_LINK_STATUS_UPDATED) {
+ DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
+ return -EINVAL;
}
-
- return dp_display_process_hpd_high(dp);
}
dp_ctrl_handle_sink_request(dp->ctrl);
- if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+ if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
dp_display_handle_video_request(dp);
return 0;
@@ -478,7 +518,9 @@
static int dp_display_usbpd_attention_cb(struct device *dev)
{
int rc = 0;
+ u32 sink_request;
struct dp_display_private *dp;
+ struct dp_usbpd *hpd;
if (!dev) {
DRM_ERROR("invalid dev\n");
@@ -492,10 +534,17 @@
return -ENODEV;
}
+ hpd = dp->usbpd;
+
/* check for any test request issued by sink */
rc = dp_link_process_request(dp->link);
- if (!rc)
- dp_display_handle_irq_hpd(dp);
+ if (!rc) {
+ sink_request = dp->link->sink_request;
+ if (sink_request & DS_PORT_STATUS_CHANGED)
+ rc = dp_display_handle_port_ststus_changed(dp);
+ else
+ rc = dp_display_handle_irq_hpd(dp);
+ }
return rc;
}
@@ -513,7 +562,7 @@
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_SUSPEND_PENDING) {
+ if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -535,13 +584,18 @@
hpd->hpd_high = 1;
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
- if (ret) { /* failed */
+ if (ret) { /* link train failed */
hpd->hpd_high = 0;
dp->hpd_state = ST_DISCONNECTED;
- }
- /* start sanity checking */
- dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+ if (ret == -ECONNRESET) { /* cable unplugged */
+ dp->core_initialized = false;
+ }
+
+ } else {
+ /* start sentinel checking in case of missing uevent */
+ dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+ }
mutex_unlock(&dp->event_mutex);
@@ -594,11 +648,6 @@
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_SUSPEND_PENDING) {
- mutex_unlock(&dp->event_mutex);
- return 0;
- }
-
if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
mutex_unlock(&dp->event_mutex);
return 0;
@@ -625,7 +674,7 @@
*/
dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
- /* start sanity checking */
+ /* start sentinel checking in case of missing uevent */
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
@@ -659,17 +708,21 @@
static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
+ int ret;
mutex_lock(&dp->event_mutex);
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
- if (state == ST_SUSPEND_PENDING) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
- dp_display_usbpd_attention_cb(&dp->pdev->dev);
+ ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
+ if (ret == -ECONNRESET) { /* cable unplugged */
+ dp->core_initialized = false;
+ }
mutex_unlock(&dp->event_mutex);
@@ -814,6 +867,11 @@
dp_display = g_dp_display;
+ if (dp_display->power_on) {
+ DRM_DEBUG_DP("Link already setup, return\n");
+ return 0;
+ }
+
rc = dp_ctrl_on_stream(dp->ctrl);
if (!rc)
dp_display->power_on = true;
@@ -846,6 +904,9 @@
dp_display = g_dp_display;
+ if (!dp_display->power_on)
+ return 0;
+
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
/* signal the disconnect event */
@@ -984,12 +1045,17 @@
while (1) {
if (timeout_mode) {
wait_event_timeout(dp_priv->event_q,
- (dp_priv->event_pndx == dp_priv->event_gndx),
- EVENT_TIMEOUT);
+ (dp_priv->event_pndx == dp_priv->event_gndx) ||
+ kthread_should_stop(), EVENT_TIMEOUT);
} else {
wait_event_interruptible(dp_priv->event_q,
- (dp_priv->event_pndx != dp_priv->event_gndx));
+ (dp_priv->event_pndx != dp_priv->event_gndx) ||
+ kthread_should_stop());
}
+
+ if (kthread_should_stop())
+ break;
+
spin_lock_irqsave(&dp_priv->event_lock, flag);
todo = &dp_priv->event_list[dp_priv->event_gndx];
if (todo->delay) {
@@ -1062,12 +1128,17 @@
return 0;
}
-static void dp_hpd_event_setup(struct dp_display_private *dp_priv)
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
{
- init_waitqueue_head(&dp_priv->event_q);
- spin_lock_init(&dp_priv->event_lock);
+ /* set event q to empty */
+ dp_priv->event_gndx = 0;
+ dp_priv->event_pndx = 0;
- kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ if (IS_ERR(dp_priv->ev_tsk))
+ return PTR_ERR(dp_priv->ev_tsk);
+
+ return 0;
}
static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
@@ -1091,7 +1162,7 @@
}
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
- /* delete connect pending event first */
+ /* stop sentinel connect pending checking */
dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
@@ -1125,13 +1196,12 @@
dp = container_of(dp_display, struct dp_display_private, dp_display);
dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
- if (dp->irq < 0) {
- rc = dp->irq;
- DRM_ERROR("failed to get irq: %d\n", rc);
- return rc;
+ if (!dp->irq) {
+ DRM_ERROR("failed to get irq\n");
+ return -EINVAL;
}
- rc = devm_request_irq(&dp->pdev->dev, dp->irq,
+ rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
dp_display_irq_handler,
IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
if (rc < 0) {
@@ -1167,8 +1237,11 @@
return -EPROBE_DEFER;
}
+ /* setup event q */
mutex_init(&dp->event_mutex);
g_dp_display = &dp->dp_display;
+ init_waitqueue_head(&dp->event_q);
+ spin_lock_init(&dp->event_lock);
/* Store DP audio handle inside DP display */
g_dp_display->dp_audio = dp->audio;
@@ -1220,15 +1293,12 @@
dp_catalog_ctrl_hpd_config(dp->catalog);
- status = dp_catalog_hpd_get_state_status(dp->catalog);
+ status = dp_catalog_link_is_connected(dp->catalog);
- if (status) {
+ if (status)
dp->dp_display.is_connected = true;
- } else {
+ else
dp->dp_display.is_connected = false;
- /* make sure next resume host_init be called */
- dp->core_initialized = false;
- }
mutex_unlock(&dp->event_mutex);
@@ -1250,6 +1320,9 @@
dp->hpd_state = ST_SUSPENDED;
+ /* host_init will be called at pm_resume */
+ dp->core_initialized = false;
+
mutex_unlock(&dp->event_mutex);
return 0;
@@ -1308,8 +1381,6 @@
dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp_hpd_event_setup(dp);
-
dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
}
@@ -1336,6 +1407,7 @@
struct drm_encoder *encoder)
{
struct msm_drm_private *priv;
+ struct dp_display_private *dp_priv;
int ret;
if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))
@@ -1344,6 +1416,8 @@
priv = dev->dev_private;
dp_display->drm_dev = dev;
+ dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
+
ret = dp_display_request_irq(dp_display);
if (ret) {
DRM_ERROR("request_irq failed, ret=%d\n", ret);
@@ -1361,6 +1435,8 @@
return ret;
}
+ dp_priv->panel->connector = dp_display->connector;
+
priv->connectors[priv->num_connectors++] = dp_display->connector;
return 0;
}
@@ -1379,6 +1455,7 @@
mutex_lock(&dp_display->event_mutex);
+ /* stop sentinel checking */
dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
rc = dp_display_set_mode(dp, &dp_display->dp_mode);
@@ -1397,7 +1474,7 @@
state = dp_display->hpd_state;
- if (state == ST_SUSPEND_PENDING)
+ if (state == ST_DISPLAY_OFF)
dp_display_host_init(dp_display);
dp_display_enable(dp_display, 0);
@@ -1409,7 +1486,8 @@
dp_display_unprepare(dp);
}
- if (state == ST_SUSPEND_PENDING)
+ /* manual kick off plug event to train link */
+ if (state == ST_DISPLAY_OFF)
dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
/* completed connection */
@@ -1441,6 +1519,7 @@
mutex_lock(&dp_display->event_mutex);
+ /* stop sentinel checking */
dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
dp_display_disable(dp_display, 0);
@@ -1454,7 +1533,7 @@
/* completed disconnection */
dp_display->hpd_state = ST_DISCONNECTED;
} else {
- dp_display->hpd_state = ST_SUSPEND_PENDING;
+ dp_display->hpd_state = ST_DISPLAY_OFF;
}
mutex_unlock(&dp_display->event_mutex);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 2768d1d..4e8a191 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -196,6 +196,11 @@
&panel->aux->ddc);
if (!dp_panel->edid) {
DRM_ERROR("panel edid read failed\n");
+ /* check edid read fail is due to unplug */
+ if (!dp_catalog_link_is_connected(panel->catalog)) {
+ rc = -ETIMEDOUT;
+ goto end;
+ }
/* fail safe edid */
mutex_lock(&connector->dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index f845333..7377596 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -205,6 +205,12 @@
return -EINVAL;
priv = dev->dev_private;
+
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index d255bea..73f066e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -117,7 +117,7 @@
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 3,
.regs = {
{"vdda", 18160, 1 }, /* 1.25 V */
{"vcca", 17000, 32 }, /* 0.925 V */
@@ -156,7 +156,7 @@
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 1,
.regs = {
{"vdda", 12560, 4 }, /* 1.2 V */
},
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 64454a6..51e8318 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1371,10 +1371,10 @@
dsi_get_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host, msg);
- if (!len) {
+ if (len < 0) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
- return -EINVAL;
+ return len;
}
/* for video mode, do not send cmds more than
@@ -1393,10 +1393,14 @@
}
ret = dsi_cmd_dma_tx(msm_host, len);
- if (ret < len) {
- pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
- __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
- return -ECOMM;
+ if (ret < 0) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
+ return ret;
+ } else if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
+ return -EIO;
}
return len;
@@ -2139,9 +2143,12 @@
}
ret = dsi_cmds2buf_tx(msm_host, msg);
- if (ret < msg->tx_len) {
+ if (ret < 0) {
pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
return ret;
+ } else if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+ return -ECOMM;
}
/*
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 1d28dfb..fb421ca 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -644,7 +644,7 @@
return connector;
fail:
- connector->funcs->destroy(msm_dsi->connector);
+ connector->funcs->destroy(connector);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index e07986a..2e0be85 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -345,7 +345,7 @@
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 94f948e..bd65dc9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -142,6 +142,10 @@
/* HDCP needs physical address of hdmi register */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
config->mmio_name);
+ if (!res) {
+ ret = -EINVAL;
+ goto fail;
+ }
hdmi->mmio_phy_addr = res->start;
hdmi->qfprom_mmio = msm_ioremap(pdev,
@@ -289,6 +293,11 @@
struct platform_device *pdev = hdmi->pdev;
int ret;
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
hdmi->dev = dev;
hdmi->encoder = encoder;
@@ -311,14 +320,14 @@
}
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
+ if (!hdmi->irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
goto fail;
}
- ret = devm_request_irq(&pdev->dev, hdmi->irq,
- msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ ret = devm_request_irq(dev->dev, hdmi->irq,
+ msm_hdmi_irq, IRQF_TRIGGER_HIGH,
"hdmi_isr", hdmi);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e37e5af..087efcb 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -10,6 +10,7 @@
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 819567e..9c05bf6 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -849,6 +849,7 @@
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
+ put_task_struct(task);
} else {
comm = NULL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 515ef80..8c64ce7 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -17,7 +17,7 @@
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
- return NULL;
+ return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 22ac7c6..ecab628 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -58,7 +58,7 @@
u64 addr = iova;
unsigned int i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index fea30e7..084b6ae 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -191,6 +191,9 @@
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*/
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 3d82b3c..93f8f4f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -160,14 +160,14 @@
static inline struct drm_encoder *
nv50_head_atom_get_encoder(struct nv50_head_atom *atom)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
/* We only ever have a single encoder */
drm_for_each_encoder_mask(encoder, atom->state.crtc->dev,
atom->state.encoder_mask)
- break;
+ return encoder;
- return encoder;
+ return NULL;
}
#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 66f32d9..5624a71 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -411,9 +411,18 @@
struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_outp_atom *outp_atom;
- struct nouveau_encoder *outp =
- nv50_real_outp(nv50_head_atom_get_encoder(armh));
- struct drm_encoder *encoder = &outp->base.base;
+ struct nouveau_encoder *outp;
+ struct drm_encoder *encoder, *enc;
+
+ enc = nv50_head_atom_get_encoder(armh);
+ if (!enc)
+ continue;
+
+ outp = nv50_real_outp(enc);
+ if (!outp)
+ continue;
+
+ encoder = &outp->base.base;
if (!asyh->clr.crc)
continue;
@@ -464,8 +473,16 @@
struct drm_device *dev = crtc->dev;
struct nv50_crc *crc = &head->crc;
const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
- struct nouveau_encoder *outp =
- nv50_real_outp(nv50_head_atom_get_encoder(asyh));
+ struct nouveau_encoder *outp;
+ struct drm_encoder *encoder;
+
+ encoder = nv50_head_atom_get_encoder(asyh);
+ if (!encoder)
+ return;
+
+ outp = nv50_real_outp(encoder);
+ if (!outp)
+ return;
func->set_src(head, outp->or,
nv50_crc_source_type(outp, asyh->crc.src),
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index c7a94c9..f2f3280 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -51,8 +51,9 @@
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
struct nouveau_backlight *bl)
{
- const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
- if (nb < 0 || nb >= 100)
+ const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+ if (nb < 0)
return false;
if (nb > 0)
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -280,7 +281,7 @@
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
ret = PTR_ERR(bl->dev);
goto fail_alloc;
}
@@ -306,7 +307,7 @@
return;
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
backlight_device_unregister(bl->dev);
nv_conn->backlight = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b4946b5..b57dcad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -279,8 +279,10 @@
break;
}
- if (WARN_ON(pi < 0))
+ if (WARN_ON(pi < 0)) {
+ kfree(nvbo);
return ERR_PTR(-EINVAL);
+ }
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4c992fd..9542fc6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -500,7 +500,8 @@
connector->interlace_allowed =
nv_encoder->caps.dp_interlace;
else
- connector->interlace_allowed = true;
+ connector->interlace_allowed =
+ drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f2ad6f4..0012875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -521,7 +521,7 @@
pm_runtime_mark_last_busy(drm->dev->dev);
noop:
- pm_runtime_put_sync(drm->dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
#ifdef CONFIG_ACPI
@@ -543,7 +543,7 @@
* it's own hotplug events.
*/
pm_runtime_put_autosuspend(drm->dev->dev);
- } else if (ret == 0) {
+ } else if (ret == 0 || ret == -EINPROGRESS) {
/* We've started resuming the GPU already, so
* it will handle scheduling a full reprobe
* itself
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 92987da..5e72e6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -679,7 +679,11 @@
goto out_free_dma;
for (i = 0; i < npages; i += max) {
- args.end = start + (max << PAGE_SHIFT);
+ if (args.start + (max << PAGE_SHIFT) > end)
+ args.end = end;
+ else
+ args.end = args.start + (max << PAGE_SHIFT);
+
ret = migrate_vma_setup(&args);
if (ret)
goto out_free_pfns;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24ec533..a3c8649 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -464,7 +464,7 @@
if (state == FBINFO_STATE_RUNNING) {
nouveau_fbcon_hotplug_resume(drm->fbcon);
pm_runtime_mark_last_busy(drm->dev->dev);
- pm_runtime_put_sync(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 5f5b87f..f08bda5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -89,7 +89,6 @@
ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
sg, robj);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
goto unlock;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index d0d52c1..950a3de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -123,7 +123,7 @@
mutex_init(&tdev->iommu.mutex);
- if (iommu_present(&platform_bus_type)) {
+ if (device_iommu_mapped(dev)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (!tdev->iommu.domain)
goto error;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
index 667fa01..a6ea89a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -142,11 +142,12 @@
hsfw->imem_size = desc->code_size;
hsfw->imem_tag = desc->start_tag;
- hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
- memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
-
+ hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL);
nvkm_firmware_put(fw);
- return 0;
+ if (!hsfw->imem)
+ return -ENOMEM;
+ else
+ return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8bff14a..f0368d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -33,7 +33,7 @@
{
u32 p = *addr;
- if (*addr > bios->image0_size && bios->imaged_addr) {
+ if (*addr >= bios->image0_size && bios->imaged_addr) {
*addr -= bios->image0_size;
*addr += bios->imaged_addr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index dc184e8..5214fd0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -135,10 +135,10 @@
list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
- break;
+ return cstate;
}
- return cstate;
+ return NULL;
}
static struct nvkm_cstate *
@@ -169,6 +169,8 @@
if (!list_empty(&pstate->list)) {
cstate = nvkm_cstate_get(clk, pstate, cstatei);
cstate = nvkm_cstate_find_best(clk, pstate, cstate);
+ if (!cstate)
+ return -EINVAL;
} else {
cstate = &pstate->base;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 7938722..d82529b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -216,6 +216,7 @@
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
+ .reset = gf100_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 3dfb3e8..9f32982 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -23,7 +23,7 @@
*/
#include "priv.h"
-static void
+void
gp102_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 7f5f9d5..0bd4b32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -83,6 +83,7 @@
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
+ .reset = gp102_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index b945ec3..80c4cb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -41,6 +41,7 @@
bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
+void gp102_pmu_reset(struct nvkm_pmu *pmu);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 6ccbc29..d5b3123 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1173,6 +1173,7 @@
default:
break;
}
+ of_node_put(port);
}
}
@@ -1205,11 +1206,13 @@
default:
break;
}
+ of_node_put(port);
}
return 0;
error:
+ of_node_put(port);
__dss_uninit_ports(dss, i);
return r;
}
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index bbdd086..4b92c63 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -229,7 +229,7 @@
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -265,7 +265,7 @@
return 0;
}
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
@@ -295,6 +295,13 @@
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
+ return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
@@ -349,7 +356,7 @@
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
- .prepare = rpi_touchscreen_noop,
+ .prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 959dcbd..1a87cc4 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -676,7 +676,7 @@
static const struct panel_desc ampire_am_1280800n3tzqw_t00h = {
.modes = &ire_am_1280800n3tzqw_t00h_mode,
.num_modes = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
@@ -2144,6 +2144,7 @@
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -2200,7 +2201,7 @@
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -2654,6 +2655,7 @@
static const struct panel_desc logictechno_lt161010_2nh = {
.timings = &logictechno_lt161010_2nh_timing,
.num_timings = 1,
+ .bpc = 6,
.size = {
.width = 154,
.height = 86,
@@ -2683,6 +2685,7 @@
static const struct panel_desc logictechno_lt170410_2whc = {
.timings = &logictechno_lt170410_2whc_timing,
.num_timings = 1,
+ .bpc = 8,
.size = {
.width = 217,
.height = 136,
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a702618..1dfc457 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -427,8 +427,8 @@
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
- list_add_tail(&bo->base.madv_list,
- &pfdev->shrinker_list);
+ list_move_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 2aae636..107ad2d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -359,8 +359,11 @@
panfrost_gpu_init_features(pfdev);
- dma_set_mask_and_coherent(pfdev->dev,
+ err = dma_set_mask_and_coherent(pfdev->dev,
DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+ if (err)
+ return err;
+
dma_set_max_seg_size(pfdev->dev, UINT_MAX);
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 7fc45b1..1359696 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -491,7 +491,7 @@
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_bo:
- drm_gem_object_put(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return ret;
}
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 46b0d1c..d5e8e3a 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -324,7 +324,7 @@
return ret;
}
-static int pl111_amba_remove(struct amba_device *amba_dev)
+static void pl111_amba_remove(struct amba_device *amba_dev)
{
struct device *dev = &amba_dev->dev;
struct drm_device *drm = amba_get_drvdata(amba_dev);
@@ -335,8 +335,6 @@
drm_panel_bridge_remove(priv->bridge);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index bdd883f..963a5d5 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -402,6 +402,7 @@
if (of_device_is_compatible(child, "arm,pl111")) {
has_coretile_clcd = true;
ct_clcd = child;
+ of_node_put(child);
break;
}
if (of_device_is_compatible(child, "arm,hdlcd")) {
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
index 9c1a941..d877738 100644
--- a/drivers/gpu/drm/radeon/.gitignore
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
mkregtable
*_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 6f60f48..52819e7 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
config DRM_RADEON_USERPTR
bool "Always enable userptr support"
depends on DRM_RADEON
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 11c97ed..3d502f1 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: MIT
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 59cdadc..a521874 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2740,10 +2740,10 @@
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
break;
case MC_SEQ_RESERVE_M >> 2:
+ if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
temp_reg = RREG32(MC_PMG_CMD_MRS1);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
@@ -2752,8 +2752,6 @@
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
j++;
- if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
break;
default:
break;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e308344..ef111d4 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -473,6 +473,8 @@
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -487,6 +489,8 @@
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 266e3cb..8287410 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1623,6 +1623,9 @@
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ade2327..5125816 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -398,7 +398,15 @@
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- return component_add(dev, &rockchip_dp_component_ops);
+ ret = component_add(dev, &rockchip_dp_component_ops);
+ if (ret)
+ goto err_dp_remove;
+
+ return 0;
+
+err_dp_remove:
+ analogix_dp_remove(dp->adp);
+ return ret;
}
static int rockchip_dp_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index dec54c7..857c47c 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -276,8 +276,9 @@
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index b0fb3c3..c51be1c 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1286,5 +1286,11 @@
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
.pm = &dw_mipi_dsi_rockchip_pm_ops,
.name = "dw-mipi-dsi-rockchip",
+ /*
+ * For dual-DSI display, one DSI pokes at the other DSI's
+ * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
+ * safe for asynchronous probe.
+ */
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0f23144..af98bfc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1530,6 +1530,9 @@
{
struct rockchip_crtc_state *rockchip_state;
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
if (!rockchip_state)
return NULL;
@@ -2097,10 +2100,10 @@
vop_win_init(vop);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vop->len = resource_size(res);
vop->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ vop->len = resource_size(res);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 62488ac..089c00a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -527,8 +527,8 @@
struct drm_device *ddev = crtc->dev;
struct drm_connector_list_iter iter;
struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_bridge *bridge = NULL;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_bridge *bridge = NULL, *br_iter;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct videomode vm;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
@@ -538,15 +538,19 @@
int ret;
/* get encoder from crtc */
- drm_for_each_encoder(encoder, ddev)
- if (encoder->crtc == crtc)
+ drm_for_each_encoder(en_iter, ddev)
+ if (en_iter->crtc == crtc) {
+ encoder = en_iter;
break;
+ }
if (encoder) {
/* get bridge from encoder */
- list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
- if (bridge->encoder == encoder)
+ list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
+ if (br_iter->encoder == encoder) {
+ bridge = br_iter;
break;
+ }
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 29861fc..c5912fd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -71,7 +71,6 @@
goto free_drm;
}
- dev_set_drvdata(dev, drm);
drm->dev_private = drv;
INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
@@ -112,6 +111,8 @@
drm_fbdev_generic_setup(drm, 32);
+ dev_set_drvdata(dev, drm);
+
return 0;
finish_poll:
@@ -128,6 +129,7 @@
{
struct drm_device *drm = dev_get_drvdata(dev);
+ dev_set_drvdata(dev, NULL);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 4f5efca..51edb42 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 2c6ebc3..318692a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1042,6 +1042,10 @@
struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
/*
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
* likely to be allocated beyond the 32-bit boundary if sufficient
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f46d377..de1333d 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1538,8 +1538,10 @@
dsi->slave = platform_get_drvdata(gangster);
of_node_put(np);
- if (!dsi->slave)
+ if (!dsi->slave) {
+ put_device(&gangster->dev);
return -EPROBE_DEFER;
+ }
dsi->slave->master = dsi;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index b177525..8ece0dd 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -60,11 +60,13 @@
int tilcdc_add_component_encoder(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = NULL, *iter;
- list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
- if (encoder->possible_crtcs & (1 << priv->crtc->index))
+ list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
+ if (iter->possible_crtcs & (1 << priv->crtc->index)) {
+ encoder = iter;
break;
+ }
if (!encoder) {
dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index c0bc2a1..9d0c127 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -175,6 +175,7 @@
static const struct spi_device_id st7735r_id[] = {
{ "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
+ { "rh128128t", (uintptr_t)&rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index edcfd8c..209b5ce 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -400,9 +400,6 @@
udl_handle_damage(fb, 0, 0, fb->width, fb->height);
- if (!crtc_state->mode_changed)
- return;
-
/* enable display */
udl_crtc_write_mode_to_hw(crtc);
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f4ccca9..79724fd 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -319,7 +319,8 @@
u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
- u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
+ bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1;
+ u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
bool debug_dump_regs = false;
@@ -345,7 +346,8 @@
PV_HORZB_HACTIVE));
CRTC_WRITE(PV_VERTA,
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
+ interlace,
PV_VERTA_VBP) |
VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
PV_VERTA_VSYNC));
@@ -357,7 +359,7 @@
if (interlace) {
CRTC_WRITE(PV_VERTA_EVEN,
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end - 1,
+ mode->crtc_vsync_end,
PV_VERTA_VBP) |
VC4_SET_FIELD(mode->crtc_vsync_end -
mode->crtc_vsync_start,
@@ -377,7 +379,7 @@
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
- VC4_SET_FIELD(mode->htotal * pixel_rep / 2,
+ VC4_SET_FIELD(mode->htotal * pixel_rep / (2 * ppc),
PV_VCONTROL_ODD_DELAY));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
} else {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 839610f..888aec1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -246,6 +246,15 @@
}
}
+static const struct of_device_id vc4_dma_range_matches[] = {
+ { .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2835-hvs" },
+ { .compatible = "brcm,bcm2835-v3d" },
+ { .compatible = "brcm,cygnus-v3d" },
+ { .compatible = "brcm,vc4-v3d" },
+ {}
+};
+
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -263,6 +272,16 @@
vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
of_node_put(node);
+ node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
+ NULL);
+ if (node) {
+ ret = of_dma_configure(dev, node, true);
+ of_node_put(node);
+
+ if (ret)
+ return ret;
+ }
+
vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
if (IS_ERR(vc4))
return PTR_ERR(vc4);
@@ -385,7 +404,12 @@
if (ret)
return ret;
- return platform_driver_register(&vc4_platform_driver);
+ ret = platform_driver_register(&vc4_platform_driver);
+ if (ret)
+ platform_unregister_drivers(component_drivers,
+ ARRAY_SIZE(component_drivers));
+
+ return ret;
}
static void __exit vc4_drm_unregister(void)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 9809c3a..9214636 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -77,7 +77,6 @@
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
- struct vc4_dsi *dsi1;
struct vc4_vec *vec;
struct vc4_txp *txp;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index eaf2769..0bda40c 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -181,8 +181,50 @@
#define DSI0_TXPKT_PIX_FIFO 0x20 /* AKA PIX_FIFO */
-#define DSI0_INT_STAT 0x24
-#define DSI0_INT_EN 0x28
+#define DSI0_INT_STAT 0x24
+#define DSI0_INT_EN 0x28
+# define DSI0_INT_FIFO_ERR BIT(25)
+# define DSI0_INT_CMDC_DONE_MASK VC4_MASK(24, 23)
+# define DSI0_INT_CMDC_DONE_SHIFT 23
+# define DSI0_INT_CMDC_DONE_NO_REPEAT 1
+# define DSI0_INT_CMDC_DONE_REPEAT 3
+# define DSI0_INT_PHY_DIR_RTF BIT(22)
+# define DSI0_INT_PHY_D1_ULPS BIT(21)
+# define DSI0_INT_PHY_D1_STOP BIT(20)
+# define DSI0_INT_PHY_RXLPDT BIT(19)
+# define DSI0_INT_PHY_RXTRIG BIT(18)
+# define DSI0_INT_PHY_D0_ULPS BIT(17)
+# define DSI0_INT_PHY_D0_LPDT BIT(16)
+# define DSI0_INT_PHY_D0_FTR BIT(15)
+# define DSI0_INT_PHY_D0_STOP BIT(14)
+/* Signaled when the clock lane enters the given state. */
+# define DSI0_INT_PHY_CLK_ULPS BIT(13)
+# define DSI0_INT_PHY_CLK_HS BIT(12)
+# define DSI0_INT_PHY_CLK_FTR BIT(11)
+/* Signaled on timeouts */
+# define DSI0_INT_PR_TO BIT(10)
+# define DSI0_INT_TA_TO BIT(9)
+# define DSI0_INT_LPRX_TO BIT(8)
+# define DSI0_INT_HSTX_TO BIT(7)
+/* Contention on a line when trying to drive the line low */
+# define DSI0_INT_ERR_CONT_LP1 BIT(6)
+# define DSI0_INT_ERR_CONT_LP0 BIT(5)
+/* Control error: incorrect line state sequence on data lane 0. */
+# define DSI0_INT_ERR_CONTROL BIT(4)
+# define DSI0_INT_ERR_SYNC_ESC BIT(3)
+# define DSI0_INT_RX2_PKT BIT(2)
+# define DSI0_INT_RX1_PKT BIT(1)
+# define DSI0_INT_CMD_PKT BIT(0)
+
+#define DSI0_INTERRUPTS_ALWAYS_ENABLED (DSI0_INT_ERR_SYNC_ESC | \
+ DSI0_INT_ERR_CONTROL | \
+ DSI0_INT_ERR_CONT_LP0 | \
+ DSI0_INT_ERR_CONT_LP1 | \
+ DSI0_INT_HSTX_TO | \
+ DSI0_INT_LPRX_TO | \
+ DSI0_INT_TA_TO | \
+ DSI0_INT_PR_TO)
+
# define DSI1_INT_PHY_D3_ULPS BIT(30)
# define DSI1_INT_PHY_D3_STOP BIT(29)
# define DSI1_INT_PHY_D2_ULPS BIT(28)
@@ -493,6 +535,18 @@
*/
#define DSI1_ID 0x8c
+struct vc4_dsi_variant {
+ /* Whether we're on bcm2835's DSI0 or DSI1. */
+ unsigned int port;
+
+ bool broken_axi_workaround;
+
+ const char *debugfs_name;
+ const struct debugfs_reg32 *regs;
+ size_t nregs;
+
+};
+
/* General DSI hardware state. */
struct vc4_dsi {
struct platform_device *pdev;
@@ -509,8 +563,7 @@
u32 *reg_dma_mem;
dma_addr_t reg_paddr;
- /* Whether we're on bcm2835's DSI0 or DSI1. */
- int port;
+ const struct vc4_dsi_variant *variant;
/* DSI channel for the panel we're connected to. */
u32 channel;
@@ -586,10 +639,10 @@
#define DSI_READ(offset) readl(dsi->regs + (offset))
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
- DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+ DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
#define DSI_PORT_WRITE(offset, val) \
- DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
-#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
+ DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
/* VC4 DSI encoder KMS struct */
struct vc4_dsi_encoder {
@@ -750,6 +803,9 @@
list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->disable)
iter->funcs->disable(iter);
+
+ if (iter == dsi->bridge)
+ break;
}
vc4_dsi_ulps(dsi, true);
@@ -794,11 +850,9 @@
/* Find what divider gets us a faster clock than the requested
* pixel clock.
*/
- for (divider = 1; divider < 8; divider++) {
- if (parent_rate / divider < pll_clock) {
- divider--;
+ for (divider = 1; divider < 255; divider++) {
+ if (parent_rate / (divider + 1) < pll_clock)
break;
- }
}
/* Now that we've picked a PLL divider, calculate back to its
@@ -835,9 +889,9 @@
unsigned long phy_clock;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -871,7 +925,7 @@
DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
/* Set AFE CTR00/CTR1 to release powerdown of analog. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
@@ -883,6 +937,9 @@
DSI_PORT_WRITE(PHY_AFEC0, afec0);
+ /* AFEC reset hold time */
+ mdelay(1);
+
DSI_PORT_WRITE(PHY_AFEC1,
VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE1) |
VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE0) |
@@ -1017,7 +1074,7 @@
DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
- (dsi->port == 0 ?
+ (dsi->variant->port == 0 ?
VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
@@ -1043,18 +1100,15 @@
DSI_DISP1_ENABLE);
/* Ungate the block. */
- if (dsi->port == 0)
+ if (dsi->variant->port == 0)
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
else
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
/* Bring AFE out of reset. */
- if (dsi->port == 0) {
- } else {
- DSI_PORT_WRITE(PHY_AFEC0,
- DSI_PORT_READ(PHY_AFEC0) &
- ~DSI1_PHY_AFEC0_RESET);
- }
+ DSI_PORT_WRITE(PHY_AFEC0,
+ DSI_PORT_READ(PHY_AFEC0) &
+ ~DSI_PORT_BIT(PHY_AFEC0_RESET));
vc4_dsi_ulps(dsi, false);
@@ -1173,13 +1227,28 @@
/* Enable the appropriate interrupt for the transfer completion. */
dsi->xfer_result = 0;
reinit_completion(&dsi->xfer_completion);
- DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
- if (msg->rx_len) {
- DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
- DSI1_INT_PHY_DIR_RTF));
+ if (dsi->variant->port == 0) {
+ DSI_PORT_WRITE(INT_STAT,
+ DSI0_INT_CMDC_DONE_MASK | DSI1_INT_PHY_DIR_RTF);
+ if (msg->rx_len) {
+ DSI_PORT_WRITE(INT_EN, (DSI0_INTERRUPTS_ALWAYS_ENABLED |
+ DSI0_INT_PHY_DIR_RTF));
+ } else {
+ DSI_PORT_WRITE(INT_EN,
+ (DSI0_INTERRUPTS_ALWAYS_ENABLED |
+ VC4_SET_FIELD(DSI0_INT_CMDC_DONE_NO_REPEAT,
+ DSI0_INT_CMDC_DONE)));
+ }
} else {
- DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
- DSI1_INT_TXPKT1_DONE));
+ DSI_PORT_WRITE(INT_STAT,
+ DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
+ if (msg->rx_len) {
+ DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+ DSI1_INT_PHY_DIR_RTF));
+ } else {
+ DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+ DSI1_INT_TXPKT1_DONE));
+ }
}
/* Send the packet. */
@@ -1196,7 +1265,7 @@
ret = dsi->xfer_result;
}
- DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+ DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED));
if (ret)
goto reset_fifo_and_return;
@@ -1242,7 +1311,7 @@
DSI_PORT_BIT(CTRL_RESET_FIFOS));
DSI_PORT_WRITE(TXPKT1C, 0);
- DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+ DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED));
return ret;
}
@@ -1305,8 +1374,16 @@
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
+ .port = 1,
+ .broken_axi_workaround = true,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
static const struct of_device_id vc4_dsi_dt_match[] = {
- { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+ { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
{}
};
@@ -1317,7 +1394,7 @@
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+ DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
*ret = IRQ_HANDLED;
}
@@ -1351,26 +1428,28 @@
DSI_PORT_WRITE(INT_STAT, stat);
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_SYNC_ESC, "LPDT sync");
+ DSI_PORT_BIT(INT_ERR_SYNC_ESC), "LPDT sync");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_CONTROL, "data lane 0 sequence");
+ DSI_PORT_BIT(INT_ERR_CONTROL), "data lane 0 sequence");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_CONT_LP0, "LP0 contention");
+ DSI_PORT_BIT(INT_ERR_CONT_LP0), "LP0 contention");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_ERR_CONT_LP1, "LP1 contention");
+ DSI_PORT_BIT(INT_ERR_CONT_LP1), "LP1 contention");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_HSTX_TO, "HSTX timeout");
+ DSI_PORT_BIT(INT_HSTX_TO), "HSTX timeout");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_LPRX_TO, "LPRX timeout");
+ DSI_PORT_BIT(INT_LPRX_TO), "LPRX timeout");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_TA_TO, "turnaround timeout");
+ DSI_PORT_BIT(INT_TA_TO), "turnaround timeout");
dsi_handle_error(dsi, &ret, stat,
- DSI1_INT_PR_TO, "peripheral reset timeout");
+ DSI_PORT_BIT(INT_PR_TO), "peripheral reset timeout");
- if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) {
+ if (stat & ((dsi->variant->port ? DSI1_INT_TXPKT1_DONE :
+ DSI0_INT_CMDC_DONE_MASK) |
+ DSI_PORT_BIT(INT_PHY_DIR_RTF))) {
complete(&dsi->xfer_completion);
ret = IRQ_HANDLED;
- } else if (stat & DSI1_INT_HSTX_TO) {
+ } else if (stat & DSI_PORT_BIT(INT_HSTX_TO)) {
complete(&dsi->xfer_completion);
dsi->xfer_result = -ETIMEDOUT;
ret = IRQ_HANDLED;
@@ -1390,12 +1469,12 @@
struct device *dev = &dsi->pdev->dev;
const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
static const struct {
- const char *dsi0_name, *dsi1_name;
+ const char *name;
int div;
} phy_clocks[] = {
- { "dsi0_byte", "dsi1_byte", 8 },
- { "dsi0_ddr2", "dsi1_ddr2", 4 },
- { "dsi0_ddr", "dsi1_ddr", 2 },
+ { "byte", 8 },
+ { "ddr2", 4 },
+ { "ddr", 2 },
};
int i;
@@ -1411,8 +1490,12 @@
for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
struct clk_init_data init;
+ char clk_name[16];
int ret;
+ snprintf(clk_name, sizeof(clk_name),
+ "dsi%u_%s", dsi->variant->port, phy_clocks[i].name);
+
/* We just use core fixed factor clock ops for the PHY
* clocks. The clocks are actually gated by the
* PHY_AFEC0_DDRCLK_EN bits, which we should be
@@ -1429,10 +1512,7 @@
memset(&init, 0, sizeof(init));
init.parent_names = &parent_name;
init.num_parents = 1;
- if (dsi->port == 1)
- init.name = phy_clocks[i].dsi1_name;
- else
- init.name = phy_clocks[i].dsi0_name;
+ init.name = clk_name;
init.ops = &clk_fixed_factor_ops;
ret = devm_clk_hw_register(dev, &fix->hw);
@@ -1451,7 +1531,6 @@
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
struct drm_panel *panel;
@@ -1463,7 +1542,7 @@
if (!match)
return -ENODEV;
- dsi->port = (uintptr_t)match->data;
+ dsi->variant = match->data;
vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
GFP_KERNEL);
@@ -1471,7 +1550,8 @@
return -ENOMEM;
INIT_LIST_HEAD(&dsi->bridge_chain);
- vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
+ vc4_dsi_encoder->base.type = dsi->variant->port ?
+ VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
vc4_dsi_encoder->dsi = dsi;
dsi->encoder = &vc4_dsi_encoder->base.base;
@@ -1480,13 +1560,8 @@
return PTR_ERR(dsi->regs);
dsi->regset.base = dsi->regs;
- if (dsi->port == 0) {
- dsi->regset.regs = dsi0_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
- } else {
- dsi->regset.regs = dsi1_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
- }
+ dsi->regset.regs = dsi->variant->regs;
+ dsi->regset.nregs = dsi->variant->nregs;
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -1498,7 +1573,7 @@
* from the ARM. It does handle writes from the DMA engine,
* so set up a channel for talking to it.
*/
- if (dsi->port == 1) {
+ if (dsi->variant->broken_axi_workaround) {
dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
&dsi->reg_dma_paddr,
GFP_KERNEL);
@@ -1604,9 +1679,6 @@
if (ret)
return ret;
- if (dsi->port == 1)
- vc4->dsi1 = dsi;
-
drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
@@ -1622,10 +1694,7 @@
*/
list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
- if (dsi->port == 0)
- vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
- else
- vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
pm_runtime_enable(dev);
@@ -1635,8 +1704,6 @@
static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
if (dsi->bridge)
@@ -1648,9 +1715,6 @@
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
drm_encoder_cleanup(dsi->encoder);
-
- if (dsi->port == 1)
- vc4->dsi1 = NULL;
}
static const struct component_ops vc4_dsi_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index a308f2d..08175c3 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -83,6 +83,8 @@
#define CEC_CLOCK_FREQ 40000
#define VC4_HSM_MID_CLOCK 149985000
+#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -209,7 +211,9 @@
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
drm_atomic_helper_connector_reset(connector);
- drm_atomic_helper_connector_tv_reset(connector);
+
+ if (connector->state)
+ drm_atomic_helper_connector_tv_reset(connector);
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
@@ -518,12 +522,12 @@
VC4_HDMI_VERTA_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, VC4_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
+ interlaced,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end -
- interlaced,
+ mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
HDMI_WRITE(HDMI_HORZA,
@@ -561,13 +565,13 @@
VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
VC5_HDMI_VERTA_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
- u32 vertb = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
+ u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
+ VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end -
- interlaced,
+ mode->crtc_vsync_end - interlaced,
VC4_HDMI_VERTB_VBP));
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
@@ -1031,22 +1035,12 @@
audio_packet_config |= VC4_SET_FIELD(channel_mask,
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
- /* Set the MAI threshold. This logic mimics the firmware's. */
- if (vc4_hdmi->audio.samplerate > 96000) {
- HDMI_WRITE(HDMI_MAI_THR,
- VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW));
- } else if (vc4_hdmi->audio.samplerate > 48000) {
- HDMI_WRITE(HDMI_MAI_THR,
- VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW));
- } else {
- HDMI_WRITE(HDMI_MAI_THR,
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW));
- }
+ /* Set the MAI threshold */
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW));
HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
@@ -1231,12 +1225,12 @@
struct snd_soc_card *card = &vc4_hdmi->audio.card;
struct device *dev = &vc4_hdmi->pdev->dev;
const __be32 *addr;
- int index;
+ int index, len;
int ret;
- if (!of_find_property(dev->of_node, "dmas", NULL)) {
+ if (!of_find_property(dev->of_node, "dmas", &len) || !len) {
dev_warn(dev,
- "'dmas' DT property is missing, no HDMI audio\n");
+ "'dmas' DT property is missing or empty, no HDMI audio\n");
return 0;
}
@@ -1947,7 +1941,7 @@
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
@@ -1973,7 +1967,7 @@
.encoder_type = VC4_ENCODER_TYPE_HDMI1,
.debugfs_name = "hdmi1_regs",
.card_name = "vc4-hdmi-1",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi1_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
.phy_lane_mapping = {
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ad69157..95fa6fc 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -564,6 +564,7 @@
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
+ u32 reg;
hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -635,6 +636,26 @@
vc4->hvs = hvs;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
+ HVS_WRITE(SCALER_DISPECTRL,
+ reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL,
+ reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
+ HVS_WRITE(SCALER_DISPEOLN,
+ reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
+ HVS_WRITE(SCALER_DISPDITHER,
+ reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));
+
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
@@ -642,10 +663,6 @@
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
- * be unused.
- */
- dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
SCALER_DISPCTRL_SLVWREIRQ |
SCALER_DISPCTRL_SLVRDEIRQ |
@@ -659,7 +676,6 @@
SCALER_DISPCTRL_DSPEISLUR(1) |
SCALER_DISPCTRL_DSPEISLUR(2) |
SCALER_DISPCTRL_SCLEIRQ);
- dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index af4b894..4df222a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -303,16 +303,16 @@
adjhdisplay,
crtc_state->mode.hdisplay);
vc4_pstate->crtc_x += left;
- if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left)
- vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left;
+ if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
+ vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;
adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
adjvdisplay,
crtc_state->mode.vdisplay);
vc4_pstate->crtc_y += top;
- if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top)
- vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top;
+ if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
+ vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;
vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
adjhdisplay,
@@ -332,7 +332,6 @@
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
- u32 subpixel_src_mask = (1 << 16) - 1;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -354,18 +353,15 @@
for (i = 0; i < num_planes; i++)
vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
- /* We don't support subpixel source positioning for scaling. */
- if ((state->src.x1 & subpixel_src_mask) ||
- (state->src.x2 & subpixel_src_mask) ||
- (state->src.y1 & subpixel_src_mask) ||
- (state->src.y2 & subpixel_src_mask)) {
- return -EINVAL;
- }
-
- vc4_state->src_x = state->src.x1 >> 16;
- vc4_state->src_y = state->src.y1 >> 16;
- vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
- vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;
+ /*
+ * We don't support subpixel source positioning for scaling,
+ * but fractional coordinates can be generated by clipping
+ * so just round for now
+ */
+ vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
+ vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
+ vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
+ vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;
vc4_state->crtc_x = state->dst.x1;
vc4_state->crtc_y = state->dst.y1;
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index d13502a..f8fa09d 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -295,12 +295,18 @@
if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
return;
- ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ ctrl = TXP_GO | TXP_EI |
VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
+ else
+ /*
+ * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
+ * hardware will force the output padding to be 0xff.
+ */
+ ctrl |= TXP_ALPHA_INVERT;
gem = drm_fb_cma_get_gem_obj(fb, 0);
TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index bd5b8eb..c6bd168 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -257,7 +257,7 @@
static const struct drm_display_mode ntsc_mode = {
DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ 480, 480 + 7, 480 + 7 + 6, 525, 0,
DRM_MODE_FLAG_INTERLACE)
};
@@ -279,7 +279,7 @@
static const struct drm_display_mode pal_mode = {
DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ 576, 576 + 4, 576 + 4 + 6, 625, 0,
DRM_MODE_FLAG_INTERLACE)
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f84b7e6..9b2b99e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -177,6 +177,8 @@
DRM_DEBUG("add mode: %dx%d\n", width, height);
mode = drm_cvt_mode(connector->dev, width, height, 60,
false, false, false);
+ if (!mode)
+ return count;
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
count++;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c8da7ad..33b8eba 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -470,8 +470,10 @@
spin_unlock(&vgdev->display_info_lock);
/* not in cache - need to talk to hw */
- virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
- &cache_ent);
+ ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
+ &cache_ent);
+ if (ret)
+ return ret;
virtio_gpu_notify(vgdev);
copy_exit:
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6a311cd..e6de627 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -213,14 +213,14 @@
}
static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *state)
{
struct virtio_gpu_framebuffer *vgfb;
- if (!plane->state->fb)
+ if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgfb = to_virtio_gpu_framebuffer(state->fb);
if (vgfb->fence) {
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5e40fa0..e98a29d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -601,7 +601,7 @@
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
shmem->pages, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index c59806d..97d9d25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -498,7 +498,7 @@
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index a2c09dc..9f674a8 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -198,6 +198,10 @@
static bool host1x_wants_iommu(struct host1x *host1x)
{
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
/*
* If we support addressing a maximum of 32 bits of physical memory
* and if the host1x firewall is enabled, there's no need to enable
@@ -520,6 +524,7 @@
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
+ host1x_channel_list_free(&host->channel_list);
host1x_iommu_exit(host);
return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b4a31d5..74eca68 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -451,8 +451,9 @@
error = rate / (sig->mode.pixelclock / 1000);
- dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
- rate, div, (signed)(error - 1000) / 10, error % 10);
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
+ rate, div, error < 1000 ? '-' : '+',
+ abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index ce7740e..51d0875 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -866,8 +866,14 @@
gb_svc_debugfs_init(svc);
- return gb_svc_queue_deferred_request(op);
+ ret = gb_svc_queue_deferred_request(op);
+ if (ret)
+ goto err_remove_debugfs;
+ return 0;
+
+err_remove_debugfs:
+ gb_svc_debugfs_exit(svc);
err_unregister_device:
gb_svc_watchdog_destroy(svc);
device_del(&svc->dev);
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 6b66593..ef73fef 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -831,6 +831,8 @@
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
{ }
};
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index 74ad8bf..e8c5e3a 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -347,6 +347,12 @@
bigben->report = list_entry(report_list->next,
struct hid_report, list);
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ error = -ENODEV;
+ goto error_hw_stop;
+ }
+
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
set_bit(FF_RUMBLE, hidinput->input->ffbit);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 477baa3..172f20e 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -788,6 +788,11 @@
data->word = le16_to_cpup((__le16 *)buf);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_length > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EINVAL;
+ goto power_normal;
+ }
+
memcpy(data->block + 1, buf, read_length);
break;
case I2C_SMBUS_BLOCK_DATA:
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 0e8f424..8386733 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -188,7 +188,6 @@
ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
- input_free_device(input);
return ret;
}
@@ -200,7 +199,6 @@
hid_err(hdev, "Failed to register elan input device: %d\n",
ret);
input_mt_destroy_slots(input);
- input_free_device(input);
return ret;
}
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 978ee2a..b7704dd 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -498,7 +498,7 @@
ret = hid_add_device(hid_dev);
if (ret)
- goto probe_err1;
+ goto probe_err2;
ret = hid_parse(hid_dev);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d2e4f9f..3350a41 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -389,7 +389,9 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
+#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -743,6 +745,7 @@
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
@@ -824,6 +827,7 @@
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
#define USB_DEVICE_ID_MADCATZ_RAT5 0x1705
#define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
+#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a17d1dd..75a4d8d 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -324,6 +324,10 @@
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index c2c66ce..7d82f8d 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -366,7 +366,7 @@
.type = DREAM_CHEEKY,
.name = "Dream Cheeky Webmail Notifier",
.short_name = "dream_cheeky",
- .max_brightness = 31,
+ .max_brightness = 63,
.num_leds = 1,
.report_size = 9,
.report_type = RAW_REQUEST,
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index a311b0a..587259b 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1000,6 +1000,7 @@
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0f:
+ case 0x11:
device_type = "eQUAD Lightspeed 1.2";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index fc4c074..28158d2 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -387,7 +387,7 @@
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
- break;
+ return 0;
default:
return 0;
}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 4211b98..de52e9f 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -385,6 +385,9 @@
data_len = 7;
break;
default:
+ if (len > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
memcpy(&mcp->txbuf[5], buf, len);
data_len = len + 5;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e5a3704..a78ce16 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1155,7 +1155,7 @@
int contact_count = -1;
/* sticky fingers release in progress, abort */
- if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
scantime = *app->scantime;
@@ -1236,7 +1236,7 @@
del_timer(&td->release_timer);
}
- clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
static int mt_touch_input_configured(struct hid_device *hdev,
@@ -1671,11 +1671,11 @@
* An input report came in just before we release the sticky fingers,
* it will take care of the sticky fingers.
*/
- if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
mt_release_contacts(hdev);
- clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -1990,6 +1990,12 @@
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ /* Lenovo X12 TAB Gen 1 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X12_TAB) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
@@ -2129,6 +2135,9 @@
{ .driver_data = MT_CLS_GOOGLE,
HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
+ USB_DEVICE_ID_GOOGLE_WHISKERS) },
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 2ab71d7..4a8014e 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -609,6 +609,7 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
#endif
#if IS_ENABLED(CONFIG_HID_SAMSUNG)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 26373b8..6da80e4 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -257,6 +257,8 @@
if (!new_value)
return -ENOMEM;
+ mutex_lock(&device->cbuf_lock);
+
report = &device->cbuf[device->cbuf_end];
/* passing NULL is safe */
@@ -276,6 +278,8 @@
reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
}
+ mutex_unlock(&device->cbuf_lock);
+
wake_up_interruptible(&device->wait);
return 0;
}
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index c7bf14c..b84e975 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -187,6 +187,8 @@
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
.driver_data = SAITEK_RELEASE_MODE_MMO7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
+ .driver_data = SAITEK_RELEASE_MODE_MMO7 },
{ }
};
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index a3b151b..fc616db 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -134,6 +134,11 @@
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -165,6 +170,11 @@
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 2eee5e3..fade7fc 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -346,10 +346,13 @@
unsigned int minor = iminor(inode);
struct hidraw_list *list = file->private_data;
unsigned long flags;
+ int i;
mutex_lock(&minors_lock);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ for (i = list->tail; i < list->head; i++)
+ kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 998aad8..14811d4 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -620,6 +620,17 @@
if (report_type == HID_OUTPUT_REPORT)
return -EINVAL;
+ /*
+ * In case of unnumbered reports the response from the device will
+ * not have the report ID that the upper layers expect, so we need
+ * to stash it the buffer ourselves and adjust the data size.
+ */
+ if (!report_number) {
+ buf[0] = 0;
+ buf++;
+ count--;
+ }
+
/* +2 bytes to include the size of the reply in the query buffer */
ask_count = min(count + 2, (size_t)ihid->bufsize);
@@ -641,6 +652,9 @@
count = min(count, ret_count - 2);
memcpy(buf, ihid->rawbuf + 2, count);
+ if (!report_number)
+ count++;
+
return count;
}
@@ -657,17 +671,19 @@
mutex_lock(&ihid->reset_lock);
- if (report_id) {
- buf++;
- count--;
- }
-
+ /*
+ * Note that both numbered and unnumbered reports passed here
+ * are supposed to have report ID stored in the 1st byte of the
+ * buffer, so we strip it off unconditionally before passing payload
+ * to i2c_hid_set_or_send_report which takes care of encoding
+ * everything properly.
+ */
ret = i2c_hid_set_or_send_report(client,
report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
- report_id, buf, count, use_data);
+ report_id, buf + 1, count - 1, use_data);
- if (report_id && ret >= 0)
- ret++; /* add report_id to the number of transfered bytes */
+ if (ret >= 0)
+ ret++; /* add report_id to the number of transferred bytes */
mutex_unlock(&ihid->reset_lock);
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 6cf59fd..b6d6d11 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -656,21 +656,12 @@
*/
payload_max_size &= ~(L1_CACHE_BYTES - 1);
- dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32);
+ dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL);
if (!dma_buf) {
client_data->flag_retry = true;
return -ENOMEM;
}
- dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(devc, dma_buf_phy)) {
- dev_err(cl_data_to_dev(client_data), "DMA map failed\n");
- client_data->flag_retry = true;
- rv = -ENOMEM;
- goto end_err_dma_buf_release;
- }
-
ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
@@ -690,14 +681,7 @@
ldr_xfer_dma_frag.fragment.size = fragment_size;
memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);
- dma_sync_single_for_device(devc, dma_buf_phy,
- payload_max_size,
- DMA_TO_DEVICE);
-
- /*
- * Flush cache here because the dma_sync_single_for_device()
- * does not do for x86.
- */
+ /* Flush cache to be sure the data is in main memory. */
clflush_cache_range(dma_buf, payload_max_size);
dev_dbg(cl_data_to_dev(client_data),
@@ -720,15 +704,8 @@
fragment_offset += fragment_size;
}
- dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
- kfree(dma_buf);
- return 0;
-
end_err_resp_buf_release:
- /* Free ISH buffer if not done already, in error case */
- dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
-end_err_dma_buf_release:
- kfree(dma_buf);
+ dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy);
return rv;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 5ffd0da..65af0eb 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -110,7 +110,7 @@
* @multi_packet_cnt: Count of fragmented packet count
*
* This structure is used to store completion flags and per client data like
- * like report description, number of HID devices etc.
+ * report description, number of HID devices etc.
*/
struct ishtp_cl_data {
/* completion flags */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 1cc1571..c0d6930 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -626,13 +626,14 @@
}
/**
- * ipc_tx_callback() - IPC tx callback function
+ * ipc_tx_send() - IPC tx send function
* @prm: Pointer to client device instance
*
- * Send message over IPC either first time or on callback on previous message
- * completion
+ * Send message over IPC. Message will be split into fragments
+ * if message size is bigger than IPC FIFO size, and all
+ * fragments will be sent one by one.
*/
-static void ipc_tx_callback(void *prm)
+static void ipc_tx_send(void *prm)
{
struct ishtp_cl *cl = prm;
struct ishtp_cl_tx_ring *cl_msg;
@@ -677,32 +678,41 @@
list);
rem = cl_msg->send_buf.size - cl->tx_offs;
- ishtp_hdr.host_addr = cl->host_client_id;
- ishtp_hdr.fw_addr = cl->fw_client_id;
- ishtp_hdr.reserved = 0;
- pmsg = cl_msg->send_buf.data + cl->tx_offs;
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
- if (rem <= dev->mtu) {
- ishtp_hdr.length = rem;
- ishtp_hdr.msg_complete = 1;
- cl->sending = 0;
- list_del_init(&cl_msg->list); /* Must be before write */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- /* Submit to IPC queue with no callback */
- ishtp_write_message(dev, &ishtp_hdr, pmsg);
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
- ++cl->tx_ring_free_size;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
- tx_free_flags);
- } else {
- /* Send IPC fragment */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- cl->tx_offs += dev->mtu;
- ishtp_hdr.length = dev->mtu;
- ishtp_hdr.msg_complete = 0;
- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
+
+ break;
+ } else {
+ /* Send IPC fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ /* All fragments submitted to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
}
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
}
/**
@@ -720,7 +730,7 @@
return;
cl->tx_offs = 0;
- ipc_tx_callback(cl);
+ ipc_tx_send(cl);
++cl->send_msg_cnt_ipc;
}
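The effect of the rewritten loop is that one call now pushes the whole message out in MTU-sized chunks, marking only the last fragment as complete. A rough standalone sketch of that control flow (the MTU and message size below are assumptions, not the driver's values):

	#include <stdio.h>

	#define MTU 128	/* assumed IPC FIFO size for this example */

	static void send_fragments(size_t msg_size)
	{
		size_t offs = 0, rem = msg_size;

		while (rem > 0) {
			if (rem <= MTU) {
				/* Last (or only) fragment: msg_complete = 1 */
				printf("frag @%zu len %zu complete=1\n", offs, rem);
				break;
			}
			/* Intermediate fragment: msg_complete = 0 */
			printf("frag @%zu len %d complete=0\n", offs, MTU);
			offs += MTU;
			rem -= MTU;
		}
	}

	int main(void)
	{
		send_fragments(300);	/* 128 + 128 + 44 */
		return 0;
	}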
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 329bb1a..4dbf690 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2124,7 +2124,7 @@
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
if (error) {
- /* no pad in use on this interface */
+ /* no pad events using this interface */
input_free_device(pad_input_dev);
wacom_wac->pad_input = NULL;
pad_input_dev = NULL;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index d90bfa8..d8d127f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -638,9 +638,26 @@
return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
}
+static bool wacom_is_art_pen(int tool_id)
+{
+ bool is_art_pen = false;
+
+ switch (tool_id) {
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ is_art_pen = true;
+ break;
+ }
+ return is_art_pen;
+}
+
static int wacom_intuos_get_tool_type(int tool_id)
{
- int tool_type;
+ int tool_type = BTN_TOOL_PEN;
+
+ if (wacom_is_art_pen(tool_id))
+ return tool_type;
switch (tool_id) {
case 0x812: /* Inking pen */
@@ -655,12 +672,9 @@
case 0x852:
case 0x823: /* Intuos3 Grip Pen */
case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
case 0x802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
case 0x10842: /* MobileStudio Pro Pro Pen slim */
case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
case 0x16802: /* Cintiq 13HD Pro Pen */
@@ -718,10 +732,6 @@
case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
-
- default: /* Unknown tool */
- tool_type = BTN_TOOL_PEN;
- break;
}
return tool_type;
}
@@ -2006,7 +2016,6 @@
wacom_wac->has_mute_touch_switch = true;
usage->type = EV_SW;
usage->code = SW_MUTE_DEVICE;
- features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHSTRIP:
wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
@@ -2086,6 +2095,30 @@
wacom_wac->hid_data.inrange_state |= value;
}
+ /* Process touch switch state first since it is reported through touch interface,
+ * which is independent of the pad interface. In the case when there are no other pad
+ * events, the pad interface will not even be created.
+ */
+ if ((equivalent_usage == WACOM_HID_WD_MUTE_DEVICE) ||
+ (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)) {
+ if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
+ input_report_switch(wacom_wac->shared->touch_input,
+ SW_MUTE_DEVICE, !(*is_touch_on));
+ input_sync(wacom_wac->shared->touch_input);
+ }
+ return;
+ }
+
+ if (!input)
+ return;
+
switch (equivalent_usage) {
case WACOM_HID_WD_TOUCHRING:
/*
@@ -2121,22 +2154,6 @@
input_event(input, usage->type, usage->code, 0);
break;
- case WACOM_HID_WD_MUTE_DEVICE:
- case WACOM_HID_WD_TOUCHONOFF:
- if (wacom_wac->shared->touch_input) {
- bool *is_touch_on = &wacom_wac->shared->is_touch_on;
-
- if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
- *is_touch_on = !(*is_touch_on);
- else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
- *is_touch_on = value;
-
- input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !(*is_touch_on));
- input_sync(wacom_wac->shared->touch_input);
- }
- break;
-
case WACOM_HID_WD_MODE_CHANGE:
if (wacom_wac->is_direct_mode != value) {
wacom_wac->is_direct_mode = value;
@@ -2312,6 +2329,9 @@
}
return;
case HID_DG_TWIST:
+ /* don't modify the value if the pen doesn't support the feature */
+ if (!wacom_is_art_pen(wacom_wac->id[0])) return;
+
/*
* Userspace expects pen twist to have its zero point when
* the buttons/finger is on the tablet's left. HID values
@@ -2763,7 +2783,7 @@
/* usage tests must precede field tests */
if (WACOM_BATTERY_USAGE(usage))
wacom_wac_battery_event(hdev, field, usage, value);
- else if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ else if (WACOM_PAD_FIELD(field))
wacom_wac_pad_event(hdev, field, usage, value);
else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_event(hdev, field, usage, value);
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 44a3f56..eb98201 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -524,6 +524,7 @@
if (!childpdev) {
err = -ENODEV;
dev_err(&pd->dev, "failed to create ssi controller port\n");
+ of_node_put(child);
goto out3;
}
}
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index a0cb5be..b9495b7 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -230,10 +230,10 @@
if (msg->ttype == HSI_MSG_READ) {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_FROM_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
@@ -247,10 +247,10 @@
} else {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_TO_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6476bfe..5b902ad 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,7 @@
* execute:
*
* (a) In the "normal (i.e., not resuming from hibernation)" path,
- * the full barrier in smp_store_mb() guarantees that the store
+ * the full barrier in virt_store_mb() guarantees that the store
* is propagated to all CPUs before the add_channel_work work
* is queued. In turn, add_channel_work is queued before the
* channel's ring buffer is allocated/initialized and the
@@ -362,14 +362,14 @@
* recv_int_page before retrieving the channel pointer from the
* array of channels.
*
- * (b) In the "resuming from hibernation" path, the smp_store_mb()
+ * (b) In the "resuming from hibernation" path, the virt_store_mb()
* guarantees that the store is propagated to all CPUs before
* the VMBus connection is marked as ready for the resume event
* (cf. check_ready_for_resume_event()). The interrupt handler
* of the VMBus driver and vmbus_chan_sched() can not run before
* vmbus_bus_resume() has completed execution (cf. resume_noirq).
*/
- smp_store_mb(
+ virt_store_mb(
vmbus_connection.channels[channel->offermsg.child_relid],
channel);
}
@@ -501,13 +501,17 @@
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
+ *
+ * If vmbus_device_register() fails, the 'device_obj' is freed in
+ * vmbus_device_release() as called by device_unregister() in the
+ * error path of vmbus_device_register(). In the outside error
+ * path, there's no need to free it.
*/
ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
- kfree(newchannel->device_obj);
goto err_deq_chan;
}
@@ -606,6 +610,7 @@
*/
if (newchannel->offermsg.offer.sub_channel_index == 0) {
mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
/*
* Don't call free_channel(), because newchannel->kobj
* is not initialized yet.
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index eb56e09..6a71699 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1558,7 +1558,7 @@
break;
default:
- pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
+ pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
}
}
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 356e221..769851b 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -378,7 +378,16 @@
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
u32 priv_read_loc = rbi->priv_read_index;
- u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+ u32 write_loc;
+
+ /*
+ * The Hyper-V host writes the packet data, then uses
+ * store_release() to update the write_index. Use load_acquire()
+ * here to prevent loads of the packet data from being re-ordered
+ * before the read of the write_index and potentially getting
+ * stale data.
+ */
+ write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
if (write_loc >= priv_read_loc)
return write_loc - priv_read_loc;
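The pairing the comment describes can be sketched with standalone C11 atomics (an analogy only; virt_store_mb()/virt_load_acquire() are the kernel primitives, and in the driver the producer is the Hyper-V host rather than a local thread): the producer writes the packet data and then publishes the index with release semantics, so the consumer's acquire load of the index guarantees it does not read stale data.

	#include <stdatomic.h>
	#include <stdio.h>

	static int packet_data;
	static atomic_uint write_index;

	/* Producer: write the payload, then publish the index with release. */
	static void producer(void)
	{
		packet_data = 42;
		atomic_store_explicit(&write_index, 1, memory_order_release);
	}

	/* Consumer: acquire-load the index so the data read below cannot be stale. */
	static void consumer(void)
	{
		if (atomic_load_explicit(&write_index, memory_order_acquire) != 0)
			printf("data = %d\n", packet_data);
	}

	int main(void)
	{
		producer();
		consumer();
		return 0;
	}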
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 362da2a..e99400f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1351,7 +1351,7 @@
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(hv_get_vector(), 0);
+ add_interrupt_randomness(hv_get_vector());
}
/*
@@ -2020,6 +2020,7 @@
ret = device_register(&child_device_obj->device);
if (ret) {
pr_err("Unable to register child device\n");
+ put_device(&child_device_obj->device);
return ret;
}
@@ -2251,7 +2252,7 @@
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
- resource_size_t range_min, range_max, start;
+ resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2286,6 +2287,14 @@
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
@@ -2673,10 +2682,15 @@
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_block);
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &hyperv_panic_block);
}
+ /*
+ * The panic notifier is always registered, hence we should
+ * also unconditionally unregister it here as well.
+ */
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &hyperv_panic_block);
+
free_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0c2b032..f741c74 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -922,7 +922,7 @@
config SENSORS_LTQ_CPUTEMP
bool "Lantiq cpu temperature sensor driver"
- depends on LANTIQ
+ depends on SOC_XWAY
help
If you say yes here you get support for the temperature
sensor inside your CPU.
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 740f39a..71e3579 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -20,6 +20,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/util_macros.h>
+#include <linux/sched.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
@@ -260,11 +261,10 @@
adt7470_read_temperatures(client, data);
mutex_unlock(&data->lock);
- set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
break;
- schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
+ schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval));
}
return 0;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index bb92112..0321292 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -46,9 +46,6 @@
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
-
#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
for_each_cpu(i, topology_sibling_cpumask(cpu))
@@ -91,6 +88,8 @@
struct platform_data {
struct device *hwmon_dev;
u16 pkg_id;
+ u16 cpu_map[NUM_REAL_CORES];
+ struct ida ida;
struct cpumask cpumask;
struct temp_data *core_data[MAX_CORE_DATA];
struct device_attribute name_attr;
@@ -441,7 +440,7 @@
MSR_IA32_THERM_STATUS;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
- tdata->cpu_core_id = TO_CORE_ID(cpu);
+ tdata->cpu_core_id = topology_core_id(cpu);
tdata->attr_size = MAX_CORE_ATTRS;
mutex_init(&tdata->update_lock);
return tdata;
@@ -454,7 +453,7 @@
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
- int err, attr_no;
+ int err, index, attr_no;
/*
* Find attr number for sysfs:
@@ -462,14 +461,26 @@
* The attr number is always core id + 2
* The Pkgtemp will always show up as temp1_*, if available
*/
- attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
+ if (pkg_flag) {
+ attr_no = PKG_SYSFS_ATTR_NO;
+ } else {
+ index = ida_alloc(&pdata->ida, GFP_KERNEL);
+ if (index < 0)
+ return index;
+ pdata->cpu_map[index] = topology_core_id(cpu);
+ attr_no = index + BASE_SYSFS_ATTR_NO;
+ }
- if (attr_no > MAX_CORE_DATA - 1)
- return -ERANGE;
+ if (attr_no > MAX_CORE_DATA - 1) {
+ err = -ERANGE;
+ goto ida_free;
+ }
tdata = init_temp_data(cpu, pkg_flag);
- if (!tdata)
- return -ENOMEM;
+ if (!tdata) {
+ err = -ENOMEM;
+ goto ida_free;
+ }
/* Test if we can access the status register */
err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@@ -505,6 +516,9 @@
exit_free:
pdata->core_data[attr_no] = NULL;
kfree(tdata);
+ida_free:
+ if (!pkg_flag)
+ ida_free(&pdata->ida, index);
return err;
}
@@ -524,6 +538,9 @@
kfree(pdata->core_data[indx]);
pdata->core_data[indx] = NULL;
+
+ if (indx >= BASE_SYSFS_ATTR_NO)
+ ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
}
static int coretemp_probe(struct platform_device *pdev)
@@ -537,6 +554,7 @@
return -ENOMEM;
pdata->pkg_id = pdev->id;
+ ida_init(&pdata->ida);
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -553,6 +571,7 @@
if (pdata->core_data[i])
coretemp_remove_core(pdata, i);
+ ida_destroy(&pdata->ida);
return 0;
}
@@ -647,7 +666,7 @@
struct platform_device *pdev = coretemp_get_pdev(cpu);
struct platform_data *pd;
struct temp_data *tdata;
- int indx, target;
+ int i, indx = -1, target;
/*
* Don't execute this on suspend as the device remove locks
@@ -660,12 +679,19 @@
if (!pdev)
return 0;
- /* The core id is too big, just return */
- indx = TO_ATTR_NO(cpu);
- if (indx > MAX_CORE_DATA - 1)
+ pd = platform_get_drvdata(pdev);
+
+ for (i = 0; i < NUM_REAL_CORES; i++) {
+ if (pd->cpu_map[i] == topology_core_id(cpu)) {
+ indx = i + BASE_SYSFS_ATTR_NO;
+ break;
+ }
+ }
+
+ /* Too many cores and this core is not populated, just return */
+ if (indx < 0)
return 0;
- pd = platform_get_drvdata(pdev);
tdata = pd->core_data[indx];
cpumask_clear_cpu(cpu, &pd->cpumask);
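Since core IDs are no longer used directly as sysfs indices, the offline path now has to search cpu_map for the slot that was assigned to the core. A minimal standalone model of that lookup (the map contents and core counts below are illustrative only):

	#include <stdio.h>

	#define NUM_REAL_CORES 4
	#define BASE_SYSFS_ATTR_NO 2

	/* cpu_map[index] holds the core ID that was assigned slot "index". */
	static unsigned short cpu_map[NUM_REAL_CORES] = { 0, 1, 8, 9 };

	static int core_id_to_indx(unsigned short core_id)
	{
		int i;

		for (i = 0; i < NUM_REAL_CORES; i++)
			if (cpu_map[i] == core_id)
				return i + BASE_SYSFS_ATTR_NO;
		return -1;	/* core not populated */
	}

	int main(void)
	{
		printf("core 8 -> indx %d\n", core_id_to_indx(8));	/* 4 */
		printf("core 5 -> indx %d\n", core_id_to_indx(5));	/* -1 */
		return 0;
	}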
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 72c7603..00303af 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -621,3 +621,4 @@
MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
MODULE_DESCRIPTION("Hard drive temperature monitor");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:drivetemp");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 4dec793..94b3572 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1577,8 +1577,9 @@
temp *= 125;
if (sign)
temp -= 128000;
- } else
- temp = data->temp[nr] * 1000;
+ } else {
+ temp = ((s8)data->temp[nr]) * 1000;
+ }
return sprintf(buf, "%d\n", temp);
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 3ea4021..d96e435 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -391,6 +391,9 @@
if (!fan_data)
return -EINVAL;
+ if (state >= fan_data->num_speed)
+ return -EINVAL;
+
set_fan_speed(fan_data, state);
return 0;
}
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 1fe3741..f29ce49 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -267,6 +267,7 @@
pdata->nchannels = nchannels;
/* fan controller base address */
+ of_node_get(dev->parent->of_node);
fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan");
if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) {
dev_err(dev, "fan node without base\n");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index a4ec852..2e6d6a5 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -550,7 +550,7 @@
res = platform_device_add(data->pdev);
if (res)
- goto ipmi_err;
+ goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@@ -598,7 +598,9 @@
ipmi_destroy_user(data->ipmi.user);
ipmi_err:
platform_set_drvdata(data->pdev, NULL);
- platform_device_unregister(data->pdev);
+ platform_device_del(data->pdev);
+dev_add_err:
+ platform_device_put(data->pdev);
dev_err:
ida_simple_remove(&aem_ida, data->id);
id_err:
@@ -690,7 +692,7 @@
res = platform_device_add(data->pdev);
if (res)
- goto ipmi_err;
+ goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@@ -738,7 +740,9 @@
ipmi_destroy_user(data->ipmi.user);
ipmi_err:
platform_set_drvdata(data->pdev, NULL);
- platform_device_unregister(data->pdev);
+ platform_device_del(data->pdev);
+dev_add_err:
+ platform_device_put(data->pdev);
dev_err:
ida_simple_remove(&aem_ida, data->id);
id_err:
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 046523d..41e3d3b 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -68,8 +68,9 @@
/* VM Individual Macro Register */
#define VM_COM_REG_SIZE 0x200
-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
+#define VM_SDIF_DATA(vm, ch) \
+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
/* SDA Slave Register */
#define IP_CTRL 0x00
@@ -115,6 +116,7 @@
u32 t_num;
u32 p_num;
u32 v_num;
+ u32 c_num;
u32 ip_freq;
u8 *vm_idx;
};
@@ -178,14 +180,15 @@
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *v_map = pvt->v_map;
+ u8 vm_idx, ch_idx;
u32 n, stat;
- u8 vm_idx;
int ret;
- if (channel >= pvt->v_num)
+ if (channel >= pvt->v_num * pvt->c_num)
return -EINVAL;
- vm_idx = pvt->vm_idx[channel];
+ vm_idx = pvt->vm_idx[channel / pvt->c_num];
+ ch_idx = channel % pvt->c_num;
switch (attr) {
case hwmon_in_input:
@@ -196,13 +199,23 @@
if (ret)
return ret;
- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
if(ret < 0)
return ret;
n &= SAMPLE_DATA_MSK;
- /* Convert the N bitstream count into voltage */
- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+ /*
+ * Convert the N bitstream count into voltage.
+ * To support negative voltage calculation for 64-bit machines
+ * n must be cast to long, since n and *val differ both in
+ * signedness and in size.
+ * Division is used instead of right shift, because for signed
+ * numbers, the sign bit is used to fill the vacated bit
+ * positions, and if the number is negative, 1 is used.
+ * BIT(x) may not be used instead of (1 << x) because it's
+ * unsigned.
+ */
+ *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
return 0;
default:
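A worked example of the conversion above, as a standalone sketch (the constants below are placeholders, not the driver's PVT_* values, and the sample count is made up):

	#include <stdio.h>

	/* Placeholder constants; the driver's real PVT_* values differ. */
	#define N_CONST   100L
	#define R_CONST   200000L
	#define CONV_BITS 10

	int main(void)
	{
		unsigned int n = 1500;	/* sampled bitstream count (example) */
		long val;

		/*
		 * Cast n to long so the subtraction may go negative, and divide
		 * rather than right-shift so a negative intermediate is rounded
		 * toward zero instead of being arithmetically shifted.
		 */
		val = (N_CONST * (long)n - R_CONST) / (1 << CONV_BITS);
		printf("converted value = %ld\n", val);	/* -48 with these numbers */
		return 0;
	}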
@@ -385,6 +398,19 @@
if (ret)
return ret;
+ val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
@@ -499,8 +525,8 @@
static int mr75203_probe(struct platform_device *pdev)
{
+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
const struct hwmon_channel_info **pvt_info;
- u32 ts_num, vm_num, pd_num, val, index, i;
struct device *dev = &pdev->dev;
u32 *temp_config, *in_config;
struct device *hwmon_dev;
@@ -541,9 +567,11 @@
ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
pvt->t_num = ts_num;
pvt->p_num = pd_num;
pvt->v_num = vm_num;
+ pvt->c_num = ch_num;
val = 0;
if (ts_num)
val++;
@@ -580,7 +608,7 @@
}
if (vm_num) {
- u32 num = vm_num;
+ u32 total_ch;
ret = pvt_get_regmap(pdev, "vm", pvt);
if (ret)
@@ -594,30 +622,30 @@
ret = device_property_read_u8_array(dev, "intel,vm-map",
pvt->vm_idx, vm_num);
if (ret) {
- num = 0;
+ /*
+ * In case the intel,vm-map property is not defined, we
+ * assume incremental channel numbers.
+ */
+ for (i = 0; i < vm_num; i++)
+ pvt->vm_idx[i] = i;
} else {
for (i = 0; i < vm_num; i++)
if (pvt->vm_idx[i] >= vm_num ||
pvt->vm_idx[i] == 0xff) {
- num = i;
+ pvt->v_num = i;
+ vm_num = i;
break;
}
}
- /*
- * Incase intel,vm-map property is not defined, we assume
- * incremental channel numbers.
- */
- for (i = num; i < vm_num; i++)
- pvt->vm_idx[i] = i;
-
- in_config = devm_kcalloc(dev, num + 1,
+ total_ch = ch_num * vm_num;
+ in_config = devm_kcalloc(dev, total_ch + 1,
sizeof(*in_config), GFP_KERNEL);
if (!in_config)
return -ENOMEM;
- memset32(in_config, HWMON_I_INPUT, num);
- in_config[num] = 0;
+ memset32(in_config, HWMON_I_INPUT, total_ch);
+ in_config[total_ch] = 0;
pvt_in.config = in_config;
pvt_info[index++] = &pvt_in;
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 88a5df2..de27837 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -319,6 +319,7 @@
/*
* STATUS_VOUT, STATUS_INPUT
*/
+#define PB_VOLTAGE_VIN_OFF BIT(3)
#define PB_VOLTAGE_UV_FAULT BIT(4)
#define PB_VOLTAGE_UV_WARNING BIT(5)
#define PB_VOLTAGE_OV_WARNING BIT(6)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 71798fd..117e3ce 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1360,7 +1360,7 @@
.reg = PMBUS_VIN_UV_FAULT_LIMIT,
.attr = "lcrit",
.alarm = "lcrit_alarm",
- .sbit = PB_VOLTAGE_UV_FAULT,
+ .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF,
}, {
.reg = PMBUS_VIN_OV_WARN_LIMIT,
.attr = "max",
@@ -2255,10 +2255,14 @@
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
int ret;
+ mutex_lock(&data->update_lock);
ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
+ mutex_unlock(&data->update_lock);
+
if (ret < 0)
return ret;
@@ -2269,11 +2273,17 @@
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
+ int ret;
- return pmbus_update_byte_data(client, page, PMBUS_OPERATION,
- PB_OPERATION_CONTROL_ON,
- enable ? PB_OPERATION_CONTROL_ON : 0);
+ mutex_lock(&data->update_lock);
+ ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
+ PB_OPERATION_CONTROL_ON,
+ enable ? PB_OPERATION_CONTROL_ON : 0);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
}
static int pmbus_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 6c84780..066b129 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -424,7 +424,7 @@
if (nowayout)
set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
- set_bit(WDOG_ACTIVE, &data->wddev.status);
+ set_bit(WDOG_HW_RUNNING, &data->wddev.status);
/* Since the watchdog uses a downcounter there is no register to read
the BIOS set timeout from (if any was set at all) ->
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 9dc210b..48466b0 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -730,10 +730,21 @@
return 0;
}
+static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
+ { .compatible = "ti,tmp401", },
+ { .compatible = "ti,tmp411", },
+ { .compatible = "ti,tmp431", },
+ { .compatible = "ti,tmp432", },
+ { .compatible = "ti,tmp435", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
+
static struct i2c_driver tmp401_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "tmp401",
+ .of_match_table = of_match_ptr(tmp4xx_of_match),
},
.probe_new = tmp401_probe,
.id_table = tmp401_id,
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 3647109..e499146 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -105,7 +105,7 @@
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0x40000,
+ .max_register = 0x20000,
.fast_io = true,
};
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index a61313f..8e19e8c 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -567,12 +567,11 @@
return ret;
}
-static int catu_remove(struct amba_device *adev)
+static void catu_remove(struct amba_device *adev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
- return 0;
}
static struct amba_id catu_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 424c296..5ddc810 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1337,7 +1337,7 @@
continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (conn->child_dev) {
+ if (conn->child_dev && conn->child_dev->has_conns_grp) {
ret = coresight_make_links(csdev, conn,
conn->child_dev);
if (ret)
@@ -1382,6 +1382,7 @@
* platform data.
*/
fwnode_handle_put(conn->child_fwnode);
+ conn->child_fwnode = NULL;
/* No need to continue */
break;
}
@@ -1486,6 +1487,7 @@
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
+ bool registered = false;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
@@ -1506,7 +1508,8 @@
refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
- goto err_free_csdev;
+ kfree(csdev);
+ goto err_out;
}
csdev->refcnt = refcnts;
@@ -1530,6 +1533,13 @@
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
+ /*
+ * Make sure the device registration and the connection fixup
+ * are synchronised, so that we don't see uninitialised devices
+ * on the coresight bus while trying to resolve the connections.
+ */
+ mutex_lock(&coresight_mutex);
+
ret = device_register(&csdev->dev);
if (ret) {
put_device(&csdev->dev);
@@ -1537,7 +1547,7 @@
* All resources are free'd explicitly via
* coresight_device_release(), triggered from put_device().
*/
- goto err_out;
+ goto out_unlock;
}
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -1552,11 +1562,11 @@
* from put_device(), which is in turn called from
* function device_unregister().
*/
- goto err_out;
+ goto out_unlock;
}
}
-
- mutex_lock(&coresight_mutex);
+ /* Device is now registered */
+ registered = true;
ret = coresight_create_conns_sysfs_group(csdev);
if (!ret)
@@ -1566,16 +1576,18 @@
if (!ret && cti_assoc_ops && cti_assoc_ops->add)
cti_assoc_ops->add(csdev);
+out_unlock:
mutex_unlock(&coresight_mutex);
- if (ret) {
+ /* Success */
+ if (!ret)
+ return csdev;
+
+ /* Unregister the device if needed */
+ if (registered) {
coresight_unregister(csdev);
return ERR_PTR(ret);
}
- return csdev;
-
-err_free_csdev:
- kfree(csdev);
err_out:
/* Cleanup the connection information */
coresight_release_platform_data(NULL, desc->pdata);
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index e1d2324..1e98562 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -379,9 +379,10 @@
int cpu;
struct debug_drvdata *drvdata;
- mutex_lock(&debug_lock);
+ /* Bail out if we can't acquire the mutex or the functionality is off */
+ if (!mutex_trylock(&debug_lock))
+ return NOTIFY_DONE;
- /* Bail out if the functionality is disabled */
if (!debug_enable)
goto skip_dump;
@@ -400,7 +401,7 @@
skip_dump:
mutex_unlock(&debug_lock);
- return 0;
+ return NOTIFY_DONE;
}
static struct notifier_block debug_notifier = {
@@ -627,7 +628,7 @@
return ret;
}
-static int debug_remove(struct amba_device *adev)
+static void debug_remove(struct amba_device *adev)
{
struct device *dev = &adev->dev;
struct debug_drvdata *drvdata = amba_get_drvdata(adev);
@@ -642,8 +643,6 @@
if (!--debug_count)
debug_func_exit();
-
- return 0;
}
static const struct amba_cs_uci_id uci_id_debug[] = {
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 7ea9359..9027069 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -90,11 +90,9 @@
static int cti_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
- struct device *dev = &drvdata->csdev->dev;
unsigned long flags;
int rc = 0;
- pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
/* no need to do anything if enabled or unpowered*/
@@ -119,7 +117,6 @@
/* cannot enable due to error */
cti_err_not_enabled:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(dev->parent);
return rc;
}
@@ -153,7 +150,6 @@
static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
- struct device *dev = &drvdata->csdev->dev;
spin_lock(&drvdata->spinlock);
@@ -174,7 +170,6 @@
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(dev->parent);
return 0;
/* not disabled this call */
@@ -836,7 +831,7 @@
if (drvdata->csdev_release)
drvdata->csdev_release(dev);
}
-static int cti_remove(struct amba_device *adev)
+static void cti_remove(struct amba_device *adev)
{
struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -845,8 +840,6 @@
mutex_unlock(&ect_mutex);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static int cti_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 0cf6f0b..51c801c 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -803,7 +803,7 @@
return ret;
}
-static int etb_remove(struct amba_device *adev)
+static void etb_remove(struct amba_device *adev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -814,8 +814,6 @@
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 5bf5a5a..683a69e 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -909,7 +909,7 @@
etmdrvdata[cpu] = NULL;
}
-static int etm_remove(struct amba_device *adev)
+static void etm_remove(struct amba_device *adev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -932,8 +932,6 @@
cpus_read_unlock();
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 74d3e2f..99df453 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1582,7 +1582,7 @@
etmdrvdata[cpu] = NULL;
}
-static int etm4_remove(struct amba_device *adev)
+static void etm4_remove(struct amba_device *adev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -1605,8 +1605,6 @@
cpus_read_unlock();
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static const struct amba_id etm4_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 4682f26..42cc38c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -364,8 +364,12 @@
mode = ETM_MODE_QELEM(config->mode);
/* start by clearing QE bits */
config->cfg &= ~(BIT(13) | BIT(14));
- /* if supported, Q elements with instruction counts are enabled */
- if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
+ /*
+ * if supported, Q elements with instruction counts are enabled.
+ * Always set the low bit for any requested mode. Valid combos are
+ * 0b00, 0b01 and 0b11.
+ */
+ if (mode && drvdata->q_support)
config->cfg |= BIT(13);
/*
* if supported, Q elements with and without instruction
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 3fc6c67..b2fb853 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -370,9 +370,9 @@
return funnel_probe(&adev->dev, &adev->res);
}
-static int dynamic_funnel_remove(struct amba_device *adev)
+static void dynamic_funnel_remove(struct amba_device *adev)
{
- return funnel_remove(&adev->dev);
+ funnel_remove(&adev->dev);
}
static const struct amba_id dynamic_funnel_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 38008ac..da2bfee 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -388,9 +388,9 @@
return replicator_probe(&adev->dev, &adev->res);
}
-static int dynamic_replicator_remove(struct amba_device *adev)
+static void dynamic_replicator_remove(struct amba_device *adev)
{
- return replicator_remove(&adev->dev);
+ replicator_remove(&adev->dev);
}
static const struct amba_id dynamic_replicator_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 587c1d7..0ecca9f 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -951,15 +951,13 @@
return ret;
}
-static int stm_remove(struct amba_device *adev)
+static void stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
stm_unregister_device(&drvdata->stm);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 8169dff..e29b391 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -559,7 +559,7 @@
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
-static int tmc_remove(struct amba_device *adev)
+static void tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -570,8 +570,6 @@
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static const struct amba_id tmc_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 5b35029..0ca39d9 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -173,13 +173,11 @@
return PTR_ERR(drvdata->csdev);
}
-static int tpiu_remove(struct amba_device *adev)
+static void tpiu_remove(struct amba_device *adev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
index 2c7f511..891b28e 100644
--- a/drivers/hwtracing/intel_th/msu-sink.c
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -71,6 +71,9 @@
block = dma_alloc_coherent(priv->dev->parent->parent,
PAGE_SIZE, &sg_dma_address(sg_ptr),
GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
sg_set_buf(sg_ptr, block, PAGE_SIZE);
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 3a77551..24f56a7 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1053,6 +1053,16 @@
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */
+static struct page *msc_sg_page(struct scatterlist *sg)
+{
+ void *addr = sg_virt(sg);
+
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
+
+ return sg_page(sg);
+}
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -1125,7 +1135,7 @@
int i;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
- struct page *page = sg_page(sg);
+ struct page *page = msc_sg_page(sg);
page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
@@ -1387,7 +1397,7 @@
pgoff -= win->pgoff;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
- struct page *page = sg_page(sg);
+ struct page *page = msc_sg_page(sg);
size_t pgsz = PFN_DOWN(sg->length);
if (pgoff < pgsz)
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 817cdb2..e254380 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -100,8 +100,10 @@
}
th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
- if (IS_ERR(th))
- return PTR_ERR(th);
+ if (IS_ERR(th)) {
+ err = PTR_ERR(th);
+ goto err_free_irq;
+ }
th->activate = intel_th_pci_activate;
th->deactivate = intel_th_pci_deactivate;
@@ -109,6 +111,10 @@
pci_set_master(pdev);
return 0;
+
+err_free_irq:
+ pci_free_irq_vectors(pdev);
+ return err;
}
static void intel_th_pci_remove(struct pci_dev *pdev)
@@ -279,6 +285,21 @@
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Meteor Lake-P */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Raptor Lake-S */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Raptor Lake-S CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 66864f9..7960fa4 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -657,6 +657,7 @@
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
bool is_read;
+ u8 *dma_buf = NULL;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
@@ -704,7 +705,17 @@
dev->msg = m_start;
dev->recv_len_abort = false;
+ if (dev->use_dma) {
+ dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1);
+ if (!dma_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev->buf = dma_buf;
+ }
+
ret = at91_do_twi_transfer(dev);
+ i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret);
ret = (ret < 0) ? ret : num;
out:
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index c1bbc4c..5092821 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -386,9 +386,9 @@
*/
static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
- unsigned int isr_status, avail_bytes, updatetx;
+ unsigned int isr_status, avail_bytes;
unsigned int bytes_to_send;
- bool hold_quirk;
+ bool updatetx;
struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */
int done_flag = 0;
@@ -408,11 +408,7 @@
* Check if transfer size register needs to be updated again for a
* large data receive operation.
*/
- updatetx = 0;
- if (id->recv_count > id->curr_recv_count)
- updatetx = 1;
-
- hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+ updatetx = id->recv_count > id->curr_recv_count;
/* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf &&
@@ -443,7 +439,7 @@
break;
}
- if (cdns_is_holdquirk(id, hold_quirk))
+ if (cdns_is_holdquirk(id, updatetx))
break;
}
@@ -454,7 +450,7 @@
* maintain transfer size non-zero while performing a large
* receive operation.
*/
- if (cdns_is_holdquirk(id, hold_quirk)) {
+ if (cdns_is_holdquirk(id, updatetx)) {
/* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -476,22 +472,6 @@
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
- } else if (id->recv_count && !hold_quirk &&
- !id->curr_recv_count) {
-
- /* Set the slave address in address register*/
- cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
- CDNS_I2C_ADDR_OFFSET);
-
- if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
- cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
- } else {
- cdns_i2c_writereg(id->recv_count,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = id->recv_count;
- }
}
/* Clear hold (if not repeated start) and signal completion */
@@ -586,8 +566,13 @@
ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO;
+ /*
+ * Receive up to I2C_SMBUS_BLOCK_MAX data bytes, plus one message length
+ * byte, plus one checksum byte if PEC is enabled. p_msg->len will be 2 if
+ * PEC is enabled, otherwise 1.
+ */
if (id->p_msg->flags & I2C_M_RECV_LEN)
- id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
+ id->recv_count = I2C_SMBUS_BLOCK_MAX + id->p_msg->len;
id->curr_recv_count = id->recv_count;
@@ -724,7 +709,7 @@
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
struct i2c_adapter *adap)
{
- unsigned long time_left;
+ unsigned long time_left, msg_timeout;
u32 reg;
id->p_msg = msg;
@@ -749,8 +734,16 @@
else
cdns_i2c_msend(id);
+ /* Minimal time to execute this message */
+ msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
+ /* Plus some wiggle room */
+ msg_timeout += msecs_to_jiffies(500);
+
+ if (msg_timeout < adap->timeout)
+ msg_timeout = adap->timeout;
+
/* Wait for the signal of completion */
- time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
+ time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
if (time_left == 0) {
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
@@ -765,6 +758,9 @@
if (id->err_status & CDNS_I2C_IXR_ARB_LOST)
return -EAGAIN;
+ if (msg->flags & I2C_M_RECV_LEN)
+ msg->len += min_t(unsigned int, msg->buf[0], I2C_SMBUS_BLOCK_MAX);
+
return 0;
}
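The per-message timeout introduced above is easy to sanity-check standalone (a sketch only; msecs_to_jiffies is replaced by plain milliseconds and the clock rate and adapter timeout are assumed values):

	#include <stdio.h>

	#define BITS_PER_BYTE 8

	/* Rough re-statement of the calculation, in milliseconds instead of jiffies. */
	static unsigned long msg_timeout_ms(unsigned int len, unsigned long i2c_clk_hz,
					    unsigned long adap_timeout_ms)
	{
		unsigned long t = (1000UL * len * BITS_PER_BYTE) / i2c_clk_hz;

		t += 500;			/* wiggle room */
		if (t < adap_timeout_ms)
			t = adap_timeout_ms;	/* never shorter than the adapter default */
		return t;
	}

	int main(void)
	{
		/*
		 * 4 KiB at 100 kHz needs roughly 327 ms on the wire; with the
		 * 500 ms margin that is still below the assumed 1000 ms adapter
		 * default, so 1000 ms is used.
		 */
		printf("%lu ms\n", msg_timeout_ms(4096, 100000, 1000));
		return 0;
	}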
@@ -1281,6 +1277,7 @@
return 0;
err_clk_dis:
+ clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 3c19aad..9468c6c 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -474,9 +474,6 @@
{
int ret;
- if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
-
if (prepare) {
/* Optional interface clock */
ret = clk_prepare_enable(dev->pclk);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0dfeb2d..ad91c7c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -266,8 +266,17 @@
goto exit_reset;
}
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (!i2c_dw_prepare_clk(dev, true)) {
+ dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto exit_reset;
+ }
+
+ ret = i2c_dw_prepare_clk(dev, true);
+ if (ret)
+ goto exit_reset;
+
+ if (dev->clk) {
u64 clk_khz;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5618c1f..45682d3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1275,6 +1275,7 @@
*/
{ "Latitude 5480", 0x29 },
{ "Vostro V131", 0x1d },
+ { "Vostro 5568", 0x29 },
};
static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 72af4b4..be4ad51 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1280,9 +1280,7 @@
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
/* remove adapter */
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
@@ -1291,17 +1289,21 @@
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret >= 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index a35a27c..3d2d926 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -82,6 +82,7 @@
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
+#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
/* Hardware Descriptor Constants - Control Field */
#define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
@@ -175,6 +176,8 @@
u8 head; /* ring buffer head pointer */
struct completion cmp; /* interrupt completion */
u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
+ dma_addr_t log_dma;
+ u32 *log;
};
static const struct pci_device_id ismt_ids[] = {
@@ -409,6 +412,9 @@
memset(desc, 0, sizeof(struct ismt_desc));
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+ /* Always clear the log entries */
+ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
+
/* Initialize common control bits */
if (likely(pci_dev_msi_enabled(priv->pci_dev)))
desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
@@ -693,6 +699,8 @@
/* initialize the Master Descriptor Base Address (MDBA) */
writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
+
/* initialize the Master Control Register (MCTRL) */
writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
@@ -780,6 +788,12 @@
priv->head = 0;
init_completion(&priv->cmp);
+ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
+ ISMT_LOG_ENTRIES * sizeof(u32),
+ &priv->log_dma, GFP_KERNEL);
+ if (!priv->log)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index ef73a42..07eb819 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -465,18 +465,18 @@
*/
meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0);
- ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- clk_disable_unprepare(i2c->clk);
- return ret;
- }
-
/* Disable filtering */
meson_i2c_set_mask(i2c, REG_SLAVE_ADDR,
REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0);
meson_i2c_set_clk_div(i2c, timings.bus_freq_hz);
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret < 0) {
+ clk_disable_unprepare(i2c->clk);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index ab261d7..90c488a 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -63,13 +64,14 @@
*/
#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
/* Reference clock for Bluefield - 156 MHz. */
-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
+#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
/* Constant used to determine the PLL frequency. */
-#define MLNXBF_I2C_COREPLL_CONST 16384
+#define MLNXBF_I2C_COREPLL_CONST 16384ULL
+
+#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
/* PLL registers. */
-#define MLXBF_I2C_CORE_PLL_REG0 0x0
#define MLXBF_I2C_CORE_PLL_REG1 0x4
#define MLXBF_I2C_CORE_PLL_REG2 0x8
@@ -187,22 +189,15 @@
#define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
/* Core PLL TYU configuration. */
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
-
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
/* Core PLL YU configuration. */
#define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
#define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
/* Core PLL frequency. */
static u64 mlxbf_i2c_corepll_frequency;
@@ -317,6 +312,7 @@
* exact.
*/
#define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */
+#define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT (300 * 1000) /* 300ms */
/* Encapsulates timing parameters. */
struct mlxbf_i2c_timings {
@@ -485,8 +481,6 @@
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
-
/*
* Function to poll a set of bits at a specific address; it checks whether
* the bits are equal to zero when eq_zero is set to 'true', and not equal
@@ -527,6 +521,25 @@
return false;
}
+/*
+ * Wait for the lock to be released before acquiring it.
+ */
+static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv)
+{
+ if (mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ MLXBF_I2C_MASTER_LOCK_BIT, true,
+ MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT))
+ return true;
+
+ return false;
+}
+
+static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv)
+{
+ /* Clear the gw to clear the lock */
+ writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW);
+}
+
static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
u32 cause_status)
{
@@ -675,7 +688,7 @@
/* Clear status bits. */
writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
/* Set the cause data. */
- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
/* Zero PEC byte. */
writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
/* Zero byte count. */
@@ -718,10 +731,19 @@
slave = request->slave & GENMASK(6, 0);
addr = slave << 1;
- /* First of all, check whether the HW is idle. */
- if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv)))
+ /*
+ * Try to acquire the smbus gw lock before any reads of the GW register since
+ * a read sets the lock.
+ */
+ if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv)))
return -EBUSY;
+ /* Check whether the HW is idle */
+ if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv))) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
/* Set first byte. */
data_desc[data_idx++] = addr;
@@ -744,6 +766,11 @@
if (flags & MLXBF_I2C_F_WRITE) {
write_en = 1;
write_len += operation->length;
+ if (data_idx + operation->length >
+ MLXBF_I2C_MASTER_DATA_DESC_SIZE) {
+ ret = -ENOBUFS;
+ goto out_unlock;
+ }
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
@@ -775,7 +802,7 @@
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
pec_en, 0);
if (ret)
- return ret;
+ goto out_unlock;
}
if (read_en) {
@@ -802,6 +829,9 @@
priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM);
}
+out_unlock:
+ mlxbf_i2c_smbus_master_unlock(priv);
+
return ret;
}
@@ -1413,24 +1443,19 @@
return 0;
}
-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
{
- u64 core_frequency, pad_frequency;
+ u64 core_frequency;
u8 core_od, core_r;
u32 corepll_val;
u16 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */
- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
/*
* Compute PLL output frequency as follow:
@@ -1442,31 +1467,26 @@
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- core_frequency = pad_frequency * (++core_f);
+ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
core_frequency /= (++core_r) * (++core_od);
return core_frequency;
}
-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
{
u32 corepll_reg1_val, corepll_reg2_val;
- u64 corepll_frequency, pad_frequency;
+ u64 corepll_frequency;
u8 core_od, core_r;
u32 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */
- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
/*
* Compute PLL output frequency as follow:
@@ -1478,7 +1498,7 @@
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
+ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
corepll_frequency /= (++core_r) * (++core_od);
return corepll_frequency;
@@ -2186,14 +2206,14 @@
[1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
[2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
},
- .calculate_freq = mlxbf_calculate_freq_from_tyu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
},
[MLXBF_I2C_CHIP_TYPE_2] = {
.type = MLXBF_I2C_CHIP_TYPE_2,
.shared_res = {
[0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
},
- .calculate_freq = mlxbf_calculate_freq_from_yu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
}
};
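The two calculate_freq hunks above replace the rol32()-plus-narrow-mask extraction with full-position GENMASK() definitions consumed by FIELD_GET(). The standalone sketch below models the TYU path and its formula, PLL_OUT_FREQ = PLL_IN_FREQ * (CORE_F + 1) / ((CORE_R + 1) * (CORE_OD + 1)); the register value and the 156.25 MHz pad clock are made-up illustration values, and the GENMASK/FIELD_GET helpers are minimal userspace stand-ins for the kernel macros, not the driver's code.

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)   ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v) (((v) & (m)) >> __builtin_ctz(m))

#define COREPLL_CORE_F_TYU_MASK   GENMASK(15, 3)
#define COREPLL_CORE_OD_TYU_MASK  GENMASK(19, 16)
#define COREPLL_CORE_R_TYU_MASK   GENMASK(25, 20)

int main(void)
{
        uint32_t reg = 0x004401f8;        /* hypothetical COREPLL_REG1 value */
        uint64_t pll_in = 156250000ULL;   /* assumed pad clock, illustration only */
        uint32_t f  = FIELD_GET(COREPLL_CORE_F_TYU_MASK, reg);   /* 63 */
        uint32_t od = FIELD_GET(COREPLL_CORE_OD_TYU_MASK, reg);  /* 4 */
        uint32_t r  = FIELD_GET(COREPLL_CORE_R_TYU_MASK, reg);   /* 4 */
        uint64_t freq = pll_in * (f + 1) / ((r + 1) * (od + 1));

        printf("core_f=%u core_od=%u core_r=%u -> %llu Hz\n",
               f, od, r, (unsigned long long)freq);  /* 400000000 Hz */
        return 0;
}

Because each mask now records the field's true bit position, the extraction and the PLL formula can be sanity-checked in isolation like this, which the old shift-then-mask pairs made error-prone.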
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 45fe4a7..901f0fb 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -304,7 +304,8 @@
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_disable_clk;
}
adap = &i2c->adap;
@@ -322,10 +323,15 @@
ret = i2c_add_adapter(adap);
if (ret < 0)
- return ret;
+ goto err_disable_clk;
dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2c->clk);
+
return ret;
}
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index d4b1b08..a3363b2 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1055,7 +1055,7 @@
return ret;
}
-static int nmk_i2c_remove(struct amba_device *adev)
+static void nmk_i2c_remove(struct amba_device *adev)
{
struct resource *res = &adev->res;
struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
@@ -1068,8 +1068,6 @@
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
release_mem_region(res->start, resource_size(res));
-
- return 0;
}
static struct i2c_vendor_data vendor_stn8815 = {
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 2ad1663..31e3d2c 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -123,11 +123,11 @@
* Since the addr regs are sprinkled all over the address space,
* use this array to get the address of each register.
*/
-#define I2C_NUM_OWN_ADDR 10
+#define I2C_NUM_OWN_ADDR 2
+#define I2C_NUM_OWN_ADDR_SUPPORTED 2
+
static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
- NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4,
- NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8,
- NPCM_I2CADDR9, NPCM_I2CADDR10,
+ NPCM_I2CADDR1, NPCM_I2CADDR2,
};
#endif
@@ -359,14 +359,14 @@
{
struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
- return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+ return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
}
static int npcm_i2c_get_SDA(struct i2c_adapter *_adap)
{
struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
- return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+ return !!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3));
}
static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus)
@@ -391,14 +391,10 @@
#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i;
- /* select bank 0 for I2C addresses */
- npcm_i2c_select_bank(bus, I2C_BANK_0);
-
/* Slave addresses removal */
- for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++)
+ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++)
iowrite8(0, bus->reg + npcm_i2caddr[i]);
- npcm_i2c_select_bank(bus, I2C_BANK_1);
#endif
/* Disable module */
i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2);
@@ -563,6 +559,15 @@
iowrite8(val, bus->reg + NPCM_I2CCTL1);
}
+static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ /* Clear NEGACK, STASTR and BER bits */
+ val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
+ iowrite8(val, bus->reg + NPCM_I2CST);
+}
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable)
{
@@ -594,8 +599,7 @@
i2cctl1 &= ~NPCM_I2CCTL1_GCMEN;
iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1);
return 0;
- }
- if (addr_type == I2C_ARP_ADDR) {
+ } else if (addr_type == I2C_ARP_ADDR) {
i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3);
if (enable)
i2cctl3 |= I2CCTL3_ARPMEN;
@@ -604,16 +608,16 @@
iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3);
return 0;
}
+ if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
+ dev_err(bus->dev, "try to enable more than 2 SA not supported\n");
+
if (addr_type >= I2C_ARP_ADDR)
return -EFAULT;
- /* select bank 0 for address 3 to 10 */
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_0);
+
/* Set and enable the address */
iowrite8(sa_reg, bus->reg + npcm_i2caddr[addr_type]);
npcm_i2c_slave_int_enable(bus, enable);
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_1);
+
return 0;
}
#endif
@@ -642,8 +646,8 @@
iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
iowrite8(0xFF, bus->reg + NPCM_I2CST);
- /* Clear EOB bit */
- iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3);
+ /* Clear and disable EOB */
+ npcm_i2c_eob_int(bus, false);
/* Clear all fifo bits: */
iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
@@ -655,6 +659,9 @@
}
#endif
+ /* clear status bits for spurious interrupts */
+ npcm_i2c_clear_master_status(bus);
+
bus->state = I2C_IDLE;
}
@@ -815,15 +822,6 @@
}
}
-static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
-{
- u8 val;
-
- /* Clear NEGACK, STASTR and BER bits */
- val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
- iowrite8(val, bus->reg + NPCM_I2CST);
-}
-
static void npcm_i2c_master_abort(struct npcm_i2c *bus)
{
/* Only current master is allowed to issue a stop condition */
@@ -840,15 +838,11 @@
{
u8 slave_add;
- /* select bank 0 for address 3 to 10 */
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_0);
+ if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
+ dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n");
slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
- if (addr_type > I2C_SLAVE_ADDR2)
- npcm_i2c_select_bank(bus, I2C_BANK_1);
-
return slave_add;
}
@@ -858,12 +852,12 @@
/* Set the enable bit */
slave_add |= 0x80;
- npcm_i2c_select_bank(bus, I2C_BANK_0);
- for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) {
+
+ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++) {
if (ioread8(bus->reg + npcm_i2caddr[i]) == slave_add)
iowrite8(0, bus->reg + npcm_i2caddr[i]);
}
- npcm_i2c_select_bank(bus, I2C_BANK_1);
+
return 0;
}
@@ -918,11 +912,15 @@
for (i = 0; i < I2C_HW_FIFO_SIZE; i++) {
if (bus->slv_wr_size >= I2C_HW_FIFO_SIZE)
break;
- i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
+ if (bus->state == I2C_SLAVE_MATCH) {
+ i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
+ bus->state = I2C_OPER_STARTED;
+ } else {
+ i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ }
ind = (bus->slv_wr_ind + bus->slv_wr_size) % I2C_HW_FIFO_SIZE;
bus->slv_wr_buf[ind] = value;
bus->slv_wr_size++;
- i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value);
}
return I2C_HW_FIFO_SIZE - ret;
}
@@ -970,7 +968,6 @@
if (nwrite == 0)
return;
- bus->state = I2C_OPER_STARTED;
bus->operation = I2C_WRITE_OPER;
/* get the next buffer */
@@ -1231,7 +1228,16 @@
ret = IRQ_HANDLED;
} /* SDAST */
- return ret;
+ /*
+ * if irq is not one of the above, make sure EOB is disabled and all
+ * status bits are cleared.
+ */
+ if (ret == IRQ_NONE) {
+ npcm_i2c_eob_int(bus, false);
+ npcm_i2c_clear_master_status(bus);
+ }
+
+ return IRQ_HANDLED;
}
static int npcm_i2c_reg_slave(struct i2c_client *client)
@@ -1467,6 +1473,9 @@
npcm_i2c_eob_int(bus, false);
npcm_i2c_master_stop(bus);
+ /* Clear SDA Status bit (by reading dummy byte) */
+ npcm_i2c_rd_byte(bus);
+
/*
* The bus is released from stall only after the SW clears
* NEGACK bit. Then a Stop condition is sent.
@@ -1474,6 +1483,8 @@
npcm_i2c_clear_master_status(bus);
readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val,
!(val & NPCM_I2CCST_BUSY), 10, 200);
+ /* verify no status bits are still set after bus is released */
+ npcm_i2c_clear_master_status(bus);
}
bus->state = I2C_IDLE;
@@ -1672,10 +1683,10 @@
int iter = 27;
if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) {
- dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck",
- bus->num);
+ dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck",
+ bus->num, bus->dest_addr);
npcm_i2c_reset(bus);
- return status;
+ return 0;
}
npcm_i2c_int_enable(bus, false);
@@ -1909,6 +1920,7 @@
bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ)
return -EINVAL;
+ npcm_i2c_int_enable(bus, false);
npcm_i2c_disable(bus);
/* Configure FIFO mode : */
@@ -1937,10 +1949,17 @@
val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS;
iowrite8(val, bus->reg + NPCM_I2CCTL1);
- npcm_i2c_int_enable(bus, true);
-
npcm_i2c_reset(bus);
+ /* check HW is OK: SDA and SCL should be high at this point. */
+ if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
+ dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
+ dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
+
+ npcm_i2c_int_enable(bus, true);
return 0;
}
@@ -1988,10 +2007,14 @@
#if IS_ENABLED(CONFIG_I2C_SLAVE)
if (bus->slave) {
bus->master_or_slave = I2C_SLAVE;
- return npcm_i2c_int_slave_handler(bus);
+ if (npcm_i2c_int_slave_handler(bus))
+ return IRQ_HANDLED;
}
#endif
- return IRQ_NONE;
+ /* clear status bits for spurious interrupts */
+ npcm_i2c_clear_master_status(bus);
+
+ return IRQ_HANDLED;
}
static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
@@ -2047,8 +2070,7 @@
u16 nwrite, nread;
u8 *write_data, *read_data;
u8 slave_addr;
- int timeout;
- int ret = 0;
+ unsigned long timeout;
bool read_block = false;
bool read_PEC = false;
u8 bus_busy;
@@ -2099,13 +2121,13 @@
* 9: bits per transaction (including the ack/nack)
*/
timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
- timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
+ timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
return -EINVAL;
}
- time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1;
+ time_left = jiffies + timeout + 1;
do {
/*
* we must clear slave address immediately when the bus is not
@@ -2138,12 +2160,12 @@
bus->read_block_use = read_block;
reinit_completion(&bus->cmd_complete);
- if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
- write_data, read_data, read_PEC,
- read_block))
- ret = -EBUSY;
- if (ret != -EBUSY) {
+ npcm_i2c_int_enable(bus, true);
+
+ if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ write_data, read_data, read_PEC,
+ read_block)) {
time_left = wait_for_completion_timeout(&bus->cmd_complete,
timeout);
@@ -2157,26 +2179,31 @@
}
}
}
- ret = bus->cmd_err;
/* if there was BER, check if need to recover the bus: */
if (bus->cmd_err == -EAGAIN)
- ret = i2c_recover_bus(adap);
+ bus->cmd_err = i2c_recover_bus(adap);
/*
* After any type of error, check if LAST bit is still set,
* due to a HW issue.
* It cannot be cleared without resetting the module.
*/
- if (bus->cmd_err &&
- (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
+ else if (bus->cmd_err &&
+ (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL)))
npcm_i2c_reset(bus);
+ /* after any xfer, successful or not, stall and EOB must be disabled */
+ npcm_i2c_stall_after_start(bus, false);
+ npcm_i2c_eob_int(bus, false);
+
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* reenable slave if it was enabled */
if (bus->slave)
iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN,
bus->reg + NPCM_I2CADDR1);
+#else
+ npcm_i2c_int_enable(bus, false);
#endif
return bus->cmd_err;
}
@@ -2269,7 +2296,7 @@
adap = &bus->adap;
adap->owner = THIS_MODULE;
adap->retries = 3;
- adap->timeout = HZ;
+ adap->timeout = msecs_to_jiffies(35);
adap->algo = &npcm_i2c_algo;
adap->quirks = &npcm_i2c_quirks;
adap->algo_data = bus;
@@ -2336,8 +2363,7 @@
static int __init npcm_i2c_init(void)
{
npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
- platform_driver_register(&npcm_i2c_bus_driver);
- return 0;
+ return platform_driver_register(&npcm_i2c_bus_driver);
}
module_init(npcm_i2c_init);
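A quick worked example of the new timeout arithmetic in npcm_i2c_master_xfer() above (numbers chosen purely for illustration): on a 100 kHz bus with nwrite = 1 and nread = 32, timeout_usec = (2 * 9 * 1000000 / 100000) * (2 + 32 + 1) = 180 * 35 = 6300 us, which is below the adapter's new 35 ms default timeout, so max_t() keeps 35 ms. For a 1024-byte write at the same speed the estimate grows to 180 * 1026, roughly 185 ms, and the computed value wins. Either way, both the completion wait and the bus-busy deadline now scale with the transfer size instead of relying on the old fixed DEFAULT_STALL_COUNT.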
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 20f2772..2c90952 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -137,6 +137,12 @@
TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
+
+ if (stop) {
+ err = pasemi_smb_waitready(smbus);
+ if (err)
+ goto reset_out;
+ }
}
return 0;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 8c1b31e..aa1d365 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -961,6 +961,7 @@
"", &piix4_main_adapters[0]);
if (retval < 0)
return retval;
+ piix4_adapter_count = 1;
}
/* Check for auxiliary SMBus on some AMD chipsets */
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 09e5990..06c87c7 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -638,6 +638,11 @@
if (ret < 0)
goto error;
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
for (i = 0; i < cci->data->num_masters; i++) {
if (!cci->master[i].cci)
continue;
@@ -649,14 +654,12 @@
}
}
- pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
- pm_runtime_use_autosuspend(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
return 0;
error_i2c:
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
for (--i ; i >= 0; i--) {
if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 8722ca2..6a7a7a0 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -999,8 +999,10 @@
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = rcar_i2c_clock_calculate(priv);
- if (ret < 0)
- goto out_pm_put;
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto out_pm_disable;
+ }
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
@@ -1029,19 +1031,19 @@
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto out_pm_disable;
+ goto out_pm_put;
priv->irq = ret;
ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
- goto out_pm_disable;
+ goto out_pm_put;
}
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
if (ret < 0)
- goto out_pm_disable;
+ goto out_pm_put;
if (priv->flags & ID_P_HOST_NOTIFY) {
priv->host_notify_client = i2c_new_slave_host_notify_device(adap);
@@ -1058,7 +1060,8 @@
out_del_device:
i2c_del_adapter(&priv->adap);
out_pm_put:
- pm_runtime_put(dev);
+ if (priv->flags & ID_P_PM_BLOCKED)
+ pm_runtime_put(dev);
out_pm_disable:
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 8b113ae..42f1db6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -283,6 +283,7 @@
struct dma_chan *tx_dma_chan;
struct dma_chan *rx_dma_chan;
unsigned int dma_buf_size;
+ struct device *dma_dev;
dma_addr_t dma_phys;
void *dma_buf;
@@ -419,7 +420,7 @@
static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev)
{
if (i2c_dev->dma_buf) {
- dma_free_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
+ dma_free_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size,
i2c_dev->dma_buf, i2c_dev->dma_phys);
i2c_dev->dma_buf = NULL;
}
@@ -466,10 +467,13 @@
i2c_dev->tx_dma_chan = chan;
+ WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device);
+ i2c_dev->dma_dev = chan->device->dev;
+
i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len +
I2C_PACKET_HEADER_SIZE;
- dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
+ dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size,
&dma_phys, GFP_KERNEL | __GFP_NOWARN);
if (!dma_buf) {
dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n");
@@ -1255,7 +1259,7 @@
if (i2c_dev->dma_mode) {
if (i2c_dev->msg_read) {
- dma_sync_single_for_device(i2c_dev->dev,
+ dma_sync_single_for_device(i2c_dev->dma_dev,
i2c_dev->dma_phys,
xfer_size, DMA_FROM_DEVICE);
@@ -1263,7 +1267,7 @@
if (err)
return err;
} else {
- dma_sync_single_for_cpu(i2c_dev->dev,
+ dma_sync_single_for_cpu(i2c_dev->dma_dev,
i2c_dev->dma_phys,
xfer_size, DMA_TO_DEVICE);
}
@@ -1276,7 +1280,7 @@
memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
msg->buf, msg->len);
- dma_sync_single_for_device(i2c_dev->dev,
+ dma_sync_single_for_device(i2c_dev->dma_dev,
i2c_dev->dma_phys,
xfer_size, DMA_TO_DEVICE);
@@ -1327,7 +1331,7 @@
}
if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) {
- dma_sync_single_for_cpu(i2c_dev->dev,
+ dma_sync_single_for_cpu(i2c_dev->dma_dev,
i2c_dev->dma_phys,
xfer_size, DMA_FROM_DEVICE);
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 12c90aa..a77cd86 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -213,6 +213,7 @@
i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
i2c->adap.dev.parent = dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c->adap.dev.fwnode = dev->fwnode;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"Cavium ThunderX i2c adapter at %s", dev_name(dev));
i2c_set_adapdata(&i2c->adap, i2c);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2a8568b..3b564e6 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -756,7 +756,6 @@
static const struct i2c_adapter xiic_adapter = {
.owner = THIS_MODULE,
- .name = DRIVER_NAME,
.class = I2C_CLASS_DEPRECATED,
.algo = &xiic_algorithm,
.quirks = &xiic_quirks,
@@ -793,6 +792,8 @@
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ DRIVER_NAME " %s", pdev->name);
mutex_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
@@ -934,6 +935,7 @@
module_platform_driver(xiic_i2c_driver);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index bdce6d3..34fecf9 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2405,8 +2405,9 @@
if (!adap)
return;
- put_device(&adap->dev);
module_put(adap->owner);
+ /* Should be last, otherwise we risk use-after-free with 'adap' */
+ put_device(&adap->dev);
}
EXPORT_SYMBOL(i2c_put_adapter);
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 5365199..f7a7405 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -261,7 +261,7 @@
err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
- goto err_rollback;
+ goto err_rollback_activation;
err = device_create_file(&pdev->dev, &dev_attr_current_master);
if (err)
@@ -271,8 +271,9 @@
err_rollback_available:
device_remove_file(&pdev->dev, &dev_attr_available_masters);
-err_rollback:
+err_rollback_activation:
i2c_demux_deactivate_master(priv);
+err_rollback:
for (j = 0; j < i; j++) {
of_node_put(priv->chan[j].parent_np);
of_changeset_destroy(&priv->chan[j].chgset);
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index d3acd8d..33024ac 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -134,6 +134,7 @@
return 0;
err_children:
+ of_node_put(child);
i2c_mux_del_adapters(muxc);
err_parent:
i2c_put_adapter(parent);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d793355..b92b032 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -47,11 +47,13 @@
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -94,6 +96,12 @@
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
/*
+ * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
+ * above.
+ */
+#define CPUIDLE_FLAG_IBRS BIT(16)
+
+/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -132,6 +140,24 @@
return index;
}
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ bool smt_active = sched_smt_active();
+ u64 spec_ctrl = spec_ctrl_current();
+ int ret;
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+ ret = intel_idle(dev, drv, index);
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+
+ return ret;
+}
+
/**
* intel_idle_s2idle - Ask the processor to enter the given idle state.
* @dev: cpuidle device of the target CPU.
@@ -653,7 +679,7 @@
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
@@ -661,7 +687,7 @@
{
.name = "C7s",
.desc = "MWAIT 0x33",
- .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
@@ -669,7 +695,7 @@
{
.name = "C8",
.desc = "MWAIT 0x40",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
@@ -677,7 +703,7 @@
{
.name = "C9",
.desc = "MWAIT 0x50",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
@@ -685,7 +711,7 @@
{
.name = "C10",
.desc = "MWAIT 0x60",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
@@ -714,7 +740,7 @@
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
@@ -1501,6 +1527,11 @@
/* Structure copy. */
drv->states[drv->state_count] = cpuidle_state_table[cstate];
+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+ cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+ drv->states[drv->state_count].enter = intel_idle_ibrs;
+ }
+
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
intel_idle_off_by_default(mwait_hint) &&
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index da56488..6aa5a72 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -1068,11 +1068,12 @@
data->trig->dev.parent = dev;
data->trig->ops = &bma180_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
- indio_dev->trig = iio_trigger_get(data->trig);
ret = iio_trigger_register(data->trig);
if (ret)
goto err_trigger_free;
+
+ indio_dev->trig = iio_trigger_get(data->trig);
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index 5ad10db..416090c 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -83,8 +83,27 @@
#define BMA400_ACC_ODR_MIN_WHOLE_HZ 25
#define BMA400_ACC_ODR_MIN_HZ 12
-#define BMA400_SCALE_MIN 38357
-#define BMA400_SCALE_MAX 306864
+/*
+ * BMA400_SCALE_MIN macro value represents m/s^2 for 1 LSB before
+ * converting to micro values for +-2g range.
+ *
+ * For +-2g - 1 LSB = 0.976562 milli g = 0.009576 m/s^2
+ * For +-4g - 1 LSB = 1.953125 milli g = 0.019153 m/s^2
+ * For +-16g - 1 LSB = 7.8125 milli g = 0.076614 m/s^2
+ *
+ * The raw value which is used to select the different ranges is determined
+ * by the first bit set position from the scale value, so BMA400_SCALE_MIN
+ * should be odd.
+ *
+ * Scale values for +-2g, +-4g, +-8g and +-16g are populated into bma400_scales
+ * array by left shifting BMA400_SCALE_MIN.
+ * e.g.:
+ * To select +-2g = 9577 << 0 = raw value to write is 0.
+ * To select +-8g = 9577 << 2 = raw value to write is 2.
+ * To select +-16g = 9577 << 3 = raw value to write is 3.
+ */
+#define BMA400_SCALE_MIN 9577
+#define BMA400_SCALE_MAX 76617
#define BMA400_NUM_REGULATORS 2
#define BMA400_VDD_REGULATOR 0
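An arithmetic cross-check of the new constants, using only figures already given in the comment: 0.976562 milli-g x 9.80665 m/s^2 per g ≈ 0.009577 m/s^2, i.e. 9577 once expressed in micro units, which is BMA400_SCALE_MIN. Doubling per range gives 19154 for ±4g (matching the 0.019153 m/s^2 figure) and about 76616 after three doublings, in line with BMA400_SCALE_MAX for ±16g. Starting from an odd value is what lets the driver recover the range index from the position of the first set bit, as the comment explains.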
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index 7eeba80..58aa6a0 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -13,14 +13,14 @@
#include <linux/bitops.h>
#include <linux/device.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+
#include "bma400.h"
/*
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index a720870..b12e804 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -176,6 +176,7 @@
* @enabled_events: event flags enabled and handled by this driver
*/
struct mma_chip_info {
+ const char *name;
u8 chip_id;
const struct iio_chan_spec *channels;
int num_channels;
@@ -1303,6 +1304,7 @@
static const struct mma_chip_info mma_chip_info_table[] = {
[mma8451] = {
+ .name = "mma8451",
.chip_id = MMA8451_DEVICE_ID,
.channels = mma8451_channels,
.num_channels = ARRAY_SIZE(mma8451_channels),
@@ -1327,6 +1329,7 @@
MMA8452_INT_FF_MT,
},
[mma8452] = {
+ .name = "mma8452",
.chip_id = MMA8452_DEVICE_ID,
.channels = mma8452_channels,
.num_channels = ARRAY_SIZE(mma8452_channels),
@@ -1343,6 +1346,7 @@
MMA8452_INT_FF_MT,
},
[mma8453] = {
+ .name = "mma8453",
.chip_id = MMA8453_DEVICE_ID,
.channels = mma8453_channels,
.num_channels = ARRAY_SIZE(mma8453_channels),
@@ -1359,6 +1363,7 @@
MMA8452_INT_FF_MT,
},
[mma8652] = {
+ .name = "mma8652",
.chip_id = MMA8652_DEVICE_ID,
.channels = mma8652_channels,
.num_channels = ARRAY_SIZE(mma8652_channels),
@@ -1368,6 +1373,7 @@
.enabled_events = MMA8452_INT_FF_MT,
},
[mma8653] = {
+ .name = "mma8653",
.chip_id = MMA8653_DEVICE_ID,
.channels = mma8653_channels,
.num_channels = ARRAY_SIZE(mma8653_channels),
@@ -1382,6 +1388,7 @@
.enabled_events = MMA8452_INT_FF_MT,
},
[fxls8471] = {
+ .name = "fxls8471",
.chip_id = FXLS8471_DEVICE_ID,
.channels = mma8451_channels,
.num_channels = ARRAY_SIZE(mma8451_channels),
@@ -1489,10 +1496,14 @@
int i;
int ret;
- ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
+ /*
+ * Found on fxls8471: after setting the reset bit the chip resets
+ * immediately and will not ACK, so do not check the return value here.
+ * The following code will read the reset register, and check whether
+ * this reset works.
+ */
+ i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
MMA8452_CTRL_REG2_RST);
- if (ret < 0)
- return ret;
for (i = 0; i < 10; i++) {
usleep_range(100, 200);
@@ -1525,13 +1536,6 @@
struct mma8452_data *data;
struct iio_dev *indio_dev;
int ret;
- const struct of_device_id *match;
-
- match = of_match_device(mma8452_dt_ids, &client->dev);
- if (!match) {
- dev_err(&client->dev, "unknown device model\n");
- return -ENODEV;
- }
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -1540,7 +1544,16 @@
data = iio_priv(indio_dev);
data->client = client;
mutex_init(&data->lock);
- data->chip_info = match->data;
+
+ data->chip_info = device_get_match_data(&client->dev);
+ if (!data->chip_info) {
+ if (id) {
+ data->chip_info = &mma_chip_info_table[id->driver_data];
+ } else {
+ dev_err(&client->dev, "unknown device model\n");
+ return -ENODEV;
+ }
+ }
data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(data->vdd_reg))
@@ -1584,11 +1597,11 @@
}
dev_info(&client->dev, "registering %s accelerometer; ID 0x%x\n",
- match->compatible, data->chip_info->chip_id);
+ data->chip_info->name, data->chip_info->chip_id);
i2c_set_clientdata(client, indio_dev);
indio_dev->info = &mma8452_info;
- indio_dev->name = id->name;
+ indio_dev->name = data->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->num_channels;
@@ -1814,7 +1827,7 @@
static struct i2c_driver mma8452_driver = {
.driver = {
.name = "mma8452",
- .of_match_table = of_match_ptr(mma8452_dt_ids),
+ .of_match_table = mma8452_dt_ids,
.pm = &mma8452_pm_ops,
},
.probe = mma8452_probe,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 5a2b0ff..ecd9d8a 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -461,8 +461,6 @@
data->dready_trig->dev.parent = &client->dev;
data->dready_trig->ops = &mxc4005_trigger_ops;
iio_trigger_set_drvdata(data->dready_trig, indio_dev);
- indio_dev->trig = data->dready_trig;
- iio_trigger_get(indio_dev->trig);
ret = devm_iio_trigger_register(&client->dev,
data->dready_trig);
if (ret) {
@@ -470,6 +468,8 @@
"failed to register trigger\n");
return ret;
}
+
+ indio_dev->trig = iio_trigger_get(data->dready_trig);
}
return devm_iio_device_register(&client->dev, indio_dev);
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index bd35009..19ab7d7 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -170,7 +170,6 @@
.sign = 'u',
.realbits = 24,
.storagebits = 32,
- .shift = 8,
.endianness = IIO_BE,
},
};
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index ab204e9..3e6ece0 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -289,10 +289,8 @@
ret = devm_add_action_or_reset(&spi->dev,
ad7292_regulator_disable, st);
- if (ret) {
- regulator_disable(st->reg);
+ if (ret)
return ret;
- }
ret = regulator_get_voltage(st->reg);
if (ret < 0)
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 8c1e866..96eeda4 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -93,6 +93,7 @@
.sign = 'u', \
.realbits = (bits), \
.storagebits = 16, \
+ .shift = 12 - (bits), \
.endianness = IIO_BE, \
}, \
}
@@ -274,7 +275,8 @@
return ret;
if (chan->address == EXTRACT(ret, 12, 4))
- *val = EXTRACT(ret, 0, 12);
+ *val = EXTRACT(ret, chan->scan_type.shift,
+ chan->scan_type.realbits);
else
return -EIO;
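Worked example for the ad7923 hunk above, assuming EXTRACT(ret, off, nbits) takes a bit offset and a field width in that order, as the existing EXTRACT(ret, 12, 4) channel-address check implies: a 10-bit variant gets .shift = 12 - 10 = 2, so the sample is now read as (ret >> 2) & 0x3FF rather than ret & 0xFFF. The conversion result is MSB-aligned within the 12-bit data field, so without the shift the two don't-care LSBs inflated every reading on the narrower parts.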
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 9109da2..cbe1011 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -334,16 +334,19 @@
if (!try_module_get(cl->dev->driver->owner)) {
mutex_unlock(&registered_clients_lock);
+ of_node_put(cln);
return ERR_PTR(-ENODEV);
}
get_device(cl->dev);
cl->info = info;
mutex_unlock(&registered_clients_lock);
+ of_node_put(cln);
return cl;
}
mutex_unlock(&registered_clients_lock);
+ of_node_put(cln);
return ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 4ede7e7..250b78e 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -74,7 +74,7 @@
#define AT91_SAMA5D2_MR_ANACH BIT(23)
/* Tracking Time */
#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
-#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
+#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xf
/* Transfer Time */
#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
@@ -1353,10 +1353,12 @@
ret = at91_adc_read_position(st, chan->channel,
&tmp_val);
*val = tmp_val;
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
- return at91_adc_adjust_val_osr(st, val);
+ return ret;
}
if (chan->type == IIO_PRESSURE) {
ret = iio_device_claim_direct_mode(indio_dev);
@@ -1367,10 +1369,12 @@
ret = at91_adc_read_pressure(st, chan->channel,
&tmp_val);
*val = tmp_val;
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
- return at91_adc_adjust_val_osr(st, val);
+ return ret;
}
/* in this case we have a voltage channel */
@@ -1461,16 +1465,20 @@
/* if no change, optimize out */
if (val == st->oversampling_ratio)
return 0;
+ mutex_lock(&st->lock);
st->oversampling_ratio = val;
/* update ratio */
at91_adc_config_emr(st);
+ mutex_unlock(&st->lock);
return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val < st->soc_info.min_sample_rate ||
val > st->soc_info.max_sample_rate)
return -EINVAL;
+ mutex_lock(&st->lock);
at91_adc_setup_samp_freq(indio_dev, val);
+ mutex_unlock(&st->lock);
return 0;
default:
return -EINVAL;
@@ -1899,6 +1907,9 @@
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
+ if (iio_buffer_enabled(indio_dev))
+ at91_adc_buffer_postdisable(indio_dev);
+
/*
* Do a software reset of the ADC before we go to suspend.
* This will ensure that all pins are free from being muxed by the ADC
@@ -1942,14 +1953,11 @@
if (!iio_buffer_enabled(indio_dev))
return 0;
- /* check if we are enabling triggered buffer or the touchscreen */
- if (at91_adc_current_chan_is_touch(indio_dev))
- return at91_adc_configure_touch(st, true);
- else
- return at91_adc_configure_trigger(st->trig, true);
+ ret = at91_adc_buffer_prepare(indio_dev);
+ if (ret)
+ goto vref_disable_resume;
- /* not needed but more explicit */
- return 0;
+ return at91_adc_configure_trigger(st->trig, true);
vref_disable_resume:
regulator_disable(st->vref);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 0a793e7..38d4a91 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -616,8 +616,10 @@
trig->ops = &at91_adc_trigger_ops;
ret = iio_trigger_register(trig);
- if (ret)
+ if (ret) {
+ iio_trigger_free(trig);
return NULL;
+ }
return trig;
}
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 5f5e8b3..84dbe9e 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -196,6 +196,14 @@
},
.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
},
+ {
+ /* Nuvision Solo 10 Draw */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TMAX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"),
+ },
+ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+ },
{}
};
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 1adddf5..61f373f 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -41,6 +41,19 @@
}
*val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
+
+ /*
+ * The part started a new conversion at the end of the above i2c
+ * transfer, so if the address didn't change since the last call
+ * everything is fine and we can return early.
+ * If not (which should only happen when some sort of bulk
+ * conversion is implemented) we have to program the new
+ * address. Note that this probably fails as the conversion that
+ * was triggered above is like not complete yet and the two
+ * operations have to be done in a single transfer.
+ */
+ if (ddata->addr_prev == address)
+ return 0;
}
ret = i2c_smbus_write_byte(st->client,
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index e573da5..6527827 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -38,8 +38,8 @@
#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
-/* Internal voltage reference in uV */
-#define MCP3911_INT_VREF_UV 1200000
+/* Internal voltage reference in mV */
+#define MCP3911_INT_VREF_MV 1200
#define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
@@ -111,6 +111,8 @@
if (ret)
goto out;
+ *val = sign_extend32(*val, 23);
+
ret = IIO_VAL_INT;
break;
@@ -135,11 +137,18 @@
*val = ret / 1000;
} else {
- *val = MCP3911_INT_VREF_UV;
+ *val = MCP3911_INT_VREF_MV;
}
- *val2 = 24;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * For a 24-bit conversion:
+ * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
+ * Voltage = Raw * Vref / (2^23 * Gain * 1.5)
+ */
+
+ /* val2 = (2^23 * 1.5) */
+ *val2 = 12582912;
+ ret = IIO_VAL_FRACTIONAL;
break;
}
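To see what the reworked mcp3911 scale means for a consumer, here is a standalone userspace sketch of the arithmetic with the internal reference and a gain of 1 (the raw sample is made up): the driver now reports val = 1200 mV and val2 = 2^23 * 1.5, so raw * val / val2 yields millivolts directly, and the added sign_extend32() makes negative inputs come out negative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int val  = 1200;          /* MCP3911_INT_VREF_MV */
        int val2 = 12582912;      /* 2^23 * 1.5 */
        int32_t raw = 4194304;    /* hypothetical sign-extended 24-bit sample (2^22) */

        double scale = (double)val / val2;            /* IIO_VAL_FRACTIONAL */
        printf("scale = %.9f mV per LSB\n", scale);   /* ~0.000095367 */
        printf("input = %.3f mV\n", raw * scale);     /* 400.000 mV */
        return 0;
}

The replaced code divided a microvolt reference by 2^24 (IIO_VAL_FRACTIONAL_LOG2 with val2 = 24), which does not match the transfer function the new comment documents.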
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 331a9a7..acd9420 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -56,7 +56,8 @@
MP2629_MAP(SYSTEM_VOLT, "system-volt"),
MP2629_MAP(INPUT_VOLT, "input-volt"),
MP2629_MAP(BATT_CURRENT, "batt-current"),
- MP2629_MAP(INPUT_CURRENT, "input-current")
+ MP2629_MAP(INPUT_CURRENT, "input-current"),
+ { }
};
static int mp2629_read_raw(struct iio_dev *indio_dev,
@@ -73,7 +74,7 @@
if (ret)
return ret;
- if (chan->address == MP2629_INPUT_VOLT)
+ if (chan->channel == MP2629_INPUT_VOLT)
rval &= GENMASK(6, 0);
*val = rval;
return IIO_VAL_INT;
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index aa32a1f..2b463e1 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -36,8 +36,8 @@
/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
-#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
-#define SC27XX_ADC_SCALE_SHIFT 8
+#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
+#define SC27XX_ADC_SCALE_SHIFT 9
/* Bits definitions for SC27XX_ADC_INT_EN registers */
#define SC27XX_ADC_IRQ_EN BIT(0)
@@ -103,14 +103,14 @@
100, 341,
};
-static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
- 4200, 856,
- 3600, 733,
+static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
+ 4200, 850,
+ 3600, 728,
};
-static const struct sc27xx_adc_linear_graph small_scale_graph_calib = {
- 1000, 833,
- 100, 80,
+static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
+ 1000, 838,
+ 100, 84,
};
static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
@@ -130,11 +130,11 @@
size_t len;
if (big_scale) {
- calib_graph = &big_scale_graph_calib;
+ calib_graph = &sc2731_big_scale_graph_calib;
graph = &big_scale_graph;
cell_name = "big_scale_calib";
} else {
- calib_graph = &small_scale_graph_calib;
+ calib_graph = &sc2731_small_scale_graph_calib;
graph = &small_scale_graph;
cell_name = "small_scale_calib";
}
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index a83199b..20fc867 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -64,6 +64,7 @@
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
* @has_syscfg: SYSCFG capability flags
* @num_irqs: number of interrupt lines
+ * @num_adcs: maximum number of ADC instances in the common registers
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
@@ -71,6 +72,7 @@
u32 max_clk_rate_hz;
unsigned int has_syscfg;
unsigned int num_irqs;
+ unsigned int num_adcs;
};
/**
@@ -333,7 +335,7 @@
* before invoking the interrupt handler (e.g. call ISR only for
* IRQ-enabled ADCs).
*/
- for (i = 0; i < priv->cfg->num_irqs; i++) {
+ for (i = 0; i < priv->cfg->num_adcs; i++) {
if ((status & priv->cfg->regs->eoc_msk[i] &&
stm32_adc_eoc_enabled(priv, i)) ||
(status & priv->cfg->regs->ovr_msk[i]))
@@ -784,6 +786,7 @@
.clk_sel = stm32f4_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.num_irqs = 1,
+ .num_adcs = 3,
};
static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
@@ -792,14 +795,16 @@
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER,
.num_irqs = 1,
+ .num_adcs = 2,
};
static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.regs = &stm32h7_adc_common_regs,
.clk_sel = stm32h7_adc_clk_sel,
- .max_clk_rate_hz = 40000000,
+ .max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
.num_irqs = 2,
+ .num_adcs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 9939dee..e60ad48 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1265,7 +1265,6 @@
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
/* Check ovr status right now, as ovr mask should be already disabled */
if (status & regs->isr_ovr.mask) {
@@ -1280,11 +1279,6 @@
return IRQ_HANDLED;
}
- if (!(status & mask))
- dev_err_ratelimited(&indio_dev->dev,
- "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
- mask, status);
-
return IRQ_NONE;
}
@@ -1294,10 +1288,6 @@
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
-
- if (!(status & mask))
- return IRQ_WAKE_THREAD;
if (status & regs->isr_ovr.mask) {
/*
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index fba659b..64305d9 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -61,7 +61,7 @@
static int stmpe_read_voltage(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -79,7 +79,7 @@
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
STMPE_ADC_CH(info->channel));
mutex_unlock(&info->lock);
@@ -96,7 +96,7 @@
static int stmpe_read_temp(struct stmpe_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- long ret;
+ unsigned long ret;
mutex_lock(&info->lock);
@@ -114,7 +114,7 @@
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
mutex_unlock(&info->lock);
return -ETIMEDOUT;
}
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index c6416ad..256177b 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -911,6 +911,8 @@
ret = devm_request_threaded_irq(dev, irq, NULL,
twl6030_gpadc_irq_handler,
IRQF_ONESHOT, "twl6030_gpadc", indio_dev);
+ if (ret)
+ return ret;
ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
if (ret < 0) {
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index e42ea2b..3809f98 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -38,7 +38,7 @@
int *val, int *val2, long mask)
{
struct rescale *rescale = iio_priv(indio_dev);
- unsigned long long tmp;
+ s64 tmp;
int ret;
switch (mask) {
@@ -59,10 +59,10 @@
*val2 = rescale->denominator;
return IIO_VAL_FRACTIONAL;
case IIO_VAL_FRACTIONAL_LOG2:
- tmp = *val * 1000000000LL;
- do_div(tmp, rescale->denominator);
+ tmp = (s64)*val * 1000000000LL;
+ tmp = div_s64(tmp, rescale->denominator);
tmp *= rescale->numerator;
- do_div(tmp, 1000000000LL);
+ tmp = div_s64(tmp, 1000000000LL);
*val = tmp;
return ret;
default:
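The iio-rescale hunk above switches the intermediate from unsigned long long with do_div() to s64 with div_s64() because the value being scaled can be negative. A small standalone illustration of why that matters (the numbers are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t tmp = -10 * 1000000000LL;      /* a negative intermediate value */
        uint64_t utmp = (uint64_t)tmp;         /* what an unsigned divide effectively sees */

        printf("signed   : %lld\n", (long long)(tmp / 3));           /* -3333333333 */
        printf("unsigned : %llu\n", (unsigned long long)(utmp / 3)); /* huge bogus value */
        return 0;
}

do_div() performs an unsigned 64-bit division, so a negative *val fell into the second case; div_s64() keeps the sign intact.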
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 60dd87e..384d167 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -500,11 +500,11 @@
data->drdy_trig->dev.parent = &client->dev;
data->drdy_trig->ops = &ccs811_trigger_ops;
iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
- indio_dev->trig = data->drdy_trig;
- iio_trigger_get(indio_dev->trig);
ret = iio_trigger_register(data->drdy_trig);
if (ret)
goto err_poweroff;
+
+ indio_dev->trig = iio_trigger_get(data->drdy_trig);
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 7a69c1b..56206fd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -70,16 +70,18 @@
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
{
- int err;
+ int err = 0;
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ mutex_lock(&sdata->odr_lock);
+
if (!sdata->sensor_settings->odr.mask)
- return 0;
+ goto unlock_mutex;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
- goto st_sensors_match_odr_error;
+ goto unlock_mutex;
if ((sdata->sensor_settings->odr.addr ==
sdata->sensor_settings->pw.addr) &&
@@ -102,7 +104,9 @@
if (err >= 0)
sdata->odr = odr_out.hz;
-st_sensors_match_odr_error:
+unlock_mutex:
+ mutex_unlock(&sdata->odr_lock);
+
return err;
}
EXPORT_SYMBOL(st_sensors_set_odr);
@@ -364,6 +368,8 @@
struct st_sensors_platform_data *of_pdata;
int err = 0;
+ mutex_init(&sdata->odr_lock);
+
/* If OF/DT pdata exists, it will take precedence of anything else */
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
if (IS_ERR(of_pdata))
@@ -557,18 +563,24 @@
err = -EBUSY;
goto out;
} else {
+ mutex_lock(&sdata->odr_lock);
err = st_sensors_set_enable(indio_dev, true);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&sdata->odr_lock);
goto out;
+ }
msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
err = st_sensors_read_axis_data(indio_dev, ch, val);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&sdata->odr_lock);
goto out;
+ }
*val = *val >> ch->scan_type.shift;
err = st_sensors_set_enable(indio_dev, false);
+ mutex_unlock(&sdata->odr_lock);
}
out:
mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index e86886c..b168eb1 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -178,7 +178,7 @@
switch (m) {
case IIO_CHAN_INFO_RAW:
- *val = st->cached_val;
+ *val = st->cached_val >> chan->scan_type.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_mv;
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 0405e92..9872644 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -523,7 +523,7 @@
if (!ret)
st->channel_modes[reg] = tmp;
- fwnode_property_read_u32(child, "adi,off-state", &tmp);
+ ret = fwnode_property_read_u32(child, "adi,off-state", &tmp);
if (!ret)
st->channel_offstate[reg] = tmp;
}
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 5b4df36..4cc855c 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <asm/unaligned.h>
+
#define AD5593R_MODE_CONF (0 << 4)
#define AD5593R_MODE_DAC_WRITE (1 << 4)
#define AD5593R_MODE_ADC_READBACK (4 << 4)
@@ -20,6 +22,24 @@
#define AD5593R_MODE_GPIO_READBACK (6 << 4)
#define AD5593R_MODE_REG_READBACK (7 << 4)
+static int ad5593r_read_word(struct i2c_client *i2c, u8 reg, u16 *value)
+{
+ int ret;
+ u8 buf[2];
+
+ ret = i2c_smbus_write_byte(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ *value = get_unaligned_be16(buf);
+
+ return 0;
+}
+
static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
@@ -38,13 +58,7 @@
if (val < 0)
return (int) val;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
- if (val < 0)
- return (int) val;
-
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, value);
}
static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
@@ -58,25 +72,19 @@
static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
- if (val < 0)
- return (int) val;
-
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_REG_READBACK | reg, value);
}
static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
+ u16 val;
+ int ret;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
- if (val < 0)
- return (int) val;
+ ret = ad5593r_read_word(i2c, AD5593R_MODE_GPIO_READBACK, &val);
+ if (ret)
+ return ret;
*value = (u8) val;
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index c0b7ef9..c24f609 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -575,10 +575,9 @@
*/
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
- if (!swd) {
- ret = -ENOMEM;
- goto error_kzalloc;
- }
+ if (!swd)
+ return ERR_PTR(-ENOMEM);
+
/*
* Allocate an IIO device.
*
@@ -590,7 +589,7 @@
indio_dev = iio_device_alloc(parent, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
- goto error_ret;
+ goto error_free_swd;
}
st = iio_priv(indio_dev);
@@ -616,6 +615,10 @@
* indio_dev->name = spi_get_device_id(spi)->name;
*/
indio_dev->name = kstrdup(name, GFP_KERNEL);
+ if (!indio_dev->name) {
+ ret = -ENOMEM;
+ goto error_free_device;
+ }
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -632,7 +635,7 @@
ret = iio_simple_dummy_events_register(indio_dev);
if (ret < 0)
- goto error_free_device;
+ goto error_free_name;
ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
@@ -649,11 +652,12 @@
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
iio_simple_dummy_events_unregister(indio_dev);
+error_free_name:
+ kfree(indio_dev->name);
error_free_device:
iio_device_free(indio_dev);
-error_ret:
+error_free_swd:
kfree(swd);
-error_kzalloc:
return ERR_PTR(ret);
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 39e1c43..84c6ad4 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -872,6 +872,7 @@
ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
MPU3050_PWR_MGM_SLEEP, 0);
if (ret) {
+ regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
dev_err(mpu3050->dev, "error setting power mode\n");
return ret;
}
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 82f03a4..5fd6188 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -731,7 +731,7 @@
ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
if (ret)
- return ret;
+ goto disable_regulator;
usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
@@ -742,29 +742,37 @@
if (use_spi) {
ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
if (ret)
- return ret;
+ goto disable_regulator;
}
ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
if (ret) {
dev_err(dev, "Error reading chip id\n");
- return ret;
+ goto disable_regulator;
}
if (val != BMI160_CHIP_ID_VAL) {
dev_err(dev, "Wrong chip id, got %x expected %x\n",
val, BMI160_CHIP_ID_VAL);
- return -ENODEV;
+ ret = -ENODEV;
+ goto disable_regulator;
}
ret = bmi160_set_mode(data, BMI160_ACCEL, true);
if (ret)
- return ret;
+ goto disable_regulator;
ret = bmi160_set_mode(data, BMI160_GYRO, true);
if (ret)
- return ret;
+ goto disable_accel;
return 0;
+
+disable_accel:
+ bmi160_set_mode(data, BMI160_ACCEL, false);
+
+disable_regulator:
+ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
+ return ret;
}
static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig,
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index c0f5059..995a9dc 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -17,6 +17,7 @@
#include "inv_icm42600_buffer.h"
enum inv_icm42600_chip {
+ INV_CHIP_INVALID,
INV_CHIP_ICM42600,
INV_CHIP_ICM42602,
INV_CHIP_ICM42605,
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 8bd7718..dcbd4e9 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -565,7 +565,7 @@
bool open_drain;
int ret;
- if (chip < 0 || chip >= INV_CHIP_NB) {
+ if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) {
dev_err(dev, "invalid chip = %d\n", chip);
return -ENODEV;
}
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 85b1934..5389101 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -18,12 +18,15 @@
unsigned int mask, val;
int ret;
- /* setup interface registers */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
- INV_ICM42600_INTF_CONFIG6_MASK,
- INV_ICM42600_INTF_CONFIG6_I3C_EN);
- if (ret)
- return ret;
+ /*
+ * setup interface registers
+ * This register write to REG_INTF_CONFIG6 enables a spike filter that
+ * affects the line and can prevent the I2C ACK from being seen by the
+ * controller. So we don't test the return value.
+ */
+ regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
+ INV_ICM42600_INTF_CONFIG6_MASK,
+ INV_ICM42600_INTF_CONFIG6_I3C_EN);
ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c
index 9ae793a..a7714d3 100644
--- a/drivers/iio/industrialio-sw-trigger.c
+++ b/drivers/iio/industrialio-sw-trigger.c
@@ -58,8 +58,12 @@
t->group = configfs_register_default_group(iio_triggers_group, t->name,
&iio_trigger_type_group_type);
- if (IS_ERR(t->group))
+ if (IS_ERR(t->group)) {
+ mutex_lock(&iio_trigger_types_lock);
+ list_del(&t->list);
+ mutex_unlock(&iio_trigger_types_lock);
ret = PTR_ERR(t->group);
+ }
return ret;
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index ede99e0..c32b257 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -136,9 +136,10 @@
idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
iio_dev_node_match);
- of_node_put(iiospec.np);
- if (idev == NULL)
+ if (idev == NULL) {
+ of_node_put(iiospec.np);
return -EPROBE_DEFER;
+ }
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
@@ -146,6 +147,7 @@
index = indio_dev->info->of_xlate(indio_dev, &iiospec);
else
index = __of_iio_simple_xlate(indio_dev, &iiospec);
+ of_node_put(iiospec.np);
if (index < 0)
goto err_put;
channel->channel = &indio_dev->channels[index];
@@ -561,28 +563,50 @@
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int raw, int *processed, unsigned int scale)
{
- int scale_type, scale_val, scale_val2, offset;
+ int scale_type, scale_val, scale_val2;
+ int offset_type, offset_val, offset_val2;
s64 raw64 = raw;
- int ret;
- ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
- if (ret >= 0)
- raw64 += offset;
+ offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
+ IIO_CHAN_INFO_OFFSET);
+ if (offset_type >= 0) {
+ switch (offset_type) {
+ case IIO_VAL_INT:
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ case IIO_VAL_INT_PLUS_NANO:
+ /*
+ * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
+ * implicitly truncate the offset to its integer form.
+ */
+ break;
+ case IIO_VAL_FRACTIONAL:
+ offset_val /= offset_val2;
+ break;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ offset_val >>= offset_val2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ raw64 += offset_val;
+ }
scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
IIO_CHAN_INFO_SCALE);
if (scale_type < 0) {
/*
- * Just pass raw values as processed if no scaling is
- * available.
+ * If no channel scaling is available apply consumer scale to
+ * raw value and return.
*/
- *processed = raw;
+ *processed = raw * scale;
return 0;
}
switch (scale_type) {
case IIO_VAL_INT:
- *processed = raw64 * scale_val;
+ *processed = raw64 * scale_val * scale;
break;
case IIO_VAL_INT_PLUS_MICRO:
if (scale_val2 < 0)
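The inkern.c hunk above makes iio_convert_raw_to_processed_unlocked() honour the return type of the offset read and fold the consumer scale into every branch, so the result is essentially (raw + offset) * channel_scale * consumer_scale. A compact user-space illustration of that arithmetic for a fractional channel scale is sketched below; the values are made up and no real IIO calls are involved.

#include <stdio.h>

/* processed = (raw + offset) * channel_scale * consumer_scale
 * The channel scale here is fractional (scale_val / scale_val2), as with
 * IIO_VAL_FRACTIONAL, and the consumer scale is e.g. 1000 for milli-units.
 */
static long long convert(long long raw, long long offset,
			 long long scale_val, long long scale_val2,
			 unsigned int consumer_scale)
{
	long long tmp = (raw + offset) * scale_val * consumer_scale;

	return tmp / scale_val2;
}

int main(void)
{
	/* raw ADC code 512, offset -128, scale 5/1024 V per code, result in mV */
	printf("%lld mV\n", convert(512, -128, 5, 1024, 1000));	/* prints 1875 mV */
	return 0;
}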
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 9afb3fc..4a7ccf2 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -53,9 +53,6 @@
#define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2
#define APDS9960_REG_CONFIG_2 0x90
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5
-
#define APDS9960_REG_ID 0x92
#define APDS9960_REG_STATUS 0x93
@@ -76,6 +73,9 @@
#define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6
#define APDS9960_REG_GCONF_2 0xa3
+#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60
+#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5
+
#define APDS9960_REG_GOFFSET_U 0xa4
#define APDS9960_REG_GOFFSET_D 0xa5
#define APDS9960_REG_GPULSE 0xa6
@@ -395,9 +395,9 @@
}
ret = regmap_update_bits(data->regmap,
- APDS9960_REG_CONFIG_2,
- APDS9960_REG_CONFIG_2_GGAIN_MASK,
- idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT);
+ APDS9960_REG_GCONF_2,
+ APDS9960_REG_GCONF_2_GGAIN_MASK,
+ idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT);
if (!ret)
data->pxs_gain = idx;
mutex_unlock(&data->lock);
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 2f8b494..74e7547 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -627,7 +627,7 @@
ISL29028_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&client->dev);
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev,
"%s(): iio registration failed with error %d\n",
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 40b7dd2..e39d512 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -856,7 +856,7 @@
TSL2583_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&clientp->dev);
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&clientp->dev, "%s: iio registration failed\n",
__func__);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index d988b6a..3774e59 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -389,6 +389,7 @@
if (ret) {
dev_warn(&data->client->dev,
"Failed to enable specified Vid supply\n");
+ regulator_disable(data->vdd);
return ret;
}
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 0730380..cf8b92f 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -89,6 +89,7 @@
s32 c00, c10, c20, c30, c01, c11, c21;
s32 pressure_raw;
s32 temp_raw;
+ bool timeout_recovery_failed;
};
static const struct iio_chan_spec dps310_channels[] = {
@@ -159,6 +160,102 @@
return 0;
}
+/*
+ * Some versions of the chip will read temperatures in the ~60C range when
+ * it's actually ~20C. This is the manufacturer recommended workaround
+ * to correct the issue. The registers used below are undocumented.
+ */
+static int dps310_temp_workaround(struct dps310_data *data)
+{
+ int rc;
+ int reg;
+
+ rc = regmap_read(data->regmap, 0x32, &reg);
+ if (rc)
+ return rc;
+
+ /*
+ * If bit 1 is set then the device is okay, and the workaround does not
+ * need to be applied
+ */
+ if (reg & BIT(1))
+ return 0;
+
+ rc = regmap_write(data->regmap, 0x0e, 0xA5);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0f, 0x96);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x62, 0x02);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0e, 0x00);
+ if (rc)
+ return rc;
+
+ return regmap_write(data->regmap, 0x0f, 0x00);
+}
+
+static int dps310_startup(struct dps310_data *data)
+{
+ int rc;
+ int ready;
+
+ /*
+ * Set up pressure sensor in single sample, one measurement per second
+ * mode
+ */
+ rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
+ if (rc)
+ return rc;
+
+ /*
+ * Set up external (MEMS) temperature sensor in single sample, one
+ * measurement per second mode
+ */
+ rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
+ if (rc)
+ return rc;
+
+ /* Temp and pressure shifts are disabled when PRC <= 8 */
+ rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
+ DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
+ if (rc)
+ return rc;
+
+ /* MEAS_CFG doesn't update correctly unless first written with 0 */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, 0);
+ if (rc)
+ return rc;
+
+ /* Turn on temperature and pressure measurement in the background */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
+ DPS310_TEMP_EN | DPS310_BACKGROUND);
+ if (rc)
+ return rc;
+
+ /*
+ * Calibration coefficients required for reporting temperature.
+ * They are available 40ms after the device has started
+ */
+ rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
+ ready & DPS310_COEF_RDY, 10000, 40000);
+ if (rc)
+ return rc;
+
+ rc = dps310_get_coefs(data);
+ if (rc)
+ return rc;
+
+ return dps310_temp_workaround(data);
+}
+
static int dps310_get_pres_precision(struct dps310_data *data)
{
int rc;
@@ -297,11 +394,69 @@
return scale_factors[ilog2(rc)];
}
+static int dps310_reset_wait(struct dps310_data *data)
+{
+ int rc;
+
+ rc = regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ if (rc)
+ return rc;
+
+ /* Wait for device chip access: 2.5ms in specification */
+ usleep_range(2500, 12000);
+ return 0;
+}
+
+static int dps310_reset_reinit(struct dps310_data *data)
+{
+ int rc;
+
+ rc = dps310_reset_wait(data);
+ if (rc)
+ return rc;
+
+ return dps310_startup(data);
+}
+
+static int dps310_ready_status(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int sleep = DPS310_POLL_SLEEP_US(timeout);
+ int ready;
+
+ return regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, ready & ready_bit,
+ sleep, timeout);
+}
+
+static int dps310_ready(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int rc;
+
+ rc = dps310_ready_status(data, ready_bit, timeout);
+ if (rc) {
+ if (rc == -ETIMEDOUT && !data->timeout_recovery_failed) {
+ /* Reset and reinitialize the chip. */
+ if (dps310_reset_reinit(data)) {
+ data->timeout_recovery_failed = true;
+ } else {
+ /* Try again to get sensor ready status. */
+ if (dps310_ready_status(data, ready_bit, timeout))
+ data->timeout_recovery_failed = true;
+ else
+ return 0;
+ }
+ }
+
+ return rc;
+ }
+
+ data->timeout_recovery_failed = false;
+ return 0;
+}
+
static int dps310_read_pres_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
s32 raw;
u8 val[3];
@@ -313,9 +468,7 @@
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_PRS_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
+ rc = dps310_ready(data, DPS310_PRS_RDY, timeout);
if (rc)
goto done;
@@ -352,7 +505,6 @@
{
int rc;
int rate;
- int ready;
int timeout;
if (mutex_lock_interruptible(&data->lock))
@@ -362,10 +514,8 @@
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_TMP_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
- if (rc < 0)
+ rc = dps310_ready(data, DPS310_TMP_RDY, timeout);
+ if (rc)
goto done;
rc = dps310_read_temp_ready(data);
@@ -660,7 +810,7 @@
{
struct dps310_data *data = action_data;
- regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ dps310_reset_wait(data);
}
static const struct regmap_config dps310_regmap_config = {
@@ -677,52 +827,12 @@
.write_raw = dps310_write_raw,
};
-/*
- * Some verions of chip will read temperatures in the ~60C range when
- * its actually ~20C. This is the manufacturer recommended workaround
- * to correct the issue. The registers used below are undocumented.
- */
-static int dps310_temp_workaround(struct dps310_data *data)
-{
- int rc;
- int reg;
-
- rc = regmap_read(data->regmap, 0x32, &reg);
- if (rc < 0)
- return rc;
-
- /*
- * If bit 1 is set then the device is okay, and the workaround does not
- * need to be applied
- */
- if (reg & BIT(1))
- return 0;
-
- rc = regmap_write(data->regmap, 0x0e, 0xA5);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0f, 0x96);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x62, 0x02);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0e, 0x00);
- if (rc < 0)
- return rc;
-
- return regmap_write(data->regmap, 0x0f, 0x00);
-}
-
static int dps310_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct dps310_data *data;
struct iio_dev *iio;
- int rc, ready;
+ int rc;
iio = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!iio)
@@ -747,54 +857,8 @@
if (rc)
return rc;
- /*
- * Set up pressure sensor in single sample, one measurement per second
- * mode
- */
- rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
-
- /*
- * Set up external (MEMS) temperature sensor in single sample, one
- * measurement per second mode
- */
- rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
- if (rc < 0)
- return rc;
-
- /* Temp and pressure shifts are disabled when PRC <= 8 */
- rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
- DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
- if (rc < 0)
- return rc;
-
- /* MEAS_CFG doesn't update correctly unless first written with 0 */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, 0);
- if (rc < 0)
- return rc;
-
- /* Turn on temperature and pressure measurement in the background */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
- DPS310_TEMP_EN | DPS310_BACKGROUND);
- if (rc < 0)
- return rc;
-
- /*
- * Calibration coefficients required for reporting temperature.
- * They are available 40ms after the device has started
- */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_COEF_RDY, 10000, 40000);
- if (rc < 0)
- return rc;
-
- rc = dps310_get_coefs(data);
- if (rc < 0)
- return rc;
-
- rc = dps310_temp_workaround(data);
- if (rc < 0)
+ rc = dps310_startup(data);
+ if (rc)
return rc;
rc = devm_iio_device_register(&client->dev, iio);
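The dps310 changes above route every MEAS_CFG readiness poll through dps310_ready(), which on a timeout performs one reset-and-reinit cycle, retries the poll, and only then latches timeout_recovery_failed so the recovery is not attempted again on every read. A rough stand-alone sketch of that one-shot recovery pattern is shown below; poll_ready() and reset_reinit() are hypothetical helpers, not the driver's functions.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;
static bool recovery_failed;

static int poll_ready(void)   { return ++attempts < 2 ? -ETIMEDOUT : 0; }
static int reset_reinit(void) { puts("reset + reinit"); return 0; }

static int wait_ready(void)
{
	int rc = poll_ready();

	if (rc == -ETIMEDOUT && !recovery_failed) {
		if (reset_reinit() || poll_ready()) {
			recovery_failed = true;	/* give up on later timeouts */
			return rc;
		}
		rc = 0;				/* recovered on the retry */
	}

	if (!rc)
		recovery_failed = false;
	return rc;
}

int main(void)
{
	printf("wait_ready() = %d\n", wait_ready());
	return 0;
}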
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index bc06271..5e2d2d4 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -25,13 +25,6 @@
MS5607,
};
-struct ms5611_chip_info {
- u16 prom[MS5611_PROM_WORDS_NB];
-
- int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
- s32 *temp, s32 *pressure);
-};
-
/*
* OverSampling Rate descriptor.
* Warning: cmd MUST be kept aligned on a word boundary (see
@@ -50,12 +43,15 @@
const struct ms5611_osr *pressure_osr;
const struct ms5611_osr *temp_osr;
- int (*reset)(struct device *dev);
- int (*read_prom_word)(struct device *dev, int index, u16 *word);
- int (*read_adc_temp_and_pressure)(struct device *dev,
+ u16 prom[MS5611_PROM_WORDS_NB];
+
+ int (*reset)(struct ms5611_state *st);
+ int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
+ int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
s32 *temp, s32 *pressure);
- struct ms5611_chip_info *chip_info;
+ int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp,
+ s32 *pressure);
struct regulator *vdd;
};
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 214b0d2..874a73b 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -85,8 +85,7 @@
struct ms5611_state *st = iio_priv(indio_dev);
for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
- ret = st->read_prom_word(&indio_dev->dev,
- i, &st->chip_info->prom[i]);
+ ret = st->read_prom_word(st, i, &st->prom[i]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read prom at %d\n", i);
@@ -94,7 +93,7 @@
}
}
- if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
+ if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
dev_err(&indio_dev->dev, "PROM integrity check failed\n");
return -ENODEV;
}
@@ -108,28 +107,27 @@
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
+ ret = st->read_adc_temp_and_pressure(st, temp, pressure);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read temperature and pressure\n");
return ret;
}
- return st->chip_info->temp_and_pressure_compensate(st->chip_info,
- temp, pressure);
+ return st->compensate_temp_and_pressure(st, temp, pressure);
}
-static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
s32 t = *temp, p = *pressure;
s64 off, sens, dt;
- dt = t - (chip_info->prom[5] << 8);
- off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
- sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+ dt = t - (st->prom[5] << 8);
+ off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
+ sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
- t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ t = 2000 + ((st->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2;
@@ -155,17 +153,17 @@
return 0;
}
-static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
s32 t = *temp, p = *pressure;
s64 off, sens, dt;
- dt = t - (chip_info->prom[5] << 8);
- off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
- sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+ dt = t - (st->prom[5] << 8);
+ off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6);
+ sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7);
- t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+ t = 2000 + ((st->prom[6] * dt) >> 23);
if (t < 2000) {
s64 off2, sens2, t2, tmp;
@@ -196,7 +194,7 @@
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->reset(&indio_dev->dev);
+ ret = st->reset(st);
if (ret < 0) {
dev_err(&indio_dev->dev, "failed to reset device\n");
return ret;
@@ -343,15 +341,6 @@
static const unsigned long ms5611_scan_masks[] = {0x3, 0};
-static struct ms5611_chip_info chip_info_tbl[] = {
- [MS5611] = {
- .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
- },
- [MS5607] = {
- .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
- }
-};
-
static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
@@ -434,7 +423,20 @@
struct ms5611_state *st = iio_priv(indio_dev);
mutex_init(&st->lock);
- st->chip_info = &chip_info_tbl[type];
+
+ switch (type) {
+ case MS5611:
+ st->compensate_temp_and_pressure =
+ ms5611_temp_and_pressure_compensate;
+ break;
+ case MS5607:
+ st->compensate_temp_and_pressure =
+ ms5607_temp_and_pressure_compensate;
+ break;
+ default:
+ return -EINVAL;
+ }
+
st->temp_osr =
&ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
st->pressure_osr =
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 7c04f73..cccc40f 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -20,17 +20,15 @@
#include "ms5611.h"
-static int ms5611_i2c_reset(struct device *dev)
+static int ms5611_i2c_reset(struct ms5611_state *st)
{
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
-
return i2c_smbus_write_byte(st->client, MS5611_RESET);
}
-static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = i2c_smbus_read_word_swapped(st->client,
MS5611_READ_PROM_WORD + (index << 1));
@@ -57,11 +55,10 @@
return 0;
}
-static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
ret = i2c_smbus_write_byte(st->client, osr->cmd);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 45d3a7d..3039fe8 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -15,18 +15,17 @@
#include "ms5611.h"
-static int ms5611_spi_reset(struct device *dev)
+static int ms5611_spi_reset(struct ms5611_state *st)
{
u8 cmd = MS5611_RESET;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
return spi_write_then_read(st->client, &cmd, 1, NULL, 0);
}
-static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1));
if (ret < 0)
@@ -37,11 +36,10 @@
return 0;
}
-static int ms5611_spi_read_adc(struct device *dev, s32 *val)
+static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val)
{
int ret;
u8 buf[3] = { MS5611_READ_ADC };
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_write_then_read(st->client, buf, 1, buf, 3);
if (ret < 0)
@@ -52,11 +50,10 @@
return 0;
}
-static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
/*
@@ -68,7 +65,7 @@
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- ret = ms5611_spi_read_adc(dev, temp);
+ ret = ms5611_spi_read_adc(st, temp);
if (ret < 0)
return ret;
@@ -78,7 +75,7 @@
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- return ms5611_spi_read_adc(dev, pressure);
+ return ms5611_spi_read_adc(st, pressure);
}
static int ms5611_spi_probe(struct spi_device *spi)
@@ -94,7 +91,7 @@
spi_set_drvdata(spi, indio_dev);
spi->mode = SPI_MODE_0;
- spi->max_speed_hz = 20000000;
+ spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0)
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index 235e125..3d3ab86 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -104,6 +104,7 @@
u16 tries = 20;
u8 buffer[12];
int ret;
+ unsigned long time_left;
ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
if (ret < 0)
@@ -112,10 +113,8 @@
if (data->client->irq) {
reinit_completion(&data->completion);
- ret = wait_for_completion_timeout(&data->completion, HZ/10);
- if (ret < 0)
- return ret;
- else if (ret == 0)
+ time_left = wait_for_completion_timeout(&data->completion, HZ/10);
+ if (time_left == 0)
return -ETIMEDOUT;
vl53l0x_clear_irq(data);
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 3b4a0e6..8306daa 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -1376,13 +1376,6 @@
return ret;
}
- st->iio_chan = devm_kzalloc(&st->spi->dev,
- st->iio_channels * sizeof(*st->iio_chan),
- GFP_KERNEL);
-
- if (!st->iio_chan)
- return -ENOMEM;
-
ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG,
LTC2983_NOTCH_FREQ_MASK,
LTC2983_NOTCH_FREQ(st->filter_notch_freq));
@@ -1494,6 +1487,12 @@
if (ret)
return ret;
+ st->iio_chan = devm_kzalloc(&spi->dev,
+ st->iio_channels * sizeof(*st->iio_chan),
+ GFP_KERNEL);
+ if (!st->iio_chan)
+ return -ENOMEM;
+
ret = ltc2983_setup(st, true);
if (ret)
return ret;
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index e09e580..9ed5b94 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -196,6 +196,7 @@
}
iio_trigger_unregister(t->trig);
+ irq_work_sync(&t->work);
iio_trigger_free(t->trig);
list_del(&t->l);
@@ -208,9 +209,13 @@
static int __init iio_sysfs_trig_init(void)
{
+ int ret;
device_initialize(&iio_sysfs_trig_dev);
dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
- return device_add(&iio_sysfs_trig_dev);
+ ret = device_add(&iio_sysfs_trig_dev);
+ if (ret)
+ put_device(&iio_sysfs_trig_dev);
+ return ret;
}
module_init(iio_sysfs_trig_init);
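The iio-trig-sysfs hunk above fixes the module init path: once device_initialize() has been called, a failed device_add() must be balanced with put_device() so the reference held by the embedded kobject is dropped instead of leaked. The sketch below restates that idiom in isolation; it is an illustrative fragment only, with a made-up device name and no release handling.

#include <linux/device.h>
#include <linux/module.h>

static struct device example_dev;

static int __init example_init(void)
{
	int ret;

	device_initialize(&example_dev);
	dev_set_name(&example_dev, "example_trigger");

	ret = device_add(&example_dev);
	if (ret)
		/* device_add() failed: drop the reference taken by
		 * device_initialize() instead of leaking the kobject.
		 */
		put_device(&example_dev);

	return ret;
}
module_init(example_init);

MODULE_LICENSE("GPL");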
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ee568bd..3133b6b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1280,8 +1280,10 @@
return ERR_CAST(cm_id_priv);
err = cm_init_listen(cm_id_priv, service_id, 0);
- if (err)
+ if (err) {
+ ib_destroy_cm_id(&cm_id_priv->id);
return ERR_PTR(err);
+ }
spin_lock_irq(&cm_id_priv->lock);
listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
@@ -1641,14 +1643,13 @@
static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
- struct sa_path_rec *alt_path)
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
u32 lid;
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(primary_path,
- IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
- req_msg));
+ sa_path_set_dlid(primary_path, wc->slid);
sa_path_set_slid(primary_path,
IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
req_msg));
@@ -1685,7 +1686,8 @@
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
- struct sa_path_rec *alt_path)
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
primary_path->dgid =
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
@@ -1743,7 +1745,7 @@
if (sa_path_is_roce(alt_path))
alt_path->roce.route_resolved = false;
}
- cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
+ cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
}
static u16 cm_get_bth_pkey(struct cm_work *work)
@@ -2161,7 +2163,7 @@
if (cm_req_has_alt_path(req_msg))
work->path[1].rec_type = work->path[0].rec_type;
cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
+ &work->path[1], work->mad_recv_wc->wc);
if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
sa_path_set_dmac(&work->path[0],
cm_id_priv->av.ah_attr.roce.dmac);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fbb0efb..9ed5de3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1437,7 +1437,7 @@
return false;
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_iif = net_dev->ifindex;
+ fl4.flowi4_oif = net_dev->ifindex;
fl4.daddr = daddr;
fl4.saddr = saddr;
@@ -1722,8 +1722,8 @@
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
@@ -2635,7 +2635,7 @@
{
struct rdma_id_private *id_priv;
- if (id->qp_type != IB_QPT_RC)
+ if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index aa526c5..d91892f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2759,10 +2759,18 @@
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
- roce_gid_mgmt_init();
+ ret = roce_gid_mgmt_init();
+ if (ret) {
+ pr_warn("Couldn't init RoCE GID management\n");
+ goto err_parent;
+ }
return 0;
+err_parent:
+ rdma_nl_unregister(RDMA_NL_LS);
+ nldev_exit();
+ unregister_pernet_device(&rdma_dev_net_ops);
err_compat:
unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 12d29d5..c90f637 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -2181,7 +2181,7 @@
rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
-void __exit nldev_exit(void)
+void nldev_exit(void)
{
rdma_nl_unregister(RDMA_NL_NLDEV);
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 323f6cf..af4af47 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -466,7 +466,7 @@
mutex_unlock(&umem_odp->umem_mutex);
out_put_mm:
- mmput(owning_mm);
+ mmput_async(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4660268..d7c90da 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -749,6 +749,7 @@
mr->uobject = uobj;
atomic_inc(&pd->usecnt);
mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
rdma_restrack_set_name(&mr->res, NULL);
@@ -832,8 +833,10 @@
atomic_dec(&old_pd->usecnt);
}
- if (cmd.flags & IB_MR_REREG_TRANS)
+ if (cmd.flags & IB_MR_REREG_TRANS) {
mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
+ }
memset(&resp, 0, sizeof(resp));
resp.lkey = mr->lkey;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3d895cc..5889639 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2078,9 +2078,12 @@
return mr;
mr->device = pd->device;
+ mr->type = IB_MR_TYPE_USER;
mr->pd = pd;
mr->dm = NULL;
atomic_inc(&pd->usecnt);
+ mr->iova = virt_addr;
+ mr->length = length;
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
rdma_restrack_parent_name(&mr->res, &pd->res);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 329ee4f..d84b109 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -306,6 +306,8 @@
unsigned long dim = from->nr_segs;
int idx;
+ if (!HFI1_CAP_IS_KSET(SDMA))
+ return -EINVAL;
idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
if (!cq || !pq) {
@@ -1218,8 +1220,10 @@
goto done;
ret = init_user_ctxt(fd, uctxt);
- if (ret)
+ if (ret) {
+ hfi1_free_ctxt_rcv_groups(uctxt);
goto done;
+ }
user_init(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index fa2cd76..837293a 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -530,7 +530,7 @@
u16 shift, mult;
u64 src;
u32 current_egress_rate; /* Mbits /sec */
- u32 max_pkt_time;
+ u64 max_pkt_time;
/*
* max_pkt_time is the maximum packet egress time in units
* of the fabric clock period 1/(805 MHz).
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index d213f65..ed8a96a 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -121,6 +121,9 @@
unsigned long flags;
struct list_head del_list;
+ /* Prevent freeing of mm until we are completely finished. */
+ mmgrab(handler->mn.mm);
+
/* Unregister first so we don't get any more notifications. */
mmu_notifier_unregister(&handler->mn, handler->mn.mm);
@@ -143,6 +146,9 @@
do_remove(handler, &del_list);
+ /* Now the mm may be freed. */
+ mmdrop(handler->mn.mm);
+
kfree(handler);
}
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 1cd8f80..60eb3a6 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -955,8 +955,7 @@
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
- if (!list_empty(&sc->piowait))
- list_move(&sc->piowait, &wake_list);
+ list_splice_init(&sc->piowait, &wake_list);
write_sequnlock(&sc->waitlock);
while (!list_empty(&wake_list)) {
struct iowait *wait;
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0b73dc7..a044bee 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1330,11 +1330,13 @@
kvfree(sde->tx_ring);
sde->tx_ring = NULL;
}
- spin_lock_irq(&dd->sde_map_lock);
- sdma_map_free(rcu_access_pointer(dd->sdma_map));
- RCU_INIT_POINTER(dd->sdma_map, NULL);
- spin_unlock_irq(&dd->sde_map_lock);
- synchronize_rcu();
+ if (rcu_access_pointer(dd->sdma_map)) {
+ spin_lock_irq(&dd->sde_map_lock);
+ sdma_map_free(rcu_access_pointer(dd->sdma_map));
+ RCU_INIT_POINTER(dd->sdma_map, NULL);
+ spin_unlock_irq(&dd->sde_map_lock);
+ synchronize_rcu();
+ }
kfree(dd->per_sdma);
dd->per_sdma = NULL;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 3591923..5f3edd2 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1439,8 +1439,7 @@
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
- props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu :
- ib_mtu_enum_to_int(props->max_mtu);
+ props->phys_mtu = hfi1_max_mtu;
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index abe882e..6dab03b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5642,8 +5642,8 @@
dev_err(dev, "AEQ overflow!\n");
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index be7f2fe..8a92fae 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -92,7 +92,7 @@
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 027ec84..6d7cc72 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -286,7 +286,6 @@
goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->ibmr.length = length;
return &mr->ibmr;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 291e06d..6fe98af 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -386,11 +386,8 @@
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
- hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 426fed0..811b4bb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -439,7 +439,6 @@
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.length = length;
mr->ibmr.page_size = 1U << shift;
return &mr->ibmr;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 343e670..2f053f4 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1792,8 +1792,10 @@
key_level2,
obj_event,
GFP_KERNEL);
- if (err)
+ if (err) {
+ kfree(obj_event);
return err;
+ }
INIT_LIST_HEAD(&obj_event->obj_sub_list);
}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index b3391ec..0404e6f 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -2081,12 +2081,10 @@
if (err)
return err;
- if (flags) {
- mlx5_ib_ft_type_to_namespace(
+ if (flags)
+ return mlx5_ib_ft_type_to_namespace(
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
&obj->ns_type);
- return 0;
- }
}
obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9bb9bb0..cca7a4a 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -166,6 +166,12 @@
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1934669..d827a4e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -531,8 +531,10 @@
spin_lock_irq(&ent->lock);
if (ent->disabled)
goto out;
- if (need_delay)
+ if (need_delay) {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+ goto out;
+ }
remove_cache_mr_locked(ent);
queue_adjust_cache_locked(ent);
}
@@ -575,6 +577,8 @@
ent = &cache->ent[entry];
spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
+ queue_adjust_cache_locked(ent);
+ ent->miss++;
spin_unlock_irq(&ent->lock);
mr = create_cache_mr(ent);
if (IS_ERR(mr))
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 9676416..d0bb61b 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -374,6 +374,10 @@
if (IS_IWARP(dev)) {
xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ if (!dev->iwarp_wq) {
+ rc = -ENOMEM;
+ goto err1;
+ }
}
/* Allocate Status blocks for CNQ */
@@ -381,7 +385,7 @@
GFP_KERNEL);
if (!dev->sb_array) {
rc = -ENOMEM;
- goto err1;
+ goto err_destroy_wq;
}
dev->cnq_array = kcalloc(dev->num_cnq,
@@ -432,6 +436,9 @@
kfree(dev->cnq_array);
err2:
kfree(dev->sb_array);
+err_destroy_wq:
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
err1:
kfree(dev->sgid_tbl);
return rc;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 9dde703..8ef6eec 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -418,6 +418,7 @@
u32 sq_psn;
u32 qkey;
u32 dest_qp_num;
+ u8 timeout;
/* Relevant to qps created from kernel space only (ULPs) */
u8 prev_wqe_size;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index eeb87f3..3543b9a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2622,6 +2622,8 @@
1 << max_t(int, attr->timeout - 8, 0);
else
qp_params.ack_timeout = 0;
+
+ qp->timeout = attr->timeout;
}
if (attr_mask & IB_QP_RETRY_CNT) {
@@ -2781,7 +2783,7 @@
rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
rdma_ah_set_sl(&qp_attr->ah_attr, 0);
- qp_attr->timeout = params.timeout;
+ qp_attr->timeout = qp->timeout;
qp_attr->rnr_retry = params.rnr_retry;
qp_attr->retry_cnt = params.retry_cnt;
qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
@@ -2987,7 +2989,11 @@
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err1;
}
@@ -3082,8 +3088,12 @@
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
- goto err0;
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
+ goto err1;
}
/* Index only, 18 bit long, lkey = itid << 8 | key */
@@ -3107,7 +3117,7 @@
rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
if (rc) {
DP_ERR(dev, "roce register tid returned an error %d\n", rc);
- goto err1;
+ goto err2;
}
mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
@@ -3116,8 +3126,10 @@
DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
return mr;
-err1:
+err2:
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
kfree(mr);
return ERR_PTR(rc);
@@ -3212,7 +3224,11 @@
rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
if (rc) {
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ if (rc == -EINVAL)
+ DP_ERR(dev, "Out of MR resources\n");
+ else
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
+
goto err1;
}
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 09f0dbf..585a9c7 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2826,7 +2826,7 @@
EXPORT_SYMBOL(rvt_qp_iter);
/*
- * This should be called with s_lock held.
+ * This should be called with s_lock and r_lock held.
*/
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
@@ -3185,7 +3185,9 @@
rvp->n_loop_pkts++;
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ spin_lock(&sqp->r_lock);
rvt_send_complete(sqp, wqe, send_status);
+ spin_unlock(&sqp->r_lock);
if (local_ops) {
atomic_dec(&sqp->local_ops_pending);
local_ops = 0;
@@ -3239,9 +3241,15 @@
spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
spin_lock_irqsave(&sqp->s_lock, flags);
+ spin_lock(&sqp->r_lock);
rvt_send_complete(sqp, wqe, send_status);
+ spin_unlock(&sqp->r_lock);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ int lastwqe;
+
+ spin_lock(&sqp->r_lock);
+ lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&sqp->r_lock);
sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f9fb56e..dca8642 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -98,6 +98,12 @@
RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+ /* Max number of iterations of each tasklet
+ * before yielding the cpu to let other
+ * work make progress
+ */
+ RXE_MAX_ITERATIONS = 1024,
+
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index a1b7901..2e4b008 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -184,6 +184,14 @@
spin_lock_init(&qp->grp_lock);
spin_lock_init(&qp->state_lock);
+ spin_lock_init(&qp->req.task.state_lock);
+ spin_lock_init(&qp->resp.task.state_lock);
+ spin_lock_init(&qp->comp.task.state_lock);
+
+ spin_lock_init(&qp->sq.sq_lock);
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
atomic_set(&qp->ssn, 0);
atomic_set(&qp->skb_out, 0);
}
@@ -239,7 +247,6 @@
qp->req.opcode = -1;
qp->comp.opcode = -1;
- spin_lock_init(&qp->sq.sq_lock);
skb_queue_head_init(&qp->req_pkts);
rxe_init_task(rxe, &qp->req.task, qp,
@@ -289,9 +296,6 @@
}
}
- spin_lock_init(&qp->rq.producer_lock);
- spin_lock_init(&qp->rq.consumer_lock);
-
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
@@ -771,7 +775,9 @@
rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
- __rxe_do_task(&qp->req.task);
+ if (qp->req.task.func)
+ __rxe_do_task(&qp->req.task);
+
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
@@ -811,8 +817,10 @@
free_rd_atomic_resources(qp);
- kernel_sock_shutdown(qp->sk, SHUT_RDWR);
- sock_release(qp->sk);
+ if (qp->sk) {
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
}
/* called when the last reference to the qp is dropped */
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d491764..69238f8 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -650,7 +650,7 @@
opcode = next_opcode(qp, wqe, wqe->wr.opcode);
if (unlikely(opcode < 0)) {
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto exit;
+ goto err;
}
mask = rxe_opcode[opcode].mask;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6951fdc..568cf56 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/hardirq.h>
-#include "rxe_task.h"
+#include "rxe.h"
int __rxe_do_task(struct rxe_task *task)
@@ -34,6 +34,7 @@
int ret;
unsigned long flags;
struct rxe_task *task = from_tasklet(task, t, tasklet);
+ unsigned int iterations = RXE_MAX_ITERATIONS;
spin_lock_irqsave(&task->state_lock, flags);
switch (task->state) {
@@ -62,13 +63,20 @@
spin_lock_irqsave(&task->state_lock, flags);
switch (task->state) {
case TASK_STATE_BUSY:
- if (ret)
+ if (ret) {
task->state = TASK_STATE_START;
- else
+ } else if (iterations--) {
cont = 1;
+ } else {
+ /* reschedule the tasklet and exit
+ * the loop to give up the cpu
+ */
+ tasklet_schedule(&task->tasklet);
+ task->state = TASK_STATE_START;
+ }
break;
- /* soneone tried to run the task since the last time we called
+ /* someone tried to run the task since the last time we called
* func, so we will call one more time regardless of the
* return value
*/
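The rxe_task.c hunk above bounds how long the tasklet can loop by counting down from RXE_MAX_ITERATIONS and rescheduling itself once the budget is spent, so other softirq work gets the CPU. A small user-space sketch of that budgeted-loop-then-reschedule shape is given below; the "reschedule" is just a flag here, not a real tasklet, and the work counts are invented.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ITERATIONS 1024

static bool rescheduled;

/* Returns nonzero when there is no more work, like the rxe task function. */
static int do_work(void)
{
	static int remaining = 5000;	/* pretend 5000 packets are queued */

	return --remaining <= 0;
}

static void run_task(void)
{
	unsigned int iterations = MAX_ITERATIONS;

	while (1) {
		if (do_work())
			break;			/* work drained, stop */
		if (!iterations--) {
			rescheduled = true;	/* give the CPU back, run again later */
			break;
		}
	}
}

int main(void)
{
	run_task();
	printf("rescheduled = %d\n", rescheduled);
	return 0;
}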
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 66764f7..b87ba4c 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -725,11 +725,11 @@
enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
rv = siw_recv_mpa_rr(cep);
- if (rv != -EAGAIN)
- siw_cancel_mpatimer(cep);
if (rv)
goto out_err;
+ siw_cancel_mpatimer(cep);
+
rep = &cep->mpa.hdr;
if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
@@ -895,7 +895,8 @@
}
out_err:
- siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+ if (rv != -EAGAIN)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
return rv;
}
@@ -968,14 +969,15 @@
siw_cep_set_inuse(new_cep);
rv = siw_proc_mpareq(new_cep);
- siw_cep_set_free(new_cep);
-
if (rv != -EAGAIN) {
siw_cep_put(cep);
new_cep->listen_cep = NULL;
- if (rv)
+ if (rv) {
+ siw_cep_set_free(new_cep);
goto error;
+ }
}
+ siw_cep_set_free(new_cep);
}
return;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 875ea6f..fd721cc 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -961,27 +961,28 @@
static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
{
struct sk_buff *skb = srx->skb;
+ int avail = min(srx->skb_new, srx->fpdu_part_rem);
u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
__wsum crc_in, crc_own = 0;
siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
srx->fpdu_part_rem, srx->skb_new, srx->pad);
- if (srx->skb_new < srx->fpdu_part_rem)
+ skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
+
+ srx->skb_new -= avail;
+ srx->skb_offset += avail;
+ srx->skb_copied += avail;
+ srx->fpdu_part_rem -= avail;
+
+ if (srx->fpdu_part_rem)
return -EAGAIN;
- skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
-
- if (srx->mpa_crc_hd && srx->pad)
- crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
-
- srx->skb_new -= srx->fpdu_part_rem;
- srx->skb_offset += srx->fpdu_part_rem;
- srx->skb_copied += srx->fpdu_part_rem;
-
if (!srx->mpa_crc_hd)
return 0;
+ if (srx->pad)
+ crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
/*
* CRC32 is computed, transmitted and received directly in NBO,
* so there's never a reason to convert byte order.
@@ -1083,10 +1084,9 @@
* completely received.
*/
if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
- bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
+ int hdrlen = iwarp_pktinfo[opcode].hdr_len;
- if (srx->skb_new < bytes)
- return -EAGAIN;
+ bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes);
@@ -1096,6 +1096,9 @@
srx->skb_new -= bytes;
srx->skb_offset += bytes;
srx->skb_copied += bytes;
+
+ if (srx->fpdu_part_rcvd < hdrlen)
+ return -EAGAIN;
}
/*
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 7989c40..3c3ae5e 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
- return virt_to_page(paddr);
+ return virt_to_page((void *)paddr);
return NULL;
}
@@ -523,13 +523,23 @@
kunmap(p);
}
} else {
- u64 va = sge->laddr + sge_off;
+ /*
+ * Cast to an uintptr_t to preserve all 64 bits
+ * in sge->laddr.
+ */
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
- page_array[seg] = virt_to_page(va & PAGE_MASK);
+ /*
+ * virt_to_page() takes a (void *) pointer
+ * so cast to a (void *) meaning it will be 64
+ * bits on a 64 bit platform and 32 bits on a
+ * 32 bit platform.
+ */
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(uintptr_t)va,
+ (void *)va,
plen);
}
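The siw_qp_tx.c comments above are about keeping all 64 address bits when converting sge->laddr into something virt_to_page() accepts: going through uintptr_t and then (void *) is lossless on both 32-bit and 64-bit builds, whereas truncating through a 32-bit integer would not be. A tiny demonstration of the round-trip and of masking to a page boundary on the integer form is below; it is plain C with no kernel calls.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = 42;
	void *p = &x;

	/* uintptr_t is defined to hold any object pointer without loss,
	 * so pointer -> uintptr_t -> pointer round-trips exactly.
	 */
	uintptr_t va = (uintptr_t)p;
	void *back = (void *)va;

	printf("round-trip ok: %d\n", back == p);

	/* Masking the low bits (here to a 4 KiB boundary) is done on the
	 * integer form before converting back, as in the hunk above.
	 */
	uintptr_t aligned = va & ~(uintptr_t)0xfff;
	printf("aligned: %#lx\n", (unsigned long)aligned);
	return 0;
}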
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 3690e28..a16e066 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -499,6 +499,7 @@
iser_conn->iscsi_conn = conn;
out:
+ iscsi_put_endpoint(ep);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
@@ -988,6 +989,7 @@
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 13634ed..5c39e4c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1728,11 +1728,6 @@
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
- if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
- rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
- queue_depth);
- return -ECONNRESET;
- }
if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
rtrs_err(clt, "Error: queue depth changed\n");
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 51c60f5..c5ca123 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -23,6 +23,17 @@
#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
__stringify(RTRS_PROTO_VER_MINOR)
+/*
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65536 (2^16) in theory.
+ * But mempool_create, create_qp and ib_post_send fail with
+ * "cannot allocate memory" error if sess_queue_depth is too big.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65534 and it depends on the system.
+ */
+#define MAX_SESS_QUEUE_DEPTH 65535
+
enum rtrs_imm_const {
MAX_IMM_TYPE_BITS = 4,
MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
@@ -46,16 +57,7 @@
MAX_PATHS_NUM = 128,
- /*
- * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
- * and the minimum chunk size is 4096 (2^12).
- * So the maximum sess_queue_depth is 65536 (2^16) in theory.
- * But mempool_create, create_qp and ib_post_send fail with
- * "cannot allocate memory" error if sess_queue_depth is too big.
- * Therefore the pratical max value of sess_queue_depth is
- * somewhere between 1 and 65536 and it depends on the system.
- */
- MAX_SESS_QUEUE_DEPTH = 65536,
+ MIN_CHUNK_SIZE = 8192,
RTRS_HB_INTERVAL_MS = 5000,
RTRS_HB_MISSED_MAX = 5,
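The rtrs-pri.h comment above is the basis for the new MAX_SESS_QUEUE_DEPTH value: with 2^28 of immediate data (MAX_IMM_PAYL_BITS) and a minimum chunk size of 4096 = 2^12, at most 2^28 / 2^12 = 2^16 = 65536 chunks are addressable, and the define is set one below that theoretical ceiling. A quick check of the arithmetic in plain C:

#include <stdio.h>

int main(void)
{
	unsigned int max_imm_payl_bits = 28;	/* from MAX_IMM_PAYL_BITS */
	unsigned int min_chunk_shift = 12;	/* 4096-byte minimum chunk */

	unsigned int theoretical_max = 1u << (max_imm_payl_bits - min_chunk_shift);

	printf("theoretical max queue depth: %u\n", theoretical_max);	  /* 65536 */
	printf("MAX_SESS_QUEUE_DEPTH:        %u\n", theoretical_max - 1); /* 65535 */
	return 0;
}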
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index b033bfa..b152a74 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -2193,9 +2193,9 @@
sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
return -EINVAL;
}
- if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
+ if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
- max_chunk_size, 4096);
+ max_chunk_size, MIN_CHUNK_SIZE);
return -EINVAL;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 07ecc7d..c0ed08f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -565,12 +565,9 @@
if (ret)
return ret;
- sport->port_guid_id.wwn.priv = sport;
- srpt_format_guid(sport->port_guid_id.name,
- sizeof(sport->port_guid_id.name),
+ srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
&sport->gid.global.interface_id);
- sport->port_gid_id.wwn.priv = sport;
- snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
+ snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
@@ -2310,31 +2307,35 @@
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- mutex_lock(&sport->port_guid_id.mutex);
- list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->guid_id) {
+ mutex_lock(&sport->guid_id->mutex);
+ list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
+ }
+ mutex_unlock(&sport->guid_id->mutex);
}
- mutex_unlock(&sport->port_guid_id.mutex);
- mutex_lock(&sport->port_gid_id.mutex);
- list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->gid_id) {
+ mutex_lock(&sport->gid_id->mutex);
+ list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- /* Retry without leading "0x" */
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->gid_id->mutex);
}
- mutex_unlock(&sport->port_gid_id.mutex);
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
@@ -2980,7 +2981,12 @@
return 0;
}
-static struct se_wwn *__srpt_lookup_wwn(const char *name)
+struct port_and_port_id {
+ struct srpt_port *sport;
+ struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
{
struct ib_device *dev;
struct srpt_device *sdev;
@@ -2995,25 +3001,38 @@
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid_id.name, name) == 0)
- return &sport->port_guid_id.wwn;
- if (strcmp(sport->port_gid_id.name, name) == 0)
- return &sport->port_gid_id.wwn;
+ if (strcmp(sport->guid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->guid_id};
+ }
+ if (strcmp(sport->gid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->gid_id};
+ }
}
}
- return NULL;
+ return (struct port_and_port_id){};
}
-static struct se_wwn *srpt_lookup_wwn(const char *name)
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the RDMA port reference count if an RDMA port pointer is returned.
+ * The caller must drop that reference count by calling srpt_port_put_ref().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
{
- struct se_wwn *wwn;
+ struct port_and_port_id papi;
spin_lock(&srpt_dev_lock);
- wwn = __srpt_lookup_wwn(name);
+ papi = __srpt_lookup_port(name);
spin_unlock(&srpt_dev_lock);
- return wwn;
+ return papi;
}
static void srpt_free_srq(struct srpt_device *sdev)
@@ -3098,6 +3117,18 @@
return ret;
}
+static void srpt_free_sdev(struct kref *refcnt)
+{
+ struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+ kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+ kref_put(&sdev->refcnt, srpt_free_sdev);
+}
+
/**
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
@@ -3115,6 +3146,7 @@
if (!sdev)
return -ENOMEM;
+ kref_init(&sdev->refcnt);
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
@@ -3178,10 +3210,6 @@
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
- mutex_init(&sport->port_guid_id.mutex);
- INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
- mutex_init(&sport->port_gid_id.mutex);
- INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
ret = srpt_refresh_port(sport);
if (ret) {
@@ -3210,7 +3238,7 @@
srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
- kfree(sdev);
+ srpt_sdev_put(sdev);
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
return ret;
}
@@ -3254,7 +3282,7 @@
ib_dealloc_pd(sdev->pd);
- kfree(sdev);
+ srpt_sdev_put(sdev);
}
static struct ib_client srpt_client = {
@@ -3282,10 +3310,10 @@
{
struct srpt_port *sport = wwn->priv;
- if (wwn == &sport->port_guid_id.wwn)
- return &sport->port_guid_id;
- if (wwn == &sport->port_gid_id.wwn)
- return &sport->port_gid_id;
+ if (sport->guid_id && &sport->guid_id->wwn == wwn)
+ return sport->guid_id;
+ if (sport->gid_id && &sport->gid_id->wwn == wwn)
+ return sport->gid_id;
WARN_ON_ONCE(true);
return NULL;
}
@@ -3800,7 +3828,31 @@
struct config_group *group,
const char *name)
{
- return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
+ struct port_and_port_id papi = srpt_lookup_port(name);
+ struct srpt_port *sport = papi.sport;
+ struct srpt_port_id *port_id;
+
+ if (!papi.port_id)
+ return ERR_PTR(-EINVAL);
+ if (*papi.port_id) {
+ /* Attempt to create a directory that already exists. */
+ WARN_ON_ONCE(true);
+ return &(*papi.port_id)->wwn;
+ }
+ port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+ if (!port_id) {
+ srpt_sdev_put(sport->sdev);
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&port_id->mutex);
+ INIT_LIST_HEAD(&port_id->tpg_list);
+ port_id->wwn.priv = sport;
+ memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
+ sport->gid_name, ARRAY_SIZE(port_id->name));
+
+ *papi.port_id = port_id;
+
+ return &port_id->wwn;
}
/**
@@ -3809,6 +3861,18 @@
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
+ struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+ struct srpt_port *sport = wwn->priv;
+
+ if (sport->guid_id == port_id)
+ sport->guid_id = NULL;
+ else if (sport->gid_id == port_id)
+ sport->gid_id = NULL;
+ else
+ WARN_ON_ONCE(true);
+
+ srpt_sdev_put(sport->sdev);
+ kfree(port_id);
}
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index bdeb010..2bf381e 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -376,7 +376,7 @@
};
/**
- * struct srpt_port_id - information about an RDMA port name
+ * struct srpt_port_id - LIO RDMA port information
* @mutex: Protects @tpg_list changes.
* @tpg_list: TPGs associated with the RDMA port name.
* @wwn: WWN associated with the RDMA port name.
@@ -393,7 +393,7 @@
};
/**
- * struct srpt_port - information associated by SRPT with a single IB port
+ * struct srpt_port - SRPT RDMA port information
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -402,8 +402,10 @@
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_id: target port GUID
- * @port_gid_id: target port GID
+ * @guid_name: port name in GUID format.
+ * @guid_id: LIO target port information for the port name in GUID format.
+ * @gid_name: port name in GID format.
+ * @gid_id: LIO target port information for the port name in GID format.
* @port_attrib: Port attributes that can be accessed through configfs.
* @refcount: Number of objects associated with this port.
* @freed_channels: Completion that will be signaled once @refcount becomes 0.
@@ -419,8 +421,10 @@
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct srpt_port_id port_guid_id;
- struct srpt_port_id port_gid_id;
+ char guid_name[64];
+ struct srpt_port_id *guid_id;
+ char gid_name[64];
+ struct srpt_port_id *gid_id;
struct srpt_port_attrib port_attrib;
atomic_t refcount;
struct completion *freed_channels;
@@ -430,6 +434,7 @@
/**
* struct srpt_device - information associated by SRPT with a single HCA
+ * @refcnt: Reference count for this device.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
@@ -445,6 +450,7 @@
* @port: Information about the ports owned by this HCA.
*/
struct srpt_device {
+ struct kref refcnt;
struct ib_device *device;
struct ib_pd *pd;
u32 lkey;
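The ib_srpt hunks above convert the HCA structure to reference counting: kref_init() at allocation, kref_get() in the lookup path, and kref_put() where kfree() used to be. As a rough illustration only, here is a small userspace C sketch of that get/put lifetime pattern built on C11 atomics; struct mydev and its helpers are hypothetical names, not driver code.

/* Standalone sketch (not part of the patch): the get/put lifetime pattern the
 * srpt changes apply to struct srpt_device, written with C11 atomics so it
 * compiles in userspace. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mydev {
	atomic_int refcnt;		/* plays the role of struct kref */
	int id;
};

static struct mydev *mydev_alloc(int id)
{
	struct mydev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	atomic_init(&d->refcnt, 1);	/* like kref_init(): the owner holds one ref */
	d->id = id;
	return d;
}

static void mydev_get(struct mydev *d)
{
	atomic_fetch_add(&d->refcnt, 1);	/* like kref_get() in the lookup path */
}

static void mydev_put(struct mydev *d)
{
	/* like kref_put(): free only when the last reference is dropped */
	if (atomic_fetch_sub(&d->refcnt, 1) == 1) {
		printf("freeing device %d\n", d->id);
		free(d);
	}
}

int main(void)
{
	struct mydev *d = mydev_alloc(42);

	mydev_get(d);	/* a lookup hands out a second reference */
	mydev_put(d);	/* the lookup user drops it */
	mydev_put(d);	/* the owner drops the last one; the object is freed here */
	return 0;
}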
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ff9dc37..49504dc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -47,6 +47,17 @@
static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
+static const unsigned int input_max_code[EV_CNT] = {
+ [EV_KEY] = KEY_MAX,
+ [EV_REL] = REL_MAX,
+ [EV_ABS] = ABS_MAX,
+ [EV_MSC] = MSC_MAX,
+ [EV_SW] = SW_MAX,
+ [EV_LED] = LED_MAX,
+ [EV_SND] = SND_MAX,
+ [EV_FF] = FF_MAX,
+};
+
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{
@@ -1976,6 +1987,14 @@
*/
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
+ if (type < EV_CNT && input_max_code[type] &&
+ code > input_max_code[type]) {
+ pr_err("%s: invalid code %u for type %u\n", __func__, code,
+ type);
+ dump_stack();
+ return;
+ }
+
switch (type) {
case EV_KEY:
__set_bit(code, dev->keybit);
@@ -2179,12 +2198,6 @@
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
- /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
- if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
- __clear_bit(BTN_RIGHT, dev->keybit);
- __clear_bit(BTN_MIDDLE, dev->keybit);
- }
-
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
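The input core change above refuses event codes that exceed a per-type maximum taken from the new input_max_code[] table. The following standalone sketch shows the same table-driven bounds check with made-up type names and limits; it is an illustration, not the kernel function.

/* Standalone sketch (not part of the patch): validating a (type, code) pair
 * against a per-type maximum table, as input_set_capability() now does. */
#include <stdio.h>

enum { EV_KEY, EV_REL, EV_ABS, EV_CNT };

static const unsigned int max_code[EV_CNT] = {
	[EV_KEY] = 0x2ff,	/* hypothetical per-type maxima */
	[EV_REL] = 0x0f,
	[EV_ABS] = 0x3f,
};

static int set_capability(unsigned int type, unsigned int code)
{
	/* Reject unknown types and out-of-range codes up front, instead of
	 * letting a later bitmap write scribble past the end of the array. */
	if (type >= EV_CNT || (max_code[type] && code > max_code[type])) {
		fprintf(stderr, "invalid code %u for type %u\n", code, type);
		return -1;
	}
	printf("type %u code %u accepted\n", type, code);
	return 0;
}

int main(void)
{
	set_capability(EV_REL, 0x08);	/* fine */
	set_capability(EV_REL, 0x40);	/* rejected: above the EV_REL maximum */
	return 0;
}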
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b2a68bc..84b8752 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -50,6 +50,7 @@
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
@@ -272,22 +273,22 @@
* Get device info.
*/
- if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3)
input_dev->id.vendor = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
- if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3)
input_dev->id.product = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
- if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3)
+ if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3)
iforce->device_memory.end = get_unaligned_le16(buf + 1);
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
- if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2)
+ if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2)
ff_effects = buf[1];
else
dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index f95a81b..2380546 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -39,7 +39,7 @@
again:
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -64,7 +64,7 @@
if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
goto again;
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
}
@@ -169,7 +169,7 @@
iforce_serio->cmd_response_len = iforce_serio->len;
/* Signal that command is done */
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
} else if (likely(iforce->type)) {
iforce_process_packet(iforce, iforce_serio->id,
iforce_serio->data_in,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ea58805..cba92bd 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -30,7 +30,7 @@
spin_lock_irqsave(&iforce->xmit_lock, flags);
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -58,9 +58,9 @@
XMIT_INC(iforce->xmit.tail, n);
if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_warn(&iforce_usb->intf->dev,
"usb_submit_urb failed %d\n", n);
+ iforce_clear_xmit_and_wake(iforce);
}
/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
@@ -175,15 +175,15 @@
struct iforce *iforce = &iforce_usb->iforce;
if (urb->status) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
urb->status);
+ iforce_clear_xmit_and_wake(iforce);
return;
}
__iforce_usb_xmit(iforce);
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
}
static int iforce_usb_probe(struct usb_interface *intf,
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 6aa761e..9ccb910 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -119,6 +119,12 @@
response_data, response_len);
}
+static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
+{
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ wake_up_all(&iforce->wait);
+}
+
/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
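The iforce hunks funnel every "transmission finished or failed" path through iforce_clear_xmit_and_wake(), which clears the busy bit and wakes all sleepers so nobody waits forever on a dead transfer. Below is a rough pthreads sketch of that clear-then-wake pattern; it is an analogy built on condition variables, not kernel wait queues.

/* Standalone sketch (not part of the patch): clear the busy flag, then wake
 * every waiter, expressed with pthreads. Build with -pthread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool xmit_running = true;

static void clear_xmit_and_wake(void)
{
	pthread_mutex_lock(&lock);
	xmit_running = false;		/* like clear_bit(IFORCE_XMIT_RUNNING, ...) */
	pthread_cond_broadcast(&cond);	/* like wake_up_all(&iforce->wait) */
	pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (xmit_running)		/* sleep until the flag drops */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld woken\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	pthread_create(&t[0], NULL, waiter, (void *)0L);
	pthread_create(&t[1], NULL, waiter, (void *)1L);
	sleep(1);			/* pretend the packet/URB completes here */
	clear_xmit_and_wake();
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}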
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ba101af..70dedc0 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -112,6 +112,8 @@
u8 xtype;
} xpad_device[] = {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -242,6 +244,7 @@
{ 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -258,6 +261,7 @@
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
@@ -322,6 +326,7 @@
{ 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
@@ -331,6 +336,14 @@
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+ { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -416,6 +429,7 @@
static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
+ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
@@ -426,6 +440,7 @@
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
+ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
@@ -446,8 +461,12 @@
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
+ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
{ }
};
@@ -1964,7 +1983,6 @@
.disconnect = xpad_disconnect,
.suspend = xpad_suspend,
.resume = xpad_resume,
- .reset_resume = xpad_resume,
.id_table = xpad_table,
};
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 6528676..ad8660b 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -20,7 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#define SNVS_HPVIDR1_REG 0xF8
+#define SNVS_HPVIDR1_REG 0xBF8
#define SNVS_LPSR_REG 0x4C /* LP Status Register */
#define SNVS_LPCR_REG 0x38 /* LP Control Register */
#define SNVS_HPSR_REG 0x14
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
index 3fb64db..76873aa 100644
--- a/drivers/input/misc/rk805-pwrkey.c
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -98,6 +98,7 @@
};
module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index cb6ec59..31c02c2 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -18,6 +18,10 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
+static bool use_low_level_irq;
+module_param(use_low_level_irq, bool, 0444);
+MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered");
+
struct soc_button_info {
const char *name;
int acpi_index;
@@ -74,6 +78,13 @@
},
},
{
+ /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ },
+ {
/*
* Acer One S1003. _LID method messes with power-button GPIO
* IRQ settings, leading to a non working power-button.
@@ -85,13 +96,13 @@
},
{
/*
- * Lenovo Yoga Tab2 1051L, something messes with the home-button
+ * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
* IRQ settings, leading to a non working home-button.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
},
},
{} /* Terminating entry */
@@ -164,7 +175,8 @@
}
/* See dmi_use_low_level_irq[] comment */
- if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) {
+ if (!autorepeat && (use_low_level_irq ||
+ dmi_check_system(dmi_use_low_level_irq))) {
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
gpio_keys[n_buttons].irq = irq;
gpio_keys[n_buttons].gpio = -ENOENT;
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index fe43e55..cdcb773 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -205,6 +205,7 @@
info = &state->u.bbc;
info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0);
+ of_node_put(dp);
if (!info->clock_freq)
goto out_free;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 59a1450..ca15061 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -942,17 +942,22 @@
if (!dev->tp_data)
goto err_free_bt_buffer;
- if (dev->bt_urb)
+ if (dev->bt_urb) {
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);
+ dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ }
+
usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);
+ dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8257709..f1013b9 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -191,6 +191,7 @@
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
+ "SYN3286", /* HP Laptop 15-da3001TU */
NULL
};
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index ecdeca1..4408245 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -159,7 +159,7 @@
return ret;
}
-static int amba_kmi_remove(struct amba_device *dev)
+static void amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -168,7 +168,6 @@
iounmap(kmi->base);
kfree(kmi);
amba_release_regions(dev);
- return 0;
}
static int __maybe_unused amba_kmi_resume(struct device *dev)
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 2f9775d..70ea03a 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -350,6 +350,10 @@
ps2port->port = serio;
ps2port->padev = dev;
ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
+ if (!ps2port->addr) {
+ ret = -ENOMEM;
+ goto fail_nomem;
+ }
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index a9f68f5..8648b4c 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1543,8 +1543,6 @@
{
int error;
- i8042_platform_device = dev;
-
if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
@@ -1582,7 +1580,6 @@
i8042_free_aux_ports(); /* in case KBD failed but AUX not */
i8042_free_irqs();
i8042_controller_reset(false);
- i8042_platform_device = NULL;
return error;
}
@@ -1592,7 +1589,6 @@
i8042_unregister_ports();
i8042_free_irqs();
i8042_controller_reset(false);
- i8042_platform_device = NULL;
return 0;
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 8df402a..3c152e9 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3134,8 +3134,9 @@
if (error)
return error;
+ /* Request the RESET line as asserted so we go into reset */
data->reset_gpio = devm_gpiod_get_optional(&client->dev,
- "reset", GPIOD_OUT_LOW);
+ "reset", GPIOD_OUT_HIGH);
if (IS_ERR(data->reset_gpio)) {
error = PTR_ERR(data->reset_gpio);
dev_err(&client->dev, "Failed to get reset gpio: %d\n", error);
@@ -3153,8 +3154,9 @@
disable_irq(client->irq);
if (data->reset_gpio) {
+ /* Wait a while and then de-assert the RESET GPIO line */
msleep(MXT_RESET_GPIO_TIME);
- gpiod_set_value(data->reset_gpio, 1);
+ gpiod_set_value(data->reset_gpio, 0);
msleep(MXT_RESET_INVALID_CHG);
}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 5fc789f..b7f87ad 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -154,6 +154,7 @@
static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "1151", .data = >1x_chip_data },
+ { .id = "1158", .data = >1x_chip_data },
{ .id = "5663", .data = >1x_chip_data },
{ .id = "5688", .data = >1x_chip_data },
{ .id = "917S", .data = >1x_chip_data },
@@ -1058,6 +1059,7 @@
input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+retry_read_config:
/* Read configuration and apply touchscreen parameters */
goodix_read_config(ts);
@@ -1065,6 +1067,16 @@
touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) {
+ if (!ts->reset_controller_at_probe &&
+ ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) {
+ dev_info(&ts->client->dev, "Config not set, resetting controller\n");
+ /* Retry after a controller reset */
+ ts->reset_controller_at_probe = true;
+ error = goodix_reset(ts);
+ if (error)
+ return error;
+ goto retry_read_config;
+ }
dev_err(&ts->client->dev,
"Invalid config (%d, %d, %d), using defaults\n",
ts->prop.max_x, ts->prop.max_y, ts->max_touch_num);
@@ -1385,6 +1397,7 @@
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt1158" },
{ .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 30576a5..f437eef 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -420,9 +420,9 @@
if (error)
return error;
- usleep_range(50, 100);
+ usleep_range(12000, 15000);
gpiod_set_value_cansleep(reset_gpio, 0);
- msleep(100);
+ msleep(160);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index f67efdd..43fcc8c 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1453,7 +1453,7 @@
"ce", GPIOD_OUT_LOW);
if (IS_ERR(ts->gpio_ce)) {
error = PTR_ERR(ts->gpio_ce);
- if (error != EPROBE_DEFER)
+ if (error != -EPROBE_DEFER)
dev_err(&client->dev,
"Failed to get gpio: %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 9a64e1d..a05a7a6 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -337,13 +337,15 @@
struct stmfts_data *sdata = input_get_drvdata(dev);
int err;
- err = pm_runtime_get_sync(&sdata->client->dev);
- if (err < 0)
+ err = pm_runtime_resume_and_get(&sdata->client->dev);
+ if (err)
return err;
err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
- if (err)
+ if (err) {
+ pm_runtime_put_sync(&sdata->client->dev);
return err;
+ }
mutex_lock(&sdata->mutex);
sdata->running = true;
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 6df6f07..17b10b8 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -135,7 +135,7 @@
struct touch_event {
__le16 status;
- u8 finger_cnt;
+ u8 finger_mask;
u8 time_stamp;
struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM];
};
@@ -311,11 +311,32 @@
static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot,
const struct point_coord *p)
{
+ u16 x, y;
+
+ if (unlikely(!(p->sub_status &
+ (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) {
+ dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n",
+ p->sub_status);
+ return;
+ }
+
+ x = le16_to_cpu(p->x);
+ y = le16_to_cpu(p->y);
+
input_mt_slot(bt541->input_dev, slot);
- input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true);
- touchscreen_report_pos(bt541->input_dev, &bt541->prop,
- le16_to_cpu(p->x), le16_to_cpu(p->y), true);
- input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width);
+ if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER,
+ !(p->sub_status & SUB_BIT_UP))) {
+ touchscreen_report_pos(bt541->input_dev,
+ &bt541->prop, x, y, true);
+ input_report_abs(bt541->input_dev,
+ ABS_MT_TOUCH_MAJOR, p->width);
+ dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n",
+ slot, p->sub_status & SUB_BIT_DOWN ? "down" : "move",
+ x, y);
+ } else {
+ dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n",
+ slot, x, y);
+ }
}
static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
@@ -323,6 +344,7 @@
struct bt541_ts_data *bt541 = bt541_handler;
struct i2c_client *client = bt541->client;
struct touch_event touch_event;
+ unsigned long finger_mask;
int error;
int i;
@@ -335,10 +357,14 @@
goto out;
}
- for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++)
- if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST)
- zinitix_report_finger(bt541, i,
- &touch_event.point_coord[i]);
+ finger_mask = touch_event.finger_mask;
+ for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) {
+ const struct point_coord *p = &touch_event.point_coord[i];
+
+ /* Only process contacts that are actually reported */
+ if (p->sub_status & SUB_BIT_EXIST)
+ zinitix_report_finger(bt541, i, p);
+ }
input_mt_sync_frame(bt541->input_dev);
input_sync(bt541->input_dev);
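The zinitix handler now walks only the contacts flagged in the reported finger mask via for_each_set_bit(). The snippet below reproduces that iteration in plain C using a count-trailing-zeros builtin; the mask value is arbitrary and only the loop shape is the point.

/* Standalone sketch (not part of the patch): iterate the set bits of a mask. */
#include <stdio.h>

#define MAX_FINGERS 10

int main(void)
{
	unsigned long finger_mask = 0x25;	/* fingers 0, 2 and 5 reported */

	/* Equivalent of for_each_set_bit(i, &finger_mask, MAX_FINGERS) */
	for (unsigned long m = finger_mask & ((1UL << MAX_FINGERS) - 1); m; m &= m - 1) {
		int slot = __builtin_ctzl(m);	/* index of the lowest set bit */

		printf("report finger in slot %d\n", slot);
	}
	return 0;
}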
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 7887941..ceb6cdc 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1084,9 +1084,14 @@
{
struct device_node *child;
int count = 0;
+ const struct of_device_id __maybe_unused ignore_list[] = {
+ { .compatible = "qcom,sc7180-ipa-virt" },
+ {}
+ };
for_each_available_child_of_node(np, child) {
- if (of_property_read_bool(child, "#interconnect-cells"))
+ if (of_property_read_bool(child, "#interconnect-cells") &&
+ likely(!of_match_node(ignore_list, child)))
count++;
count += of_count_icc_providers(child);
}
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index e398ebf..36f870e 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -226,16 +226,16 @@
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
- int max_node_id;
+ int num_nodes;
int ret;
/* icc_onecell_data is indexed by node_id, unlike nodes param */
- max_node_id = get_max_node_id(nodes, nodes_count);
- data = devm_kzalloc(dev, struct_size(data, nodes, max_node_id),
+ num_nodes = get_max_node_id(nodes, nodes_count) + 1;
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->num_nodes = max_node_id;
+ data->num_nodes = num_nodes;
provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
if (!provider)
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index f6fae64..27cc5f0 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -20,13 +20,18 @@
{
size_t i;
struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
qn = node->data;
+ qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -44,10 +49,8 @@
{
size_t i;
struct qcom_icc_node *qn;
- struct qcom_icc_provider *qp;
qn = node->data;
- qp = to_qcom_provider(node->provider);
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
@@ -67,9 +70,6 @@
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- for (i = 0; i < qn->num_bcms; i++)
- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index 8d9044e..597a7ee 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -47,7 +47,6 @@
DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
@@ -129,7 +128,6 @@
DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
@@ -160,7 +158,6 @@
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
@@ -372,22 +369,6 @@
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node *ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static struct qcom_icc_desc sc7180_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
@@ -611,8 +592,6 @@
.data = &sc7180_dc_noc},
{ .compatible = "qcom,sc7180-gem-noc",
.data = &sc7180_gem_noc},
- { .compatible = "qcom,sc7180-ipa-virt",
- .data = &sc7180_ipa_virt},
{ .compatible = "qcom,sc7180-mc-virt",
.data = &sc7180_mc_virt},
{ .compatible = "qcom,sc7180-mmss-noc",
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index c76b2c7..b936196 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -627,7 +627,6 @@
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index cc558fe..4082004 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -643,7 +643,6 @@
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 6eaefc9..e988f6f 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -84,7 +84,7 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
-#define LOOP_TIMEOUT 100000
+#define LOOP_TIMEOUT 2000000
/*
* ACPI table definitions
*
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 200cf5d..f216a86 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -923,7 +923,8 @@
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = data;
+ cmd->data[2] = lower_32_bits(data);
+ cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
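The amd_iommu fix stores both halves of the 64-bit completion-wait payload instead of truncating it to 32 bits. A trivial standalone illustration of the lower/upper split, with local helpers standing in for lower_32_bits()/upper_32_bits():

/* Standalone sketch (not part of the patch): split a 64-bit value into two
 * 32-bit command words. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t data = 0x1122334455667788ULL;
	uint32_t cmd[4] = { 0 };

	cmd[2] = lower_32(data);	/* previously the upper half was silently dropped */
	cmd[3] = upper_32(data);
	printf("data[2]=0x%08x data[3]=0x%08x\n", cmd[2], cmd[3]);
	return 0;
}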
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 7067b7c..bc4cbc7 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1368,6 +1368,7 @@
dev_info(smmu->dev, "\t0x%016llx\n",
(unsigned long long)evt[i]);
+ cond_resched();
}
/*
@@ -3511,6 +3512,8 @@
/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index df24bbe..6b41fe2 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -2123,11 +2123,10 @@
if (err)
return err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = res->start;
- smmu->base = devm_ioremap_resource(dev, res);
+ smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
+ ioaddr = res->start;
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index b30d6c9..a24390c 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -766,9 +766,12 @@
{
struct device_node *child;
- for_each_child_of_node(qcom_iommu->dev->of_node, child)
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index de324b4..0cdb549 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -635,7 +635,7 @@
ret = iommu_device_register(&data->iommu);
if (ret)
- return ret;
+ goto err_iommu_register;
platform_set_drvdata(pdev, data);
@@ -662,6 +662,10 @@
pm_runtime_enable(dev);
return 0;
+
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b8d0b56..0bc497f 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -385,7 +385,7 @@
static struct notifier_block dmar_pci_bus_nb = {
.notifier_call = dmar_pci_bus_notifier,
- .priority = INT_MIN,
+ .priority = 1,
};
static struct dmar_drhd_unit *
@@ -497,7 +497,7 @@
if (drhd->reg_base_addr == rhsa->base_address) {
int node = pxm_to_node(rhsa->proximity_domain);
- if (!node_online(node))
+ if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b21c822..f23329b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -560,14 +560,36 @@
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+ unsigned long fl_sagaw, sl_sagaw;
+
+ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+ sl_sagaw = cap_sagaw(iommu->cap);
+
+ /* Second level only. */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return sl_sagaw;
+
+ /* First level only. */
+ if (!ecap_slts(iommu->ecap))
+ return fl_sagaw;
+
+ return fl_sagaw & sl_sagaw;
+}
+
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
+ sagaw = __iommu_calculate_sagaw(iommu);
+ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
@@ -1626,7 +1648,8 @@
unsigned long pfn, unsigned int pages,
int ih, int map)
{
- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
u16 did = domain->iommu_did[iommu->seq_id];
@@ -1638,10 +1661,30 @@
if (domain_use_first_level(domain)) {
domain_flush_piotlb(iommu, domain, addr, pages, ih);
} else {
+ unsigned long bitmask = aligned_pages - 1;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
/*
* Fallback to domain selective flush if no PSI support or
- * the size is too big. PSI requires page size to be 2 ^ x,
- * and the base address is naturally aligned to the size.
+ * the size is too big.
*/
if (!cap_pgsel_inv(iommu->cap) ||
mask > cap_max_amask_val(iommu->cap))
@@ -2803,6 +2846,7 @@
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
+ si_domain = NULL;
return -EFAULT;
}
@@ -3462,6 +3506,10 @@
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
+ if (si_domain) {
+ domain_exit(si_domain);
+ si_domain = NULL;
+ }
kfree(g_iommus);
@@ -6275,7 +6323,7 @@
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
- ver != 0x9a)
+ ver != 0x9a && ver != 0xa7)
return;
if (risky_device(dev))
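The new PSI code in iommu_flush_iotlb_psi() widens the invalidation mask when the base pfn is not aligned to the rounded-up page count, using the bits shared between the first and last pfn. The standalone sketch below reproduces that computation with GCC builtins standing in for ilog2()/__ffs(), and walks one unaligned and one aligned example.

/* Standalone sketch (not part of the patch): pick a mask so the naturally
 * aligned 2^mask region still covers [pfn, pfn + pages - 1]. */
#include <stdio.h>

static unsigned int psi_mask(unsigned long pfn, unsigned int pages)
{
	unsigned int aligned_pages = 1;
	unsigned int mask;
	unsigned long bitmask;

	while (aligned_pages < pages)		/* __roundup_pow_of_two(pages) */
		aligned_pages <<= 1;
	mask = __builtin_ctz(aligned_pages);	/* ilog2() of a power of two */
	bitmask = aligned_pages - 1;

	if (bitmask & pfn) {			/* base not aligned to the mask */
		unsigned long end_pfn = pfn + pages - 1;
		unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

		/* Grow the mask up to the lowest bit that pfn and end_pfn share;
		 * everything above it is identical in both, so the aligned
		 * region of that size covers the whole range. */
		mask = shared_bits ? __builtin_ctzl(shared_bits) : 8 * sizeof(long);
	}
	return mask;
}

int main(void)
{
	/* pfn 0x1003, 4 pages: the hardware would align the base down to 0x1000
	 * and a 4-page flush would cover only 0x1000..0x1003, missing
	 * 0x1004..0x1006; the mask grows to 3 (8 pages at 0x1000), which
	 * covers the whole range. */
	printf("mask = %u\n", psi_mask(0x1003, 4));
	/* Aligned case keeps the small mask: 2 (4 pages at 0x1004). */
	printf("mask = %u\n", psi_mask(0x1004, 4));
	return 0;
}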
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index fb911b6..86fd49a 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -669,7 +669,7 @@
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
*/
- if (pasid != PASID_RID2PASID)
+ if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
@@ -704,7 +704,8 @@
* We should set SRE bit as well since the addresses are expected
* to be GPAs.
*/
- pasid_set_sre(pte);
+ if (ecap_srs(iommu->ecap))
+ pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 1164d1a..4600e97 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -138,10 +138,11 @@
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
if (free == cached_iova ||
(free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo)) {
+ free->pfn_lo >= cached_iova->pfn_lo))
iovad->cached32_node = rb_next(&free->node);
+
+ if (free->pfn_lo < iovad->dma_32bit_pfn)
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- }
cached_iova = rb_entry(iovad->cached_node, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 0f18abd..bae6c70 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1013,7 +1013,9 @@
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu->features = of_device_get_match_data(&pdev->dev);
memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
/* Map I/O memory and request IRQ. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3615cd6..149f2e5 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -616,16 +616,19 @@
static int qcom_iommu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
- struct msm_iommu_dev *iommu;
+ struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
- list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
- if (iommu->dev->of_node == spec->np)
+ list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
+ if (iter->dev->of_node == spec->np) {
+ iommu = iter;
break;
+ }
+ }
- if (!iommu || iommu->dev->of_node != spec->np) {
+ if (!iommu) {
ret = -ENODEV;
goto fail;
}
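The msm_iommu change stops using the list_for_each_entry() cursor after the loop and instead records the match in a separate variable, because the cursor does not point at a valid element once the walk completes. The same idea in a standalone singly linked list lookup:

/* Standalone sketch (not part of the patch): decide found/not-found from a
 * variable that is only assigned on a match, never from the loop cursor. */
#include <stddef.h>
#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

static struct node *lookup(struct node *head, int key)
{
	struct node *found = NULL;

	for (struct node *iter = head; iter; iter = iter->next) {
		if (iter->key == key) {
			found = iter;	/* only assign on an actual match */
			break;
		}
	}
	return found;			/* NULL means "not found", unambiguously */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("lookup(2) %s\n", lookup(&a, 2) ? "found" : "missing");
	printf("lookup(9) %s\n", lookup(&a, 9) ? "found" : "missing");
	return 0;
}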
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 19387d2..051815c 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -768,8 +768,7 @@
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
+ list_del(&data->list);
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index a99afb5..259f652 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -32,12 +32,12 @@
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
+ if (len < maxcol) \
+ goto out; \
+ bytes = scnprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
- if (len < maxcol) \
- goto out; \
} while (0)
static ssize_t
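The omap-iommu-debug macro now checks the remaining space before formatting and uses scnprintf(), whose return value is the number of bytes actually stored rather than the would-be length. A userspace approximation of that bounded-append loop, with illustrative register names and values:

/* Standalone sketch (not part of the patch): append formatted records into a
 * fixed buffer, checking the remaining space first. */
#include <stdio.h>

/* scnprintf()-like helper: returns the characters actually stored, never more
 * than size - 1, unlike snprintf() which returns the would-be length. */
static int bounded_printf(char *buf, size_t size, const char *name, unsigned int val)
{
	int n = snprintf(buf, size, "%20s: %08x\n", name, val);

	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
	char dump[96];
	char *p = dump;
	size_t len = sizeof(dump);
	const unsigned int maxcol = 32;		/* space needed for one record */
	const char *regs[] = { "REVISION", "IRQSTATUS", "IRQENABLE", "TTB" };

	for (size_t i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		if (len < maxcol)		/* check space *before* formatting */
			break;
		int n = bounded_printf(p, len, regs[i], 0x1000u + (unsigned int)i);

		p += n;
		len -= (size_t)n;
	}
	fputs(dump, stdout);
	return 0;
}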
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 71f29c0..ff2c692 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1665,7 +1665,7 @@
num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
sizeof(phandle));
if (num_iommus < 0)
- return 0;
+ return ERR_PTR(-ENODEV);
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index dc062e8..3c24bf4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -178,7 +178,7 @@
config IRQ_MIPS_CPU
bool
select GENERIC_IRQ_CHIP
- select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
+ select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -313,7 +313,8 @@
config MIPS_GIC
bool
- select GENERIC_IRQ_IPI
+ select GENERIC_IRQ_IPI if SMP
+ select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
config INGENIC_IRQ
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 84f2741..c76fb70 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -308,7 +308,16 @@
static void armada_xp_mpic_perf_init(void)
{
- unsigned long cpuid = cpu_logical_map(smp_processor_id());
+ unsigned long cpuid;
+
+ /*
+ * This Performance Counter Overflow interrupt is specific for
+ * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
+ */
+ if (!of_machine_is_compatible("marvell,armada-370-xp"))
+ return;
+
+ cpuid = cpu_logical_map(smp_processor_id());
/* Enable Performance Counter Overflow interrupts */
writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 8d591c1..3d32108 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -79,8 +79,8 @@
}
i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
- if (i2c_ic->parent_irq < 0) {
- ret = i2c_ic->parent_irq;
+ if (!i2c_ic->parent_irq) {
+ ret = -EINVAL;
goto err_iounmap;
}
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 0f0aac7..7cb1336 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -159,8 +159,8 @@
}
irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
- rc = irq;
+ if (!irq) {
+ rc = -EINVAL;
goto err;
}
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index b4c1924..38fab02 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -57,6 +57,7 @@
/* The PB11MPCore GIC needs to be configured in the syscon */
map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 42b2953..d8cb5bc 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1615,7 +1615,7 @@
cpu = cpumask_pick_least_loaded(d, tmpmask);
} else {
- cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask);
+ cpumask_copy(tmpmask, aff_mask);
/* If we cannot cross sockets, limit the search to that node */
if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 04d1b39..4c8f18f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -206,11 +206,11 @@
}
}
-static void gic_do_wait_for_rwp(void __iomem *base)
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
u32 count = 1000000; /* 1s! */
- while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ while (readl_relaxed(base + GICD_CTLR) & bit) {
count--;
if (!count) {
pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -224,13 +224,13 @@
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
- gic_do_wait_for_rwp(gic_data.dist_base);
+ gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
- gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}
#ifdef CONFIG_ARM64
@@ -1467,6 +1467,12 @@
if(fwspec->param_count != 2)
return -EINVAL;
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
@@ -1825,7 +1831,7 @@
gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
if (!gic_data.ppi_descs)
- return;
+ goto out_put_node;
nr_parts = of_get_child_count(parts_node);
@@ -1866,12 +1872,15 @@
continue;
cpu = of_cpu_node_to_id(cpu_node);
- if (WARN_ON(cpu < 0))
+ if (WARN_ON(cpu < 0)) {
+ of_node_put(cpu_node);
continue;
+ }
pr_cont("%pOF[%d] ", cpu_node, cpu);
cpumask_set_cpu(cpu, &part->mask);
+ of_node_put(cpu_node);
}
pr_cont("}\n");
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 176f5f0..205cbd2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1094,6 +1094,12 @@
if(fwspec->param_count != 2)
return -EINVAL;
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 2158859..8ada91b 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -50,13 +50,15 @@
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
-static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+#endif /* CONFIG_GENERIC_IRQ_IPI */
static struct gic_all_vpes_chip_data {
u32 map;
@@ -459,9 +461,11 @@
u32 map;
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+#ifdef CONFIG_GENERIC_IRQ_IPI
/* verify that shared irqs don't conflict with an IPI irq */
if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
return -EBUSY;
+#endif /* CONFIG_GENERIC_IRQ_IPI */
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_level_irq_controller,
@@ -550,6 +554,8 @@
.map = gic_irq_domain_map,
};
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
@@ -653,6 +659,48 @@
.match = gic_ipi_domain_match,
};
+static int gic_register_ipi_domain(struct device_node *node)
+{
+ struct irq_domain *gic_ipi_domain;
+ unsigned int v[2], num_ipis;
+
+ gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_ipi_domain_ops, NULL);
+ if (!gic_ipi_domain) {
+ pr_err("Failed to add IPI domain");
+ return -ENXIO;
+ }
+
+ irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+ if (node &&
+ !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+ bitmap_set(ipi_resrv, v[0], v[1]);
+ } else {
+ /*
+ * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+ * meeting the requirements of arch/mips SMP.
+ */
+ num_ipis = 2 * num_possible_cpus();
+ bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+ }
+
+ bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+ return 0;
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline int gic_register_ipi_domain(struct device_node *node)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
static int gic_cpu_startup(unsigned int cpu)
{
/* Enable or disable EIC */
@@ -671,11 +719,12 @@
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
+ unsigned int cpu_vec, i, gicconfig;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
size_t gic_len;
+ int ret;
/* Find the first available CPU vector. */
i = 0;
@@ -717,6 +766,10 @@
}
mips_gic_base = ioremap(gic_base, gic_len);
+ if (!mips_gic_base) {
+ pr_err("Failed to ioremap gic_base\n");
+ return -ENOMEM;
+ }
gicconfig = read_gic_config();
gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
@@ -764,30 +817,9 @@
return -ENXIO;
}
- gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
- IRQ_DOMAIN_FLAG_IPI_PER_CPU,
- GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
- node, &gic_ipi_domain_ops, NULL);
- if (!gic_ipi_domain) {
- pr_err("Failed to add IPI domain");
- return -ENXIO;
- }
-
- irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
-
- if (node &&
- !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
- bitmap_set(ipi_resrv, v[0], v[1]);
- } else {
- /*
- * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
- * meeting the requirements of arch/mips SMP.
- */
- num_ipis = 2 * num_possible_cpus();
- bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
- }
-
- bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+ ret = gic_register_ipi_domain(node);
+ if (ret)
+ return ret;
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
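The MIPS GIC rework moves IPI domain setup behind CONFIG_GENERIC_IRQ_IPI and gives the disabled case a stub, so the caller stays identical either way. A minimal sketch of that compile-time gating pattern, with CONFIG_HAVE_IPI as a stand-in macro:

/* Standalone sketch (not part of the patch): feature-gated helper plus a stub.
 * Build with or without -DCONFIG_HAVE_IPI; main() does not change. */
#include <stdio.h>

#ifdef CONFIG_HAVE_IPI

static int register_ipi_domain(void)
{
	printf("IPI domain registered\n");
	return 0;
}

#else /* !CONFIG_HAVE_IPI */

static inline int register_ipi_domain(void)
{
	return 0;	/* nothing to do, but the caller's error handling still works */
}

#endif /* !CONFIG_HAVE_IPI */

int main(void)
{
	int ret = register_ipi_domain();	/* caller is the same either way */

	if (ret)
		return ret;
	printf("init done\n");
	return 0;
}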
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 21cb31f..e903c44 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -94,6 +94,7 @@
if (!nvic_irq_domain) {
pr_warn("Failed to allocate irq domain\n");
+ iounmap(nvic_base);
return -ENOMEM;
}
@@ -103,6 +104,7 @@
if (ret) {
pr_warn("Failed to allocate irq chips\n");
irq_domain_remove(nvic_irq_domain);
+ iounmap(nvic_base);
return ret;
}
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index 03d2366..d5f1fab 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -66,7 +66,6 @@
.name = "or1k-PIC-level",
.irq_unmask = or1k_pic_unmask,
.irq_mask = or1k_pic_mask,
- .irq_mask_ack = or1k_pic_mask_ack,
},
.handle = handle_level_irq,
.flags = IRQ_LEVEL | IRQ_NOPROBE,
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index abd011f..c7db617 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -37,11 +37,26 @@
u32 spi_base;
};
-static void exiu_irq_eoi(struct irq_data *d)
+static void exiu_irq_ack(struct irq_data *d)
{
struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
writel(BIT(d->hwirq), data->base + EIREQCLR);
+}
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ /*
+ * Level triggered interrupts are latched and must be cleared during
+ * EOI or the interrupt will be jammed on. Of course if a level
+ * triggered interrupt is still asserted then the write will not clear
+ * the interrupt.
+ */
+ if (irqd_is_level_type(d))
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+
irq_chip_eoi_parent(d);
}
@@ -91,10 +106,13 @@
writel_relaxed(val, data->base + EILVL);
val = readl_relaxed(data->base + EIEDG);
- if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) {
val &= ~BIT(d->hwirq);
- else
+ irq_set_handler_locked(d, handle_fasteoi_irq);
+ } else {
val |= BIT(d->hwirq);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ }
writel_relaxed(val, data->base + EIEDG);
writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
@@ -104,6 +122,7 @@
static struct irq_chip exiu_irq_chip = {
.name = "EXIU",
+ .irq_ack = exiu_irq_ack,
.irq_eoi = exiu_irq_eoi,
.irq_enable = exiu_irq_enable,
.irq_mask = exiu_irq_mask,
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e1f771c..ad3e2c1 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -148,10 +148,10 @@
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@
lic->base[i] = base;
/* Disable all interrupts */
- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 2793333..8c581c9 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -151,14 +151,25 @@
.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
+static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
+{
+ unsigned int i;
+
+ irq_set_default_host(root_domain);
+ secondary_init_irq();
+
+ /* Initialize default IRQ routing to CPU 0 */
+ for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
+ set_er(1, MIROUT(i));
+}
+
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
- irq_set_default_host(root_domain);
- secondary_init_irq();
+ xtensa_mx_init_common(root_domain);
return 0;
}
@@ -168,8 +179,7 @@
struct irq_domain *root_domain =
irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
- irq_set_default_host(root_domain);
- secondary_init_irq();
+ xtensa_mx_init_common(root_domain);
return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 5dc63c2..fc747b7 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -74,17 +74,18 @@
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
+ unsigned long flags;
u32 index, mask;
u32 enable;
index = pin_out / 32;
mask = pin_out % 32;
- raw_spin_lock(&pdc_lock);
+ raw_spin_lock_irqsave(&pdc_lock, flags);
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
- raw_spin_unlock(&pdc_lock);
+ raw_spin_unlock_irqrestore(&pdc_lock, flags);
}
static void qcom_pdc_gic_disable(struct irq_data *d)
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index a52f275..f844713 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -956,7 +956,7 @@
}
if (card->irq > 0)
free_irq(card->irq, card);
- if (card->isac.dch.dev.dev.class)
+ if (device_is_registered(&card->isac.dch.dev.dev))
mISDN_unregister_device(&card->isac.dch.dev);
for (i = 0; i < 2; i++) {
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index a41b4b2..90ee56d 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -222,7 +222,7 @@
err = get_free_devid();
if (err < 0)
- goto error1;
+ return err;
dev->id = err;
device_initialize(&dev->dev);
@@ -233,11 +233,12 @@
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "mISDN_register %s %d\n",
dev_name(&dev->dev), dev->id);
+ dev->dev.class = &mISDN_class;
+
err = create_stack(dev);
if (err)
goto error1;
- dev->dev.class = &mISDN_class;
dev->dev.platform_data = dev;
dev->dev.parent = parent;
dev_set_drvdata(&dev->dev, dev);
@@ -249,8 +250,8 @@
error3:
delete_stack(dev);
- return err;
error1:
+ put_device(&dev->dev);
return err;
}
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index c3b2c99..cfbcd9e 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -77,6 +77,7 @@
if (!entry)
return -ENOMEM;
+ INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
entry->dev.class = elements_class;
@@ -107,7 +108,7 @@
device_unregister(&entry->dev);
return ret;
err1:
- kfree(entry);
+ put_device(&entry->dev);
return ret;
}
EXPORT_SYMBOL(mISDN_dsp_element_register);
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index 7ea10db..48133d0 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -59,6 +59,7 @@
int bundle; /* bundle channels in one frm */
int codec; /* codec to use for transmis. */
int limit; /* limit number of bchannels */
+ bool shutdown; /* if card is released */
/* timer */
struct timer_list keep_tl;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index b57dcb8..aec4f2a 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -275,7 +275,7 @@
p = frame;
/* restart timer */
- if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
@@ -601,7 +601,9 @@
goto multiframe;
/* restart timer */
- if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
+ if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
+ !hc->timeout_on) &&
+ !hc->shutdown) {
hc->timeout_on = 1;
mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
@@ -1232,11 +1234,10 @@
{
int ch;
- if (timer_pending(&hc->keep_tl))
- del_timer(&hc->keep_tl);
+ hc->shutdown = true;
- if (timer_pending(&hc->timeout_tl))
- del_timer(&hc->timeout_tl);
+ del_timer_sync(&hc->keep_tl);
+ del_timer_sync(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
diff --git a/drivers/leds/leds-lm3601x.c b/drivers/leds/leds-lm3601x.c
index d0e1d48..3d12727 100644
--- a/drivers/leds/leds-lm3601x.c
+++ b/drivers/leds/leds-lm3601x.c
@@ -444,8 +444,6 @@
{
struct lm3601x_led *led = i2c_get_clientdata(client);
- mutex_destroy(&led->lock);
-
return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
LM3601X_ENABLE_MASK,
LM3601X_MODE_STANDBY);
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 4c2ce21..95b0914 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -5,7 +5,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK
+ depends on BLOCK && BROKEN
help
Say Y here to get to enable Open-channel SSDs.
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 5cdc361..539a2ed 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -44,6 +44,7 @@
config ADB_CUDA
bool "Support for Cuda/Egret based Macs and PowerMacs"
depends on (ADB || PPC_PMAC) && !PPC_PMAC64
+ select RTC_LIB
help
This provides support for Cuda/Egret based Macintosh and
Power Macintosh systems. This includes most m68k based Macs,
@@ -57,6 +58,7 @@
config ADB_PMU
bool "Support for PMU based PowerMacs and PowerBooks"
depends on PPC_PMAC || MAC
+ select RTC_LIB
help
On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the
PMU is an embedded microprocessor whose primary function is to
@@ -67,6 +69,10 @@
this device; you should do so if your machine is one of those
mentioned above.
+config ADB_PMU_EVENT
+ def_bool y
+ depends on ADB_PMU && INPUT=y
+
config ADB_PMU_LED
bool "Support for the Power/iBook front LED"
depends on PPC_PMAC && ADB_PMU
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 49819b1..712edcb 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -12,7 +12,8 @@
obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o
-obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o
+obj-$(CONFIG_ADB_PMU) += via-pmu.o
+obj-$(CONFIG_ADB_PMU_EVENT) += via-pmu-event.o
obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o
obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
obj-$(CONFIG_ADB_CUDA) += via-cuda.o
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 73b3961..afb0942 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@
switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
break;
mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 73e6ae8..aae6328 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1460,7 +1460,7 @@
pmu_pass_intr(data, len);
/* len == 6 is probably a bad check. But how do I
* know what PMU versions send what events here? */
- if (len == 6) {
+ if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) {
via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
via_pmu_event(PMU_EVT_LID, data[1]&1);
}
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index bee33ab..e913ed1 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -632,15 +632,15 @@
rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- if (rc < 0)
- return rc;
+ if (!rc)
+ return -EIO;
rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
- if (rc < 0) {
+ if (!rc) {
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- return rc;
+ return -EIO;
}
return 0;
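
Context for the flexrm change above: dma_map_sg() reports failure by returning 0 (no entries mapped), never a negative errno, so the old `rc < 0` test could never fire. A minimal userspace sketch with a hypothetical stand-in mapping function (not the real DMA API) illustrates the difference between the two checks.

#include <stdio.h>

/* Hypothetical stand-in that fails the way dma_map_sg() does: by returning 0. */
static int mock_map_sg(int fail)
{
        return fail ? 0 : 4;    /* 4 scatterlist entries mapped on success */
}

int main(void)
{
        int rc = mock_map_sg(1);

        if (rc < 0)             /* old check: can never be true */
                printf("old check caught the failure\n");
        if (!rc)                /* new check: catches rc == 0, mapped to -EIO */
                printf("new check caught the failure\n");
        return 0;
}
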
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 2543c7b..c566339 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/slab.h>
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
@@ -66,6 +67,7 @@
const struct imx_mu_dcfg *dcfg;
struct clk *clk;
int irq;
+ bool suspend;
u32 xcr;
@@ -277,6 +279,9 @@
return IRQ_NONE;
}
+ if (priv->suspend)
+ pm_system_wakeup();
+
return IRQ_HANDLED;
}
@@ -326,6 +331,8 @@
break;
}
+ priv->suspend = true;
+
return 0;
}
@@ -543,6 +550,8 @@
clk_disable_unprepare(priv->clk);
+ priv->suspend = false;
+
return 0;
disable_runtime_pm:
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 3e7d4b2..4229b9b 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -82,11 +82,11 @@
exit:
spin_unlock_irqrestore(&chan->lock, flags);
- /* kick start the timer immediately to avoid delays */
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
- /* but only if not already active */
- if (!hrtimer_active(&chan->mbox->poll_hrt))
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ /* kick start the timer immediately to avoid delays */
+ spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
}
}
@@ -120,20 +120,26 @@
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
+ unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
if (chan->active_req && chan->cl) {
- resched = true;
txdone = chan->mbox->ops->last_tx_done(chan);
if (txdone)
tx_tick(chan, 0);
+ else
+ resched = true;
}
}
if (resched) {
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
@@ -500,6 +506,7 @@
hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
mbox->poll_hrt.function = txdone_hrtimer;
+ spin_lock_init(&mbox->poll_hrt_lock);
}
for (i = 0; i < mbox->num_chans; i++) {
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index e07091d..4895d80 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -410,6 +410,11 @@
value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX);
if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) {
mbox_chan_txdone(chan, 0);
+
+ /* Wait until channel is empty */
+ if (chan->active_req != NULL)
+ continue;
+
return 0;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index fe6dce1..b47c00d 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2006,8 +2006,7 @@
int i;
struct bkey *k = NULL;
struct btree_iter iter;
- struct btree_check_state *check_state;
- char name[32];
+ struct btree_check_state check_state;
/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
@@ -2018,61 +2017,59 @@
if (c->root->level == 0)
return 0;
- check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
- if (!check_state)
- return -ENOMEM;
+ memset(&check_state, 0, sizeof(struct btree_check_state));
+ check_state.c = c;
+ check_state.total_threads = bch_btree_chkthread_nr();
+ check_state.key_idx = 0;
+ spin_lock_init(&check_state.idx_lock);
+ atomic_set(&check_state.started, 0);
+ atomic_set(&check_state.enough, 0);
+ init_waitqueue_head(&check_state.wait);
- check_state->c = c;
- check_state->total_threads = bch_btree_chkthread_nr();
- check_state->key_idx = 0;
- spin_lock_init(&check_state->idx_lock);
- atomic_set(&check_state->started, 0);
- atomic_set(&check_state->enough, 0);
- init_waitqueue_head(&check_state->wait);
-
+ rw_lock(0, c->root, c->root->level);
/*
* Run multiple threads to check btree nodes in parallel,
- * if check_state->enough is non-zero, it means current
+ * if check_state.enough is non-zero, it means current
* running check threads are enough, unncessary to create
* more.
*/
- for (i = 0; i < check_state->total_threads; i++) {
- /* fetch latest check_state->enough earlier */
+ for (i = 0; i < check_state.total_threads; i++) {
+ /* fetch latest check_state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&check_state->enough))
+ if (atomic_read(&check_state.enough))
break;
- check_state->infos[i].result = 0;
- check_state->infos[i].state = check_state;
- snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
- atomic_inc(&check_state->started);
+ check_state.infos[i].result = 0;
+ check_state.infos[i].state = &check_state;
- check_state->infos[i].thread =
+ check_state.infos[i].thread =
kthread_run(bch_btree_check_thread,
- &check_state->infos[i],
- name);
- if (IS_ERR(check_state->infos[i].thread)) {
+ &check_state.infos[i],
+ "bch_btrchk[%d]", i);
+ if (IS_ERR(check_state.infos[i].thread)) {
pr_err("fails to run thread bch_btrchk[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(check_state->infos[i].thread);
+ kthread_stop(check_state.infos[i].thread);
ret = -ENOMEM;
goto out;
}
+ atomic_inc(&check_state.started);
}
- wait_event_interruptible(check_state->wait,
- atomic_read(&check_state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
+ /*
+ * Must wait for all threads to stop.
+ */
+ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
- for (i = 0; i < check_state->total_threads; i++) {
- if (check_state->infos[i].result) {
- ret = check_state->infos[i].result;
+ for (i = 0; i < check_state.total_threads; i++) {
+ if (check_state.infos[i].result) {
+ ret = check_state.infos[i].result;
goto out;
}
}
out:
- kfree(check_state);
+ rw_unlock(0, c->root);
return ret;
}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 5048210..1b5fdbc 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -226,7 +226,7 @@
int result;
};
-#define BCH_BTR_CHKTHREAD_MAX 64
+#define BCH_BTR_CHKTHREAD_MAX 12
struct btree_check_state {
struct cache_set *c;
int total_threads;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index c6613e8..aea4833 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -407,6 +407,11 @@
return ret;
}
+void bch_journal_space_reserve(struct journal *j)
+{
+ j->do_reserve = true;
+}
+
/* Journalling */
static void btree_flush_write(struct cache_set *c)
@@ -625,12 +630,30 @@
}
}
+static unsigned int free_journal_buckets(struct cache_set *c)
+{
+ struct journal *j = &c->journal;
+ struct cache *ca = c->cache;
+ struct journal_device *ja = &c->cache->journal;
+ unsigned int n;
+
+ /* In case njournal_buckets is not a power of 2 */
+ if (ja->cur_idx >= ja->discard_idx)
+ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
+ else
+ n = ja->discard_idx - ja->cur_idx;
+
+ if (n > (1 + j->do_reserve))
+ return n - (1 + j->do_reserve);
+
+ return 0;
+}
+
static void journal_reclaim(struct cache_set *c)
{
struct bkey *k = &c->journal.key;
struct cache *ca = c->cache;
uint64_t last_seq;
- unsigned int next;
struct journal_device *ja = &ca->journal;
atomic_t p __maybe_unused;
@@ -653,12 +676,10 @@
if (c->journal.blocks_free)
goto out;
- next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
- /* No space available on this device */
- if (next == ja->discard_idx)
+ if (!free_journal_buckets(c))
goto out;
- ja->cur_idx = next;
+ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
k->ptr[0] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
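
The new free_journal_buckets() treats the journal as a ring of ca->sb.njournal_buckets entries and subtracts the bucket in use plus an optional reserved bucket. The userspace sketch below (made-up bucket counts, plain parameters instead of the cache_set structures) walks through the same arithmetic.

#include <stdio.h>

static unsigned int free_buckets(unsigned int nbuckets, unsigned int cur_idx,
                                 unsigned int discard_idx, unsigned int do_reserve)
{
        unsigned int n;

        /* Ring distance from cur_idx forward to discard_idx; the first branch
         * also copes with a bucket count that is not a power of 2. */
        if (cur_idx >= discard_idx)
                n = nbuckets + discard_idx - cur_idx;
        else
                n = discard_idx - cur_idx;

        /* Keep the bucket currently in use, plus one reserved bucket once
         * bch_journal_space_reserve() has run. */
        if (n > 1 + do_reserve)
                return n - (1 + do_reserve);
        return 0;
}

int main(void)
{
        /* 8 buckets, writing at index 6, discard has reached index 2:
         * 4 buckets ahead, minus 1 in use and 1 reserved = 2 usable. */
        printf("%u\n", free_buckets(8, 6, 2, 1));
        return 0;
}
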
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index f2ea34d..cd316b4 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -105,6 +105,7 @@
spinlock_t lock;
spinlock_t flush_write_lock;
bool btree_flushing;
+ bool do_reserve;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
@@ -182,5 +183,6 @@
void bch_journal_free(struct cache_set *c);
int bch_journal_alloc(struct cache_set *c);
+void bch_journal_space_reserve(struct journal *j);
#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2143263..9789526 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1109,6 +1109,12 @@
* which would call closure_get(&dc->disk.cl)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+ if (!ddip) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio->bi_end_io(bio);
+ return;
+ }
+
ddip->d = d;
/* Count on the bcache device */
ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 81f1cc5..7f5ea25 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2150,6 +2150,7 @@
flash_devs_run(c);
+ bch_journal_space_reserve(&c->journal);
set_bit(CACHE_SET_RUNNING, &c->flags);
return 0;
err:
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3c74996..3aa73da 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -119,6 +119,53 @@
dc->writeback_rate_target = target;
}
+static bool idle_counter_exceeded(struct cache_set *c)
+{
+ int counter, dev_nr;
+
+ /*
+ * If c->idle_counter has overflowed (idle for a really long time),
+ * reset it to 0 and do not set the maximum rate this time, for code
+ * simplicity.
+ */
+ counter = atomic_inc_return(&c->idle_counter);
+ if (counter <= 0) {
+ atomic_set(&c->idle_counter, 0);
+ return false;
+ }
+
+ dev_nr = atomic_read(&c->attached_dev_nr);
+ if (dev_nr == 0)
+ return false;
+
+ /*
+ * c->idle_counter is increased by writeback thread of all
+ * attached backing devices, in order to represent a rough
+ * time period, counter should be divided by dev_nr.
+ * Otherwise the idle time cannot be larger with more backing
+ * device attached.
+ * The following calculation equals to checking
+ * (counter / dev_nr) < (dev_nr * 6)
+ */
+ if (counter < (dev_nr * dev_nr * 6))
+ return false;
+
+ return true;
+}
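
A standalone sketch of the check above (plain ints instead of atomics, made-up numbers): the helper avoids a division by testing counter against dev_nr * dev_nr * 6, which is equivalent to (counter / dev_nr) < (dev_nr * 6), and treats a negative (overflowed) counter as not yet idle.

#include <stdbool.h>
#include <stdio.h>

static bool idle_counter_exceeded(int counter, int dev_nr)
{
        if (counter <= 0)       /* overflowed; the kernel code also resets it */
                return false;
        if (dev_nr == 0)
                return false;
        return counter >= dev_nr * dev_nr * 6;
}

int main(void)
{
        printf("%d\n", idle_counter_exceeded(23, 2));   /* 0: 23 < 2*2*6 */
        printf("%d\n", idle_counter_exceeded(24, 2));   /* 1: 24 >= 24   */
        return 0;
}
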
+
+/*
+ * Idle_counter is increased every time when update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it is about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the max writeback rate is set
+ * to each dc->writeback_rate.rate.
+ * In order to avoid extra locking cost for counting the exact number of
+ * dirty cached devices, c->attached_dev_nr is used to calculate the idle
+ * threshold. It might be bigger if not all cached devices are in write-
+ * back mode, but it still works well with limited extra rounds of
+ * update_writeback_rate().
+ */
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
@@ -129,21 +176,8 @@
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
- /*
- * Idle_counter is increased everytime when update_writeback_rate() is
- * called. If all backing devices attached to the same cache set have
- * identical dc->writeback_rate_update_seconds values, it is about 6
- * rounds of update_writeback_rate() on each backing device before
- * c->at_max_writeback_rate is set to 1, and then max wrteback rate set
- * to each dc->writeback_rate.rate.
- * In order to avoid extra locking cost for counting exact dirty cached
- * devices number, c->attached_dev_nr is used to calculate the idle
- * throushold. It might be bigger if not all cached device are in write-
- * back mode, but it still works well with limited extra rounds of
- * update_writeback_rate().
- */
- if (atomic_inc_return(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6)
+
+ if (!idle_counter_exceeded(c))
return false;
if (atomic_read(&c->at_max_writeback_rate) != 1)
@@ -157,13 +191,10 @@
dc->writeback_rate_change = 0;
/*
- * Check c->idle_counter and c->at_max_writeback_rate agagain in case
- * new I/O arrives during before set_at_max_writeback_rate() returns.
- * Then the writeback rate is set to 1, and its new value should be
- * decided via __update_writeback_rate().
+ * In case new I/O arrives just before
+ * set_at_max_writeback_rate() returns.
*/
- if ((atomic_read(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6) ||
+ if (!idle_counter_exceeded(c) ||
!atomic_read(&c->at_max_writeback_rate))
return false;
@@ -756,13 +787,11 @@
/* Init */
#define INIT_KEYS_EACH_TIME 500000
-#define INIT_KEYS_SLEEP_MS 100
struct sectors_dirty_init {
struct btree_op op;
unsigned int inode;
size_t count;
- struct bkey start;
};
static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -778,11 +807,8 @@
KEY_START(k), KEY_SIZE(k));
op->count++;
- if (atomic_read(&b->c->search_inflight) &&
- !(op->count % INIT_KEYS_EACH_TIME)) {
- bkey_copy_key(&op->start, k);
- return -EAGAIN;
- }
+ if (!(op->count % INIT_KEYS_EACH_TIME))
+ cond_resched();
return MAP_CONTINUE;
}
@@ -797,24 +823,16 @@
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
- do {
- ret = bcache_btree(map_keys_recurse,
- k,
- c->root,
- &op.op,
- &op.start,
- sectors_dirty_init_fn,
- 0);
- if (ret == -EAGAIN)
- schedule_timeout_interruptible(
- msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
- else if (ret < 0) {
- pr_warn("sectors dirty init failed, ret=%d!\n", ret);
- break;
- }
- } while (ret == -EAGAIN);
+ ret = bcache_btree(map_keys_recurse,
+ k,
+ c->root,
+ &op.op,
+ &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn,
+ 0);
+ if (ret < 0)
+ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
return ret;
}
@@ -858,7 +876,6 @@
goto out;
}
skip_nr--;
- cond_resched();
}
if (p) {
@@ -868,7 +885,6 @@
p = NULL;
prev_idx = cur_idx;
- cond_resched();
}
out:
@@ -899,65 +915,56 @@
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
- struct bch_dirty_init_state *state;
- char name[32];
+ struct bch_dirty_init_state state;
/* Just count root keys if no leaf node */
+ rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
- op.start = KEY(op.inode, 0, 0);
for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid)
sectors_dirty_init_fn(&op.op, c->root, k);
+
+ rw_unlock(0, c->root);
return;
}
- state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
- if (!state) {
- pr_warn("sectors dirty init failed: cannot allocate memory\n");
- return;
- }
+ memset(&state, 0, sizeof(struct bch_dirty_init_state));
+ state.c = c;
+ state.d = d;
+ state.total_threads = bch_btre_dirty_init_thread_nr();
+ state.key_idx = 0;
+ spin_lock_init(&state.idx_lock);
+ atomic_set(&state.started, 0);
+ atomic_set(&state.enough, 0);
+ init_waitqueue_head(&state.wait);
- state->c = c;
- state->d = d;
- state->total_threads = bch_btre_dirty_init_thread_nr();
- state->key_idx = 0;
- spin_lock_init(&state->idx_lock);
- atomic_set(&state->started, 0);
- atomic_set(&state->enough, 0);
- init_waitqueue_head(&state->wait);
-
- for (i = 0; i < state->total_threads; i++) {
- /* Fetch latest state->enough earlier */
+ for (i = 0; i < state.total_threads; i++) {
+ /* Fetch latest state.enough earlier */
smp_mb__before_atomic();
- if (atomic_read(&state->enough))
+ if (atomic_read(&state.enough))
break;
- state->infos[i].state = state;
- atomic_inc(&state->started);
- snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
-
- state->infos[i].thread =
- kthread_run(bch_dirty_init_thread,
- &state->infos[i],
- name);
- if (IS_ERR(state->infos[i].thread)) {
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
for (--i; i >= 0; i--)
- kthread_stop(state->infos[i].thread);
+ kthread_stop(state.infos[i].thread);
goto out;
}
+ atomic_inc(&state.started);
}
- wait_event_interruptible(state->wait,
- atomic_read(&state->started) == 0 ||
- test_bit(CACHE_SET_IO_DISABLE, &c->flags));
-
out:
- kfree(state);
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+ rw_unlock(0, c->root);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 3f1230e..0f1d969 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -16,7 +16,7 @@
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
-#define BCH_DIRTY_INIT_THRD_MAX 64
+#define BCH_DIRTY_INIT_THRD_MAX 12
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2aa4acd..3d975db 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2561,7 +2561,7 @@
static int get_key_size(char **key_string)
{
- return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+ return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}
#endif /* CONFIG_KEYS */
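
Why the (int) cast above matters: in `cond ? -EINVAL : strlen(s) >> 1`, the usual arithmetic conversions give the whole conditional the unsigned type size_t, so the negative error code is converted to a huge unsigned value inside the expression; casting the strlen() arm to int keeps both arms, and the result, signed. A small userspace sketch with a made-up key string:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *key_string = ":32:logon:key_desc";   /* hypothetical */

        /* Mixed form: the conditional has type size_t. */
        size_t mixed = (key_string[0] == ':') ? -EINVAL : strlen(key_string) >> 1;

        /* Cast form: both arms are int, so the error code stays negative. */
        int fixed = (key_string[0] == ':') ? -EINVAL : (int)(strlen(key_string) >> 1);

        printf("mixed = %zu\n", mixed);   /* huge unsigned value       */
        printf("fixed = %d\n", fixed);    /* -22 (-EINVAL) as intended */
        return 0;
}
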
@@ -3404,6 +3404,11 @@
return DM_MAPIO_SUBMITTED;
}
+static char hex2asc(unsigned char c)
+{
+ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+}
+
static void crypt_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -3422,9 +3427,12 @@
if (cc->key_size > 0) {
if (cc->key_string)
DMEMIT(":%u:%s", cc->key_size, cc->key_string);
- else
- for (i = 0; i < cc->key_size; i++)
- DMEMIT("%02x", cc->key[i]);
+ else {
+ for (i = 0; i < cc->key_size; i++) {
+ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
+ hex2asc(cc->key[i] & 0xf));
+ }
+ }
} else
DMEMIT("-");
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index d9ac737..96bad05 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1396,7 +1396,7 @@
static void stop_worker(struct era *era)
{
atomic_set(&era->suspended, 1);
- flush_workqueue(era->wq);
+ drain_workqueue(era->wq);
}
/*----------------------------------------------------------------
@@ -1566,6 +1566,12 @@
}
stop_worker(era);
+
+ r = metadata_commit(era->md);
+ if (r) {
+ DMERR("%s: metadata_commit failed", __func__);
+ /* FIXME: fail mode */
+ }
}
static int era_preresume(struct dm_target *ti)
diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c
index 186f91e..06fe43c 100644
--- a/drivers/md/dm-historical-service-time.c
+++ b/drivers/md/dm-historical-service-time.c
@@ -429,7 +429,7 @@
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
- u64 time_now = sched_clock();
+ u64 time_now = ktime_get_ns();
struct dm_path *ret = NULL;
unsigned long flags;
@@ -470,7 +470,7 @@
static u64 path_service_time(struct path_info *pi, u64 start_time)
{
- u64 sched_now = ktime_get_ns();
+ u64 now = ktime_get_ns();
/* if a previous disk request has finished after this IO was
* sent to the hardware, pretend the submission happened
@@ -479,11 +479,11 @@
if (time_after64(pi->last_finish, start_time))
start_time = pi->last_finish;
- pi->last_finish = sched_now;
- if (time_before64(sched_now, start_time))
+ pi->last_finish = now;
+ if (time_before64(now, start_time))
return 0;
- return sched_now - start_time;
+ return now - start_time;
}
static int hst_end_io(struct path_selector *ps, struct dm_path *path,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 4c7da1c..2156a2d 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -254,6 +254,7 @@
struct completion crypto_backoff;
+ bool wrote_to_journal;
bool journal_uptodate;
bool just_formatted;
bool recalculate_flag;
@@ -2256,6 +2257,8 @@
if (!commit_sections)
goto release_flush_bios;
+ ic->wrote_to_journal = true;
+
i = commit_start;
for (n = 0; n < commit_sections; n++) {
for (j = 0; j < ic->journal_section_entries; j++) {
@@ -2354,9 +2357,11 @@
dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
+ if (unlikely(sec >= ic->provided_data_sectors)) {
+ journal_entry_set_unused(je);
+ continue;
+ }
}
- if (unlikely(sec >= ic->provided_data_sectors))
- continue;
get_area_and_offset(ic, sec, &area, &offset);
restore_last_bytes(ic, access_journal_data(ic, i, j), je);
for (k = j + 1; k < ic->journal_section_entries; k++) {
@@ -2468,10 +2473,6 @@
unsigned prev_free_sectors;
- /* the following test is not needed, but it tests the replay code */
- if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
- return;
-
spin_lock_irq(&ic->endio_wait.lock);
write_start = ic->committed_section;
write_sections = ic->n_committed_sections;
@@ -2978,10 +2979,17 @@
drain_workqueue(ic->commit_wq);
if (ic->mode == 'J') {
- if (ic->meta_dev)
- queue_work(ic->writer_wq, &ic->writer_work);
+ queue_work(ic->writer_wq, &ic->writer_work);
drain_workqueue(ic->writer_wq);
dm_integrity_flush_buffers(ic, true);
+ if (ic->wrote_to_journal) {
+ init_journal(ic, ic->free_section,
+ ic->journal_sections - ic->free_section, ic->commit_seq);
+ if (ic->free_section) {
+ init_journal(ic, 0, ic->free_section,
+ next_commit_seq(ic->commit_seq));
+ }
+ }
}
if (ic->mode == 'B') {
@@ -3009,6 +3017,8 @@
DEBUG_print("resume\n");
+ ic->wrote_to_journal = false;
+
if (ic->provided_data_sectors != old_provided_data_sectors) {
if (ic->provided_data_sectors > old_provided_data_sectors &&
ic->mode == 'B' &&
@@ -4230,6 +4240,7 @@
}
if (ic->internal_hash) {
+ size_t recalc_tags_size;
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -4243,8 +4254,10 @@
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
- ic->tag_size, GFP_KERNEL);
+ recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
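
The allocation above sizes the recalculation tag buffer as one tag per block in the recalculation window, padded so a full internal-hash digest can be written even when the digest is wider than the on-disk tag. A quick arithmetic sketch with assumed example values (window size, block size, tag and digest widths are all made up here):

#include <stdio.h>

int main(void)
{
        unsigned recalc_sectors = 32768;        /* assumed recalculation window */
        unsigned log2_sectors_per_block = 3;    /* assumed 4 KiB blocks         */
        unsigned tag_size = 4;                  /* assumed on-disk tag width    */
        unsigned digest_size = 32;              /* assumed internal hash width  */

        size_t size = (size_t)(recalc_sectors >> log2_sectors_per_block) * tag_size;
        if (digest_size > tag_size)
                size += digest_size - tag_size;

        /* 4096 tags * 4 bytes + 28 bytes of slack = 16412 bytes */
        printf("recalc_tags_size = %zu\n", size);
        return 0;
}
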
@@ -4322,8 +4335,6 @@
}
if (should_write_sb) {
- int r;
-
init_journal(ic, 0, ic->journal_sections, 0);
r = dm_integrity_failed(ic);
if (unlikely(r)) {
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1ca65b4..20171c9 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -17,6 +17,7 @@
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
@@ -572,7 +573,7 @@
size_t *needed = needed_param;
*needed += sizeof(struct dm_target_versions);
- *needed += strlen(tt->name);
+ *needed += strlen(tt->name) + 1;
*needed += ALIGN_MASK;
}
@@ -637,7 +638,7 @@
iter_info.old_vers = NULL;
iter_info.vers = vers;
iter_info.flags = 0;
- iter_info.end = (char *)vers+len;
+ iter_info.end = (char *)vers + needed;
/*
* Now loop through filling out the names & versions.
@@ -1696,6 +1697,7 @@
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
return NULL;
+ cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
*ioctl_flags = _ioctls[cmd].flags;
return _ioctls[cmd].fn;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 33e71ea..fe3a947 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -415,8 +415,7 @@
/*
* Work out how many "unsigned long"s we need to hold the bitset.
*/
- bitset_size = dm_round_up(region_count,
- sizeof(*lc->clean_bits) << BYTE_SHIFT);
+ bitset_size = dm_round_up(region_count, BITS_PER_LONG);
bitset_size >>= BYTE_SHIFT;
lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
@@ -616,7 +615,7 @@
log_clear_bit(lc, lc->clean_bits, i);
/* clear any old bits -- device has shrunk */
- for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
+ for (i = lc->region_count; i % BITS_PER_LONG; i++)
log_clear_bit(lc, lc->clean_bits, i);
/* copy clean across to sync */
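
The rounding fix above matters because the clean-bits bitmap is manipulated with long-sized bit operations, so its backing storage has to cover a whole number of unsigned longs, not just of 32-bit words. A userspace sketch (assumed 64-bit longs, made-up region count):

#include <stdio.h>

#define BITS_PER_LONG 64
#define BYTE_SHIFT 3

/* Round n up to a multiple of sz (mirrors what dm_round_up does here). */
static unsigned long round_up_to(unsigned long n, unsigned long sz)
{
        return ((n + sz - 1) / sz) * sz;
}

int main(void)
{
        unsigned long region_count = 20;        /* assumed */
        unsigned long old_bytes = round_up_to(region_count, 32) >> BYTE_SHIFT;
        unsigned long new_bytes = round_up_to(region_count, BITS_PER_LONG) >> BYTE_SHIFT;

        /* old = 4 bytes, new = 8 bytes: one full unsigned long, matching the
         * trailing-bit clearing loop that now runs up to a BITS_PER_LONG
         * boundary. */
        printf("old=%lu new=%lu\n", old_bytes, new_bytes);
        return 0;
}
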
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f5083b4..a2d09c9 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1002,12 +1002,13 @@
static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned int i, rebuild_cnt = 0;
- unsigned int rebuilds_per_group = 0, copies;
+ unsigned int rebuilds_per_group = 0, copies, raid_disks;
unsigned int group_size, last_group_start;
- for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
- !rs->dev[i].rdev.sb_page)
+ for (i = 0; i < rs->raid_disks; i++)
+ if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+ ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+ !rs->dev[i].rdev.sb_page)))
rebuild_cnt++;
switch (rs->md.level) {
@@ -1047,8 +1048,9 @@
* A A B B C
* C D D E E
*/
+ raid_disks = min(rs->raid_disks, rs->md.raid_disks);
if (__is_raid10_near(rs->md.new_layout)) {
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < raid_disks; i++) {
if (!(i % copies))
rebuilds_per_group = 0;
if ((!rs->dev[i].rdev.sb_page ||
@@ -1071,10 +1073,10 @@
* results in the need to treat the last (potentially larger)
* set differently.
*/
- group_size = (rs->md.raid_disks / copies);
- last_group_start = (rs->md.raid_disks / group_size) - 1;
+ group_size = (raid_disks / copies);
+ last_group_start = (raid_disks / group_size) - 1;
last_group_start *= group_size;
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < raid_disks; i++) {
if (!(i % copies) && !(i > last_group_start))
rebuilds_per_group = 0;
if ((!rs->dev[i].rdev.sb_page ||
@@ -1589,7 +1591,7 @@
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
struct md_rdev *rdev = &rs->dev[i].rdev;
if (!test_bit(Journal, &rdev->flags) &&
@@ -3512,7 +3514,7 @@
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -3732,13 +3734,13 @@
unsigned int i;
int r = 0;
- for (i = 0; !r && i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev)
- r = fn(ti,
- rs->dev[i].data_dev,
- 0, /* No offset on data devs */
- rs->md.dev_sectors,
- data);
+ for (i = 0; !r && i < rs->raid_disks; i++) {
+ if (rs->dev[i].data_dev) {
+ r = fn(ti, rs->dev[i].data_dev,
+ 0, /* No offset on data devs */
+ rs->md.dev_sectors, data);
+ }
+ }
return r;
}
@@ -3792,7 +3794,7 @@
memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
- for (i = 0; i < mddev->raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
r = &rs->dev[i].rdev;
/* HM FIXME: enhance journal device recovery processing */
if (test_bit(Journal, &r->flags))
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b1e867f..5f933db 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -397,7 +397,7 @@
}
/* The target has remapped the I/O so dispatch it */
- trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
@@ -492,8 +492,13 @@
if (unlikely(!ti)) {
int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ struct dm_table *map;
+ map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ dm_put_live_table(md, srcu_idx);
+ return BLK_STS_RESOURCE;
+ }
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
}
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 35d368c..55443a6 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -224,6 +224,7 @@
atomic_read(&shared->in_flight[READ]),
atomic_read(&shared->in_flight[WRITE]));
}
+ cond_resched();
}
dm_stat_free(&s->rcu_head);
}
@@ -313,6 +314,7 @@
for (ni = 0; ni < n_entries; ni++) {
atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+ cond_resched();
}
if (s->n_histogram_entries) {
@@ -325,6 +327,7 @@
for (ni = 0; ni < n_entries; ni++) {
s->stat_shared[ni].tmp.histogram = hi;
hi += s->n_histogram_entries + 1;
+ cond_resched();
}
}
@@ -345,6 +348,7 @@
for (ni = 0; ni < n_entries; ni++) {
p[ni].histogram = hi;
hi += s->n_histogram_entries + 1;
+ cond_resched();
}
}
}
@@ -474,6 +478,7 @@
}
DMEMIT("\n");
}
+ cond_resched();
}
mutex_unlock(&stats->mutex);
@@ -750,6 +755,7 @@
local_irq_enable();
}
}
+ cond_resched();
}
}
@@ -865,6 +871,8 @@
if (unlikely(sz + 1 >= maxlen))
goto buffer_overflow;
+
+ cond_resched();
}
if (clear)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6ebb212..842d79e 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -2058,10 +2058,13 @@
dm_sm_threshold_fn fn,
void *context)
{
- int r;
+ int r = -EINVAL;
pmd_write_lock_in_core(pmd);
- r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+ if (!pmd->fail_io) {
+ r = dm_sm_register_threshold_callback(pmd->metadata_sm,
+ threshold, fn, context);
+ }
pmd_write_unlock(pmd);
return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fff4c50..a196d7c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3401,8 +3401,10 @@
calc_metadata_threshold(pt),
metadata_low_callback,
pool);
- if (r)
+ if (r) {
+ ti->error = "Error registering metadata threshold";
goto out_flags_changed;
+ }
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pool);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 808a98e..c801f6b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1242,6 +1242,7 @@
static struct target_type verity_target = {
.name = "verity",
+ .features = DM_TARGET_IMMUTABLE,
.version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 9d6ae3e..13cc318 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -20,7 +20,7 @@
#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
-#define MAX_WRITEBACK_JOBS 0
+#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16)
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
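
The new MAX_WRITEBACK_JOBS bounds in-flight writeback jobs by the smaller of 256 MiB worth of pages and 1/16 of system RAM, instead of the old 0 (unlimited). Plugging in assumed numbers (4 KiB pages, 8 GiB of RAM) shows the resulting cap:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;                          /* assumed */
        unsigned long totalram_pages = (8UL << 30) / page_size;  /* assumed 8 GiB */
        unsigned long by_window = 0x10000000 / page_size;        /* 65536  */
        unsigned long by_ram    = totalram_pages / 16;           /* 131072 */
        unsigned long max_jobs  = by_window < by_ram ? by_window : by_ram;

        printf("MAX_WRITEBACK_JOBS = %lu\n", max_jobs);          /* 65536 */
        return 0;
}
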
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6030cba..1005abf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -607,18 +607,19 @@
false, 0, &io->stats_aux);
}
-static void end_io_acct(struct dm_io *io)
+static void end_io_acct(struct mapped_device *md, struct bio *bio,
+ unsigned long start_time, struct dm_stats_aux *stats_aux)
{
- struct mapped_device *md = io->md;
- struct bio *bio = io->orig_bio;
- unsigned long duration = jiffies - io->start_time;
-
- bio_end_io_acct(bio, io->start_time);
+ unsigned long duration = jiffies - start_time;
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
- true, duration, &io->stats_aux);
+ true, duration, stats_aux);
+
+ smp_wmb();
+
+ bio_end_io_acct(bio, start_time);
/* nudge anyone waiting on suspend queue */
if (unlikely(wq_has_sleeper(&md->wait)))
@@ -903,6 +904,8 @@
blk_status_t io_error;
struct bio *bio;
struct mapped_device *md = io->md;
+ unsigned long start_time = 0;
+ struct dm_stats_aux stats_aux;
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
@@ -929,8 +932,10 @@
io_error = io->status;
bio = io->orig_bio;
- end_io_acct(io);
+ start_time = io->start_time;
+ stats_aux = io->stats_aux;
free_io(md, io);
+ end_io_acct(md, bio, start_time, &stats_aux);
if (io_error == BLK_STS_DM_REQUEUE)
return;
@@ -1692,15 +1697,10 @@
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- if (unlikely(!map)) {
- DMERR_LIMIT("%s: mapping table unavailable, erroring io",
- dm_device_name(md));
- bio_io_error(bio);
- goto out;
- }
- /* If suspended, queue this IO for later */
- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
+ /* If suspended, or map not yet available, queue this IO for later */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+ unlikely(!map)) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)
@@ -2350,6 +2350,8 @@
}
finish_wait(&md->wait, &wait);
+ smp_rmb();
+
return r;
}
@@ -3001,6 +3003,11 @@
goto out;
ti = dm_table_get_target(table, 0);
+ if (dm_suspended_md(md)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
ret = -EINVAL;
if (!ti->type->iterate_devices)
goto out;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index ea3130e..d377ea0 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -639,14 +639,6 @@
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
- /* Setup nodes/clustername only if bitmap version is
- * cluster-compatible
- */
- if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
- nodes = le32_to_cpu(sb->nodes);
- strlcpy(bitmap->mddev->bitmap_info.cluster_name,
- sb->cluster_name, 64);
- }
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -668,6 +660,16 @@
goto out;
}
+ /*
+ * Setup nodes/clustername only if bitmap version is
+ * cluster-compatible
+ */
+ if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+ sb->cluster_name, 64);
+ }
+
/* keep the array size field of the bitmap superblock up to date */
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
@@ -700,9 +702,9 @@
out:
kunmap_atomic(sb);
- /* Assigning chunksize is required for "re_read" */
- bitmap->mddev->bitmap_info.chunksize = chunksize;
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
+ /* Assigning chunksize is required for "re_read" */
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
err = md_setup_cluster(bitmap->mddev, nodes);
if (err) {
pr_warn("%s: Could not setup cluster service (%d)\n",
@@ -713,18 +715,18 @@
goto re_read;
}
-
out_no_sb:
- if (test_bit(BITMAP_STALE, &bitmap->flags))
- bitmap->events_cleared = bitmap->mddev->events;
- bitmap->mddev->bitmap_info.chunksize = chunksize;
- bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
- bitmap->mddev->bitmap_info.max_write_behind = write_behind;
- bitmap->mddev->bitmap_info.nodes = nodes;
- if (bitmap->mddev->bitmap_info.space == 0 ||
- bitmap->mddev->bitmap_info.space > sectors_reserved)
- bitmap->mddev->bitmap_info.space = sectors_reserved;
- if (err) {
+ if (err == 0) {
+ if (test_bit(BITMAP_STALE, &bitmap->flags))
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.nodes = nodes;
+ if (bitmap->mddev->bitmap_info.space == 0 ||
+ bitmap->mddev->bitmap_info.space > sectors_reserved)
+ bitmap->mddev->bitmap_info.space = sectors_reserved;
+ } else {
md_bitmap_print_sb(bitmap);
if (bitmap->cluster_slot < 0)
md_cluster_stop(bitmap->mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cc38765..0043dec 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2648,14 +2648,16 @@
static bool does_sb_need_changing(struct mddev *mddev)
{
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL, *iter;
struct mdp_superblock_1 *sb;
int role;
/* Find a good rdev */
- rdev_for_each(rdev, mddev)
- if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
+ rdev_for_each(iter, mddev)
+ if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
+ rdev = iter;
break;
+ }
/* No good device found. */
if (!rdev)
@@ -6297,6 +6299,7 @@
/* stop the array and free an attached data structures.
* This is called from dm-raid
*/
+ __md_stop_writes(mddev);
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -7968,17 +7971,22 @@
void md_unregister_thread(struct md_thread **threadp)
{
- struct md_thread *thread = *threadp;
- if (!thread)
- return;
- pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
- /* Locking ensures that mddev_unlock does not wake_up a
+ struct md_thread *thread;
+
+ /*
+ * Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
+ thread = *threadp;
+ if (!thread) {
+ spin_unlock(&pers_lock);
+ return;
+ }
*threadp = NULL;
spin_unlock(&pers_lock);
+ pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
}
@@ -9417,6 +9425,7 @@
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
if (mddev->event_work.func)
@@ -9728,16 +9737,18 @@
void md_reload_sb(struct mddev *mddev, int nr)
{
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL, *iter;
int err;
/* Find the rdev */
- rdev_for_each_rcu(rdev, mddev) {
- if (rdev->desc_nr == nr)
+ rdev_for_each_rcu(iter, mddev) {
+ if (iter->desc_nr == nr) {
+ rdev = iter;
break;
+ }
}
- if (!rdev || rdev->desc_nr != nr) {
+ if (!rdev) {
pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
return;
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 35843df..a20332e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -48,7 +48,7 @@
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
+ len += scnprintf(line+len, 200-len, "%s%s", k?"/":"",
bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b));
pr_debug("md: zone%d=[%s]\n", j, line);
@@ -128,21 +128,6 @@
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
- if (conf->nr_strip_zones == 1) {
- conf->layout = RAID0_ORIG_LAYOUT;
- } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
- mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
- conf->layout = mddev->layout;
- } else if (default_layout == RAID0_ORIG_LAYOUT ||
- default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
- conf->layout = default_layout;
- } else {
- pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
- mdname(mddev));
- pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
- err = -ENOTSUPP;
- goto abort;
- }
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size
@@ -273,6 +258,22 @@
(unsigned long long)smallest->sectors);
}
+ if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
+ conf->layout = RAID0_ORIG_LAYOUT;
+ } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
+ mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = mddev->layout;
+ } else if (default_layout == RAID0_ORIG_LAYOUT ||
+ default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = default_layout;
+ } else {
+ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+ mdname(mddev));
+ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
+ err = -EOPNOTSUPP;
+ goto abort;
+ }
+
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 70dccc3..0e741a8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1809,9 +1809,12 @@
int err = 0;
int number = rdev->raid_disk;
struct md_rdev **rdevp;
- struct raid10_info *p = conf->mirrors + number;
+ struct raid10_info *p;
print_conf(conf);
+ if (unlikely(number >= mddev->raid_disks))
+ return 0;
+ p = conf->mirrors + number;
if (rdev == p->rdev)
rdevp = &p->rdev;
else if (rdev == p->replacement)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c82953a..9f114b9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -686,17 +687,17 @@
return degraded;
}
-static int has_failed(struct r5conf *conf)
+static bool has_failed(struct r5conf *conf)
{
- int degraded;
+ int degraded = conf->mddev->degraded;
- if (conf->mddev->reshape_position == MaxSector)
- return conf->mddev->degraded > conf->max_degraded;
+ if (test_bit(MD_BROKEN, &conf->mddev->flags))
+ return true;
- degraded = raid5_calc_degraded(conf);
- if (degraded > conf->max_degraded)
- return 1;
- return 0;
+ if (conf->mddev->reshape_position != MaxSector)
+ degraded = raid5_calc_degraded(conf);
+
+ return degraded > conf->max_degraded;
}
struct stripe_head *
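
The reworked has_failed() above short-circuits on MD_BROKEN, recomputes the degraded count only while a reshape is in progress, and otherwise trusts mddev->degraded. The userspace sketch below restates that decision with a made-up struct standing in for the r5conf/mddev fields.

#include <stdbool.h>
#include <stdio.h>

struct fake_conf {
        bool broken;                    /* stands in for MD_BROKEN        */
        bool reshaping;                 /* reshape_position != MaxSector  */
        int degraded;                   /* mddev->degraded                */
        int degraded_during_reshape;    /* raid5_calc_degraded() result   */
        int max_degraded;
};

static bool has_failed(const struct fake_conf *c)
{
        int degraded = c->degraded;

        if (c->broken)
                return true;
        if (c->reshaping)
                degraded = c->degraded_during_reshape;
        return degraded > c->max_degraded;
}

int main(void)
{
        struct fake_conf c = {
                .degraded = 2, .degraded_during_reshape = 3,
                .max_degraded = 2, .reshaping = true,
        };
        printf("%d\n", has_failed(&c));         /* 1: 3 failed > 2 tolerated */
        return 0;
}
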
@@ -2864,10 +2865,10 @@
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
raid5_release_stripe(sh->batch_head);
+ raid5_release_stripe(sh);
}
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
@@ -2877,34 +2878,31 @@
unsigned long flags;
pr_debug("raid456: error called\n");
+ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
+
spin_lock_irqsave(&conf->device_lock, flags);
-
- if (test_bit(In_sync, &rdev->flags) &&
- mddev->degraded == conf->max_degraded) {
- /*
- * Don't allow to achieve failed state
- * Don't try to recover this device
- */
- conf->recovery_disabled = mddev->recovery_disabled;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- return;
- }
-
set_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
mddev->degraded = raid5_calc_degraded(conf);
+
+ if (has_failed(conf)) {
+ set_bit(MD_BROKEN, &conf->mddev->flags);
+ conf->recovery_disabled = mddev->recovery_disabled;
+
+ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
+ mdname(mddev), mddev->degraded, conf->raid_disks);
+ } else {
+ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded);
+ }
+
spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
- "md/raid:%s: Operation continuing on %d devices.\n",
- mdname(mddev),
- bdevname(rdev->bdev, b),
- mdname(mddev),
- conf->raid_disks - mddev->degraded);
r5c_update_on_rdev_error(mddev, rdev);
}
@@ -3939,7 +3937,7 @@
* back cache (prexor with orig_page, and then xor with
* page) in the read path
*/
- if (s->injournal && s->failed) {
+ if (s->to_read && s->injournal && s->failed) {
if (test_bit(STRIPE_R5C_CACHING, &sh->state))
r5c_make_stripe_write_out(sh);
goto out;
@@ -6522,7 +6520,18 @@
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+ * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+ * seeing md_check_recovery() is needed to clear
+ * the flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
@@ -8007,6 +8016,7 @@
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk <= last &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
first = rdev->saved_raid_disk;
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 2e5698f..e23aa60 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1271,7 +1271,7 @@
* While trying to poll the physical address was reset
* and the adapter was unconfigured, so bail out.
*/
- if (!adap->is_configuring)
+ if (adap->phys_addr == CEC_PHYS_ADDR_INVALID)
return -EINTR;
if (err)
@@ -1328,7 +1328,6 @@
adap->phys_addr != CEC_PHYS_ADDR_INVALID)
WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
adap->log_addrs.log_addr_mask = 0;
- adap->is_configuring = false;
adap->is_configured = false;
cec_flush(adap);
wake_up_interruptible(&adap->kthread_waitq);
@@ -1520,9 +1519,10 @@
for (i = 0; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
cec_adap_unconfigure(adap);
+ adap->is_configuring = false;
adap->kthread_config = NULL;
- mutex_unlock(&adap->lock);
complete(&adap->config_completion);
+ mutex_unlock(&adap->lock);
return 0;
}
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 2d95e16..f66699d 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -44,6 +44,8 @@
uint8_t *cec_message = cros_ec->event_data.data.cec_message;
unsigned int len = cros_ec->event_size;
+ if (len > CEC_MAX_MSG_SIZE)
+ len = CEC_MAX_MSG_SIZE;
cros_ec_cec->rx_msg.len = len;
memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
index 028a09a..102f1af 100644
--- a/drivers/media/cec/platform/s5p/s5p_cec.c
+++ b/drivers/media/cec/platform/s5p/s5p_cec.c
@@ -115,6 +115,8 @@
dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
cec->rx = STATE_BUSY;
cec->msg.len = status >> 24;
+ if (cec->msg.len > CEC_MAX_MSG_SIZE)
+ cec->msg.len = CEC_MAX_MSG_SIZE;
cec->msg.rx_status = CEC_RX_STATUS_OK;
s5p_cec_get_rx_buf(cec, cec->msg.len,
cec->msg.msg);
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 6974f17..1331f2c 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -358,6 +358,12 @@
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
vb2_core_querybuf(&ctx->vb_q, b->index, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
@@ -382,8 +388,13 @@
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
int ret;
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index a57470b..2134e25 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6672,7 +6672,7 @@
static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct drxk_state *state = fe->demodulator_priv;
- u16 err;
+ u16 err = 0;
dprintk(1, "\n");
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index ab7883c..9f5713b 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -555,7 +555,7 @@
buffer[3] = 0;
buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, len + 4) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index d1f5879..8cf1704 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2454,7 +2454,7 @@
buffer[i + 3] = infoframe_read(sd,
adv76xx_cri[index].payload_addr + i);
- if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) {
+ if (hdmi_infoframe_unpack(frame, buffer, len + 3) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__,
adv76xx_cri[index].desc);
return -ENOENT;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index f7d2b6c..a870117 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2574,7 +2574,7 @@
for (i = 0; i < len; i++)
buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, len + 3) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index b42b289..154776d 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -2000,7 +2000,6 @@
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&info->sd.entity);
- ov7670_power_off(sd);
return 0;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 35a51e9..1f0e4b9 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3898,7 +3898,7 @@
/* video */
vdev_init(btv, &btv->video_dev, &bttv_video_template, "video");
- btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
if (btv->tuner_type != TUNER_ABSENT)
btv->video_dev.device_caps |= V4L2_CAP_TUNER;
@@ -3919,7 +3919,7 @@
/* vbi */
vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi");
btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING | V4L2_CAP_TUNER;
+ V4L2_CAP_STREAMING;
if (btv->tuner_type != TUNER_ABSENT)
btv->vbi_dev.device_caps |= V4L2_CAP_TUNER;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 4e8132d..a23c025 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2154,7 +2154,7 @@
err = pci_set_dma_mask(pci_dev, 0xffffffff);
if (err) {
pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
- goto fail_ctrl;
+ goto fail_dma_set_mask;
}
err = request_irq(pci_dev->irq, cx23885_irq,
@@ -2162,7 +2162,7 @@
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
- goto fail_irq;
+ goto fail_dma_set_mask;
}
switch (dev->board) {
@@ -2184,7 +2184,7 @@
return 0;
-fail_irq:
+fail_dma_set_mask:
cx23885_dev_unregister(dev);
fail_ctrl:
v4l2_ctrl_handler_free(hdl);
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 285047b..a3d4528 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1340,11 +1340,11 @@
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
cx25821_shutdown(dev);
- pci_disable_device(pci_dev);
/* unregister stuff */
if (pci_dev->irq)
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
cx25821_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index a57c991..10d2971 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -162,6 +162,9 @@
cx_write(MO_TS_GPCNTRL, GP_COUNT_CONTROL_RESET);
q->count = 0;
+ /* clear interrupt status register */
+ cx_write(MO_TS_INTSTAT, 0x1f1111);
+
/* enable irqs */
dprintk(1, "setting the interrupt mask\n");
cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT);
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 58489ea..7cf2271 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -144,11 +144,10 @@
return -EINVAL;
vb2_set_plane_payload(vb, 0, size);
- cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
- 0, VBI_LINE_LENGTH * lines,
- VBI_LINE_LENGTH, 0,
- lines);
- return 0;
+ return cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
+ 0, VBI_LINE_LENGTH * lines,
+ VBI_LINE_LENGTH, 0,
+ lines);
}
static void buffer_finish(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 8cffdac..e5adffa 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -431,6 +431,7 @@
static int buffer_prepare(struct vb2_buffer *vb)
{
+ int ret;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_core *core = dev->core;
@@ -445,35 +446,35 @@
switch (core->field) {
case V4L2_FIELD_TOP:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, 0, UNSET,
- buf->bpl, 0, core->height);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, UNSET,
+ buf->bpl, 0, core->height);
break;
case V4L2_FIELD_BOTTOM:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, UNSET, 0,
- buf->bpl, 0, core->height);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, UNSET, 0,
+ buf->bpl, 0, core->height);
break;
case V4L2_FIELD_SEQ_TB:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl,
- 0, buf->bpl * (core->height >> 1),
- buf->bpl, 0,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, buf->bpl * (core->height >> 1),
+ buf->bpl, 0,
+ core->height >> 1);
break;
case V4L2_FIELD_SEQ_BT:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl,
- buf->bpl * (core->height >> 1), 0,
- buf->bpl, 0,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ buf->bpl * (core->height >> 1), 0,
+ buf->bpl, 0,
+ core->height >> 1);
break;
case V4L2_FIELD_INTERLACED:
default:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, 0, buf->bpl,
- buf->bpl, buf->bpl,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, buf->bpl,
+ buf->bpl, buf->bpl,
+ core->height >> 1);
break;
}
dprintk(2,
@@ -481,7 +482,7 @@
buf, buf->vb.vb2_buf.index, __func__,
core->width, core->height, dev->fmt->depth, dev->fmt->fourcc,
(unsigned long)buf->risc.dma);
- return 0;
+ return ret;
}
static void buffer_finish(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index e5efe52..00caf60 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -332,7 +332,6 @@
struct ivtv *itv; /* for ease of use */
const char *name; /* name of the stream */
int type; /* stream type */
- u32 caps; /* V4L2 capabilities */
struct v4l2_fh *fh; /* pointer to the streaming filehandle */
spinlock_t qlock; /* locks access to the queues */
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 35dccb3..a9d69b2 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -443,7 +443,7 @@
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -EINVAL;
if (!itv->osd_video_pbase)
return -EINVAL;
@@ -554,7 +554,7 @@
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -EINVAL;
if (!itv->osd_video_pbase)
return -EINVAL;
@@ -1388,7 +1388,7 @@
0,
};
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
@@ -1455,7 +1455,7 @@
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
@@ -1475,7 +1475,7 @@
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
- if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
+ if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
if (!itv->osd_video_pbase)
return -ENOTTY;
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f04ee84..f9de5d1 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -176,7 +176,7 @@
s->itv = itv;
s->type = type;
s->name = ivtv_stream_info[type].name;
- s->caps = ivtv_stream_info[type].v4l2_caps;
+ s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps;
if (ivtv_stream_info[type].pio)
s->dma = PCI_DMA_NONE;
@@ -299,12 +299,9 @@
if (s_mpg->vdev.v4l2_dev)
num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset;
}
- s->vdev.device_caps = s->caps;
- if (itv->osd_video_pbase) {
- itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |=
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
- itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |=
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV ||
+ type == IVTV_DEC_STREAM_TYPE_MPG)) {
+ s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
}
video_set_drvdata(&s->vdev, s);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 7a1fb06..d3cde05 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1214,16 +1214,14 @@
static int saa7134_alsa_init(void)
{
- struct saa7134_dev *dev = NULL;
- struct list_head *list;
+ struct saa7134_dev *dev;
saa7134_dmasound_init = alsa_device_init;
saa7134_dmasound_exit = alsa_device_exit;
pr_info("saa7134 ALSA driver for DMA sound loaded\n");
- list_for_each(list,&saa7134_devlist) {
- dev = list_entry(list, struct saa7134_dev, devlist);
+ list_for_each_entry(dev, &saa7134_devlist, devlist) {
if (dev->pci->device == PCI_DEVICE_ID_PHILIPS_SAA7130)
pr_info("%s/alsa: %s doesn't support digital audio\n",
dev->name, saa7134_boards[dev->board].name);
@@ -1231,7 +1229,7 @@
alsa_device_init(dev);
}
- if (dev == NULL)
+ if (list_empty(&saa7134_devlist))
pr_info("saa7134 ALSA: no saa7134 cards found\n");
return 0;
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 74ae4f0..8a25a0d 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -315,13 +315,6 @@
spin_lock_init(&dev->lock);
- err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
- dev->name, dev);
- if (err < 0) {
- dev_err(&pci_dev->dev, "unable to request interrupt\n");
- goto iounmap;
- }
-
timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);
/*
@@ -333,18 +326,23 @@
err = tw686x_video_init(dev);
if (err) {
dev_err(&pci_dev->dev, "can't register video\n");
- goto free_irq;
+ goto iounmap;
}
err = tw686x_audio_init(dev);
if (err)
dev_warn(&pci_dev->dev, "can't register audio\n");
+ err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
+ dev->name, dev);
+ if (err < 0) {
+ dev_err(&pci_dev->dev, "unable to request interrupt\n");
+ goto iounmap;
+ }
+
pci_set_drvdata(pci_dev, dev);
return 0;
-free_irq:
- free_irq(pci_dev->irq, dev);
iounmap:
pci_iounmap(pci_dev, dev->mmio);
free_region:
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 1ced2b0..55ed885 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1283,8 +1283,10 @@
video_set_drvdata(vdev, vc);
err = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
- if (err < 0)
+ if (err < 0) {
+ video_device_release(vdev);
goto error;
+ }
vc->num = vdev->num;
}
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index debc750..9d91243 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -151,7 +151,7 @@
#define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF)
#define VE_MODE_DETECT_STATUS 0x098
-#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0)
+#define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0)
#define VE_MODE_DETECT_V_LINES_SHF 16
#define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF)
#define VE_MODE_DETECT_STATUS_VSYNC BIT(28)
@@ -162,6 +162,8 @@
#define VE_SYNC_STATUS_VSYNC_SHF 16
#define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF)
+#define VE_H_TOTAL_PIXELS 0x0A0
+
#define VE_INTERRUPT_CTRL 0x304
#define VE_INTERRUPT_STATUS 0x308
#define VE_INTERRUPT_MODE_DETECT_WD BIT(0)
@@ -765,6 +767,7 @@
u32 src_lr_edge;
u32 src_tb_edge;
u32 sync;
+ u32 htotal;
struct v4l2_bt_timings *det = &video->detected_timings;
det->width = MIN_WIDTH;
@@ -809,6 +812,7 @@
src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET);
mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
sync = aspeed_video_read(video, VE_SYNC_STATUS);
+ htotal = aspeed_video_read(video, VE_H_TOTAL_PIXELS);
video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >>
VE_SRC_TB_EDGE_DET_BOT_SHF;
@@ -825,8 +829,7 @@
VE_SRC_LR_EDGE_DET_RT_SHF;
video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT;
det->hfrontporch = video->frame_left;
- det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) -
- video->frame_right;
+ det->hbackporch = htotal - video->frame_right;
det->hsync = sync & VE_SYNC_STATUS_HSYNC;
if (video->frame_left > video->frame_right)
continue;
@@ -1720,6 +1723,7 @@
rc = aspeed_video_setup_video(video);
if (rc) {
+ aspeed_video_free_buf(video, &video->jpeg);
clk_unprepare(video->vclk);
clk_unprepare(video->eclk);
return rc;
@@ -1745,8 +1749,7 @@
v4l2_device_unregister(v4l2_dev);
- dma_free_coherent(video->dev, VE_JPEG_HEADER_SIZE, video->jpeg.virt,
- video->jpeg.dma);
+ aspeed_video_free_buf(video, &video->jpeg);
of_reserved_mem_device_release(dev);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 1eed69d..14d4830 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -408,6 +408,7 @@
if (!vdoa_data)
vdoa_data = ERR_PTR(-EPROBE_DEFER);
+ put_device(&vdoa_pdev->dev);
out:
of_node_put(vdoa_node);
@@ -1317,7 +1318,8 @@
struct v4l2_frmivalenum *f)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
- int i;
+ struct coda_q_data *q_data;
+ const struct coda_codec *codec;
if (f->index)
return -EINVAL;
@@ -1326,12 +1328,19 @@
if (!ctx->vdoa && f->pixel_format == V4L2_PIX_FMT_YUYV)
return -EINVAL;
- for (i = 0; i < CODA_MAX_FORMATS; i++) {
- if (f->pixel_format == ctx->cvd->src_formats[i] ||
- f->pixel_format == ctx->cvd->dst_formats[i])
- break;
+ if (coda_format_normalize_yuv(f->pixel_format) == V4L2_PIX_FMT_YUV420) {
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ codec = coda_find_codec(ctx->dev, f->pixel_format,
+ q_data->fourcc);
+ } else {
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+ f->pixel_format);
}
- if (i == CODA_MAX_FORMATS)
+ if (!codec)
+ return -EINVAL;
+
+ if (f->width < MIN_W || f->width > codec->max_w ||
+ f->height < MIN_H || f->height > codec->max_h)
return -EINVAL;
f->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
@@ -2334,8 +2343,8 @@
V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
- V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
- V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, 0x0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
if (ctx->dev->devtype->product == CODA_HX4 ||
ctx->dev->devtype->product == CODA_7541) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
@@ -2349,12 +2358,15 @@
if (ctx->dev->devtype->product == CODA_960) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
- V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
- ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2)),
V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
}
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
@@ -2416,7 +2428,7 @@
ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
- ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 5e67994..ee610da 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -428,6 +428,7 @@
static struct resource *res, *res_irq;
struct platform_device *pdev_capture, *pdev_display;
struct device_node *endpoint = NULL;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vpif_base = devm_ioremap_resource(&pdev->dev, res);
@@ -458,8 +459,8 @@
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq) {
dev_warn(&pdev->dev, "Missing IRQ resource.\n");
- pm_runtime_put(&pdev->dev);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_rpm;
}
pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture),
@@ -493,10 +494,17 @@
}
return 0;
+
+err_put_rpm:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
}
static int vpif_remove(struct platform_device *pdev)
{
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index d26fa59..b52d220 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -140,7 +140,7 @@
dev_err(&is->pdev->dev, "clock %s enable failed\n",
fimc_is_clocks[i]);
for (--i; i >= 0; i--)
- clk_disable(is->clocks[i]);
+ clk_disable_unprepare(is->clocks[i]);
return ret;
}
pr_debug("enabled clock: %s\n", fimc_is_clocks[i]);
@@ -213,6 +213,7 @@
if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
of_node_put(child);
+ of_node_put(i2c_bus);
return ret;
}
index++;
@@ -830,7 +831,7 @@
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto err_irq;
+ goto err_pm_disable;
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
@@ -864,6 +865,8 @@
pm_runtime_put_noidle(dev);
if (!pm_runtime_enabled(dev))
fimc_is_runtime_suspend(dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
err_irq:
free_irq(is->irq, is);
err_clk:
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h
index edcb3a5..2dd4ddb 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h
@@ -32,7 +32,7 @@
return 0;
}
-void fimc_isp_video_device_unregister(struct fimc_isp *isp,
+static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp,
enum v4l2_buf_type type)
{
}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
index 2cb8cec..b810c96 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -40,12 +40,14 @@
* @ipi_id : IPI_MDP
* @ap_inst : AP mtk_mdp_vpu address
* @vpu_inst_addr : VPU MDP instance address
+ * @padding : Alignment padding
*/
struct mdp_ipi_comm {
uint32_t msg_id;
uint32_t ipi_id;
uint64_t ap_inst;
uint32_t vpu_inst_addr;
+ uint32_t padding;
};
/**
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
index cd27f63..cfc7ebe 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
@@ -102,6 +102,8 @@
vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id);
fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
fw->type = VPU;
fw->ops = &mtk_vcodec_vpu_msg;
fw->pdev = fw_pdev;
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index a59022a..966b4d9 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -104,6 +104,9 @@
mutex_lock(&core->lock);
}
+ if (!core->ops)
+ goto unlock;
+
ret = core->ops->core_deinit(core);
if (!ret)
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index ea13170..de34a87 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -158,6 +158,8 @@
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
+ if (!fmt)
+ return NULL;
}
pixmp->width = clamp(pixmp->width, frame_width_min(inst),
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 6759091..e324634 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -868,7 +868,7 @@
ret = pm_runtime_resume_and_get(rga->dev);
if (ret < 0)
- goto rel_vdev;
+ goto rel_m2m;
rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
@@ -884,7 +884,7 @@
DMA_ATTR_WRITE_COMBINE);
if (!rga->cmdbuf_virt) {
ret = -ENOMEM;
- goto rel_vdev;
+ goto rel_m2m;
}
rga->src_mmu_pages =
@@ -895,7 +895,7 @@
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
- if (rga->dst_mmu_pages) {
+ if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}
@@ -921,6 +921,8 @@
free_dma:
dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
+rel_m2m:
+ v4l2_m2m_release(rga->m2m_dev);
rel_vdev:
video_device_release(vfd);
unreg_v4l2_dev:
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index c691b3d..5da49a0 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -1862,7 +1862,7 @@
if (ret) {
dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n",
DELTA_PREFIX);
- goto err;
+ goto err_pm_disable;
}
/* register all available decoders */
@@ -1876,7 +1876,7 @@
if (ret) {
dev_err(delta->dev, "%s failed to register V4L2 device\n",
DELTA_PREFIX);
- goto err;
+ goto err_pm_disable;
}
delta->work_queue = create_workqueue(DELTA_NAME);
@@ -1901,6 +1901,8 @@
destroy_workqueue(delta->work_queue);
err_v4l2:
v4l2_device_unregister(&delta->v4l2_dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
err:
return ret;
}
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 85587c1..75083cb 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -291,11 +291,11 @@
+ crop.left * fmtinfo->bpp[0] / 8;
if (format->num_planes > 1) {
+ unsigned int bpl = format->plane_fmt[1].bytesperline;
unsigned int offset;
- offset = crop.top * format->plane_fmt[1].bytesperline
- + crop.left / fmtinfo->hsub
- * fmtinfo->bpp[1] / 8;
+ offset = crop.top / fmtinfo->vsub * bpl
+ + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8;
mem.addr[1] += offset;
mem.addr[2] += offset;
}
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index cc2856e..f2b0c49 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -472,7 +472,7 @@
{
struct device_node *ports;
struct device_node *port;
- int ret;
+ int ret = 0;
ports = of_get_child_by_name(xdev->dev->of_node, "ports");
if (ports == NULL) {
@@ -482,13 +482,14 @@
for_each_child_of_node(ports, port) {
ret = xvip_graph_dma_init_one(xdev, port);
- if (ret < 0) {
+ if (ret) {
of_node_put(port);
- return ret;
+ break;
}
}
- return 0;
+ of_node_put(ports);
+ return ret;
}
static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index c6cd2e6..a50701c 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -48,11 +48,29 @@
return 0;
}
+static void delay_until(ktime_t until)
+{
+ /*
+ * delta should never exceed 0.5 seconds (IR_MAX_DURATION), and on
+ * m68k ndelay(s64) does not compile, so use s32 rather than s64.
+ */
+ s32 delta;
+
+ while (true) {
+ delta = ktime_us_delta(until, ktime_get());
+ if (delta <= 0)
+ return;
+
+ /* udelay more than 1ms may not work */
+ delta = min(delta, 1000);
+ udelay(delta);
+ }
+}
+
static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
ktime_t edge;
- s32 delta;
int i;
local_irq_disable();
@@ -63,9 +81,7 @@
gpiod_set_value(gpio_ir->gpio, !(i % 2));
edge = ktime_add_us(edge, txbuf[i]);
- delta = ktime_us_delta(edge, ktime_get());
- if (delta > 0)
- udelay(delta);
+ delay_until(edge);
}
gpiod_set_value(gpio_ir->gpio, 0);
@@ -97,9 +113,7 @@
if (i % 2) {
// space
edge = ktime_add_us(edge, txbuf[i]);
- delta = ktime_us_delta(edge, ktime_get());
- if (delta > 0)
- udelay(delta);
+ delay_until(edge);
} else {
// pulse
ktime_t last = ktime_add_us(edge, txbuf[i]);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index a7962ca..bc9ac60 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -153,6 +153,24 @@
const struct imon_usb_dev_descr *dev_descr;
/* device description with key */
/* table for front panels */
+ /*
+ * Fields for deferring free_imon_context().
+ *
+ * Since a reference to "struct imon_context" is stored in
+ * "struct file"->private_data, we need to remember
+ * how many file descriptors might access this "struct imon_context".
+ */
+ refcount_t users;
+ /*
+ * Use a flag to tell display_open()/vfd_write()/lcd_write() that
+ * imon_disconnect() has already been called.
+ */
+ bool disconnected;
+ /*
+ * We need to wait for an RCU grace period in order to allow
+ * display_open() to safely check ->disconnected and increment ->users.
+ */
+ struct rcu_head rcu;
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -160,18 +178,18 @@
/* vfd character device file operations */
static const struct file_operations vfd_fops = {
.owner = THIS_MODULE,
- .open = &display_open,
- .write = &vfd_write,
- .release = &display_close,
+ .open = display_open,
+ .write = vfd_write,
+ .release = display_close,
.llseek = noop_llseek,
};
/* lcd character device file operations */
static const struct file_operations lcd_fops = {
.owner = THIS_MODULE,
- .open = &display_open,
- .write = &lcd_write,
- .release = &display_close,
+ .open = display_open,
+ .write = lcd_write,
+ .release = display_close,
.llseek = noop_llseek,
};
@@ -439,9 +457,6 @@
.id_table = imon_usb_id_table,
};
-/* to prevent races between open() and disconnect(), probing, etc */
-static DEFINE_MUTEX(driver_lock);
-
/* Module bookkeeping bits */
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);
@@ -481,9 +496,11 @@
struct device *dev = ictx->dev;
usb_free_urb(ictx->tx_urb);
+ WARN_ON(ictx->dev_present_intf0);
usb_free_urb(ictx->rx_urb_intf0);
+ WARN_ON(ictx->dev_present_intf1);
usb_free_urb(ictx->rx_urb_intf1);
- kfree(ictx);
+ kfree_rcu(ictx, rcu);
dev_dbg(dev, "%s: iMON context freed\n", __func__);
}
@@ -499,9 +516,6 @@
int subminor;
int retval = 0;
- /* prevent races with disconnect */
- mutex_lock(&driver_lock);
-
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
@@ -509,13 +523,16 @@
retval = -ENODEV;
goto exit;
}
- ictx = usb_get_intfdata(interface);
- if (!ictx) {
+ rcu_read_lock();
+ ictx = usb_get_intfdata(interface);
+ if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) {
+ rcu_read_unlock();
pr_err("no context found for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
+ rcu_read_unlock();
mutex_lock(&ictx->lock);
@@ -533,8 +550,10 @@
mutex_unlock(&ictx->lock);
+ if (retval && refcount_dec_and_test(&ictx->users))
+ free_imon_context(ictx);
+
exit:
- mutex_unlock(&driver_lock);
return retval;
}
@@ -544,16 +563,9 @@
*/
static int display_close(struct inode *inode, struct file *file)
{
- struct imon_context *ictx = NULL;
+ struct imon_context *ictx = file->private_data;
int retval = 0;
- ictx = file->private_data;
-
- if (!ictx) {
- pr_err("no context for device\n");
- return -ENODEV;
- }
-
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
@@ -568,6 +580,8 @@
}
mutex_unlock(&ictx->lock);
+ if (refcount_dec_and_test(&ictx->users))
+ free_imon_context(ictx);
return retval;
}
@@ -937,15 +951,12 @@
int offset;
int seq;
int retval = 0;
- struct imon_context *ictx;
+ struct imon_context *ictx = file->private_data;
static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
- ictx = file->private_data;
- if (!ictx) {
- pr_err_ratelimited("no context for device\n");
+ if (ictx->disconnected)
return -ENODEV;
- }
mutex_lock(&ictx->lock);
@@ -1021,13 +1032,10 @@
size_t n_bytes, loff_t *pos)
{
int retval = 0;
- struct imon_context *ictx;
+ struct imon_context *ictx = file->private_data;
- ictx = file->private_data;
- if (!ictx) {
- pr_err_ratelimited("no context for device\n");
+ if (ictx->disconnected)
return -ENODEV;
- }
mutex_lock(&ictx->lock);
@@ -2405,7 +2413,6 @@
int ifnum, sysfs_err;
int ret = 0;
struct imon_context *ictx = NULL;
- struct imon_context *first_if_ctx = NULL;
u16 vendor, product;
usbdev = usb_get_dev(interface_to_usbdev(interface));
@@ -2417,17 +2424,12 @@
dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
__func__, vendor, product, ifnum);
- /* prevent races probing devices w/multiple interfaces */
- mutex_lock(&driver_lock);
-
first_if = usb_ifnum_to_if(usbdev, 0);
if (!first_if) {
ret = -ENODEV;
goto fail;
}
- first_if_ctx = usb_get_intfdata(first_if);
-
if (ifnum == 0) {
ictx = imon_init_intf0(interface, id);
if (!ictx) {
@@ -2435,9 +2437,11 @@
ret = -ENODEV;
goto fail;
}
+ refcount_set(&ictx->users, 1);
} else {
/* this is the secondary interface on the device */
+ struct imon_context *first_if_ctx = usb_get_intfdata(first_if);
/* fail early if first intf failed to register */
if (!first_if_ctx) {
@@ -2451,14 +2455,13 @@
ret = -ENODEV;
goto fail;
}
+ refcount_inc(&ictx->users);
}
usb_set_intfdata(interface, ictx);
if (ifnum == 0) {
- mutex_lock(&ictx->lock);
-
if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
&imon_rf_attr_group);
@@ -2469,21 +2472,17 @@
if (ictx->display_supported)
imon_init_display(ictx, interface);
-
- mutex_unlock(&ictx->lock);
}
dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
- mutex_unlock(&driver_lock);
usb_put_dev(usbdev);
return 0;
fail:
- mutex_unlock(&driver_lock);
usb_put_dev(usbdev);
dev_err(dev, "unable to register, err %d\n", ret);
@@ -2499,10 +2498,8 @@
struct device *dev;
int ifnum;
- /* prevent races with multi-interface device probing and display_open */
- mutex_lock(&driver_lock);
-
ictx = usb_get_intfdata(interface);
+ ictx->disconnected = true;
dev = ictx->dev;
ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
@@ -2543,11 +2540,9 @@
}
}
- if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1)
+ if (refcount_dec_and_test(&ictx->users))
free_imon_context(ictx);
- mutex_unlock(&driver_lock);
-
dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
__func__, ifnum);
}
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 1aa7989..7f39427 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -429,7 +429,7 @@
err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL);
if (err != 0) {
dev_err(irtoy->dev, "fail to submit in urb: %d\n", err);
- return err;
+ goto free_rcdev;
}
err = irtoy_setup(irtoy);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index dbb5a4f..142319c 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1077,7 +1077,7 @@
struct mceusb_dev *ir = dev->priv;
unsigned int units;
- units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT);
+ units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT);
cmdbuf[2] = units >> 8;
cmdbuf[3] = units;
@@ -1416,42 +1416,37 @@
{
int ret;
struct device *dev = ir->dev;
- char *data;
-
- data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
- if (!data) {
- dev_err(dev, "%s: memory allocation failed!", __func__);
- return;
- }
+ char data[USB_CTRL_MSG_SZ];
/*
* This is a strange one. Windows issues a set address to the device
* on the receive control pipe and expect a certain value pair back
*/
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
- data, USB_CTRL_MSG_SZ, 3000);
+ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, 0, data, USB_CTRL_MSG_SZ, 3000,
+ GFP_KERNEL);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
/* set feature: bit rate 38400 bps */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
- 0xc04e, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
+ 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 4, USB_TYPE_VENDOR,
- 0x0808, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 4, USB_TYPE_VENDOR,
+ 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 2, USB_TYPE_VENDOR,
- 0x0000, 0x0100, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 2, USB_TYPE_VENDOR,
+ 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
@@ -1459,8 +1454,6 @@
/* get hw/sw revision? */
mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
- kfree(data);
}
static void mceusb_gen2_init(struct mceusb_dev *ir)
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
index d79b658..4676083 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
@@ -455,6 +455,9 @@
e->name = kstrdup(args.name, GFP_KERNEL);
e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ);
+ if (!e->encoder_buf)
+ goto out_kfree_e;
+
e->encoder_buf_sz = VIDTV_S302M_BUF_SZ;
e->encoder_buf_offset = 0;
@@ -467,10 +470,8 @@
e->is_video_encoder = false;
ctx = kzalloc(priv_sz, GFP_KERNEL);
- if (!ctx) {
- kfree(e);
- return NULL;
- }
+ if (!ctx)
+ goto out_kfree_buf;
e->ctx = ctx;
ctx->last_duration = 0;
@@ -498,6 +499,14 @@
e->next = NULL;
return e;
+
+out_kfree_buf:
+ kfree(e->encoder_buf);
+
+out_kfree_e:
+ kfree(e->name);
+ kfree(e);
+ return NULL;
}
void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e)
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index a776bb8..a246243 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -1325,12 +1325,6 @@
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
- ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
- if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto error_v4l2;
- }
-
video_set_drvdata(vfd, dev);
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
@@ -1353,12 +1347,20 @@
media_device_init(&dev->mdev);
dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto error_m2m;
+ }
+
+#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto error_dev;
+ goto error_v4l2;
}
ret = media_device_register(&dev->mdev);
@@ -1373,11 +1375,13 @@
error_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
#endif
-error_dev:
+error_v4l2:
video_unregister_device(&dev->vfd);
/* vim2m_device_release called by video_unregister_device to release various objects */
return ret;
-error_v4l2:
+error_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+error_dev:
v4l2_device_unregister(&dev->v4l2_dev);
error_free:
kfree(dev);
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 1e356dc..761d2ab 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -330,6 +330,28 @@
return vivid_vid_out_g_fbuf(file, fh, a);
}
+/*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+bool vivid_validate_fb(const struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev;
+ int i;
+
+ for (i = 0; i < n_devs; i++) {
+ dev = vivid_devs[i];
+ if (!dev || !dev->video_pbase)
+ continue;
+ if ((unsigned long)a->base == dev->video_pbase &&
+ a->fmt.width <= dev->display_width &&
+ a->fmt.height <= dev->display_height &&
+ a->fmt.bytesperline <= dev->display_byte_stride)
+ return true;
+ }
+ return false;
+}
+
static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);
@@ -850,8 +872,12 @@
/* how many inputs do we have and of what type? */
dev->num_inputs = num_inputs[inst];
- if (dev->num_inputs < 1)
- dev->num_inputs = 1;
+ if (node_type & 0x20007) {
+ if (dev->num_inputs < 1)
+ dev->num_inputs = 1;
+ } else {
+ dev->num_inputs = 0;
+ }
if (dev->num_inputs >= MAX_INPUTS)
dev->num_inputs = MAX_INPUTS;
for (i = 0; i < dev->num_inputs; i++) {
@@ -868,8 +894,12 @@
/* how many outputs do we have and of what type? */
dev->num_outputs = num_outputs[inst];
- if (dev->num_outputs < 1)
- dev->num_outputs = 1;
+ if (node_type & 0x40300) {
+ if (dev->num_outputs < 1)
+ dev->num_outputs = 1;
+ } else {
+ dev->num_outputs = 0;
+ }
if (dev->num_outputs >= MAX_OUTPUTS)
dev->num_outputs = MAX_OUTPUTS;
for (i = 0; i < dev->num_outputs; i++) {
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 99e69b8..6aa32c8 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -609,4 +609,6 @@
return dev->output_type[dev->output] == HDMI;
}
+bool vivid_validate_fb(const struct v4l2_framebuffer *a);
+
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index eadf28a..d493bd1 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -452,6 +452,12 @@
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
dev->crop_bounds_cap = dev->src_rect;
+ if (dev->bitmap_cap &&
+ (dev->compose_cap.width != dev->crop_cap.width ||
+ dev->compose_cap.height != dev->crop_cap.height)) {
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
dev->compose_cap = dev->crop_cap;
if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
dev->compose_cap.height /= 2;
@@ -909,6 +915,8 @@
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_cap;
struct v4l2_rect *compose = &dev->compose_cap;
+ unsigned orig_compose_w = compose->width;
+ unsigned orig_compose_h = compose->height;
unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
int ret;
@@ -1025,17 +1033,17 @@
s->r.height /= factor;
}
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
- if (dev->bitmap_cap && (compose->width != s->r.width ||
- compose->height != s->r.height)) {
- vfree(dev->bitmap_cap);
- dev->bitmap_cap = NULL;
- }
*compose = s->r;
break;
default:
return -EINVAL;
}
+ if (dev->bitmap_cap && (compose->width != orig_compose_w ||
+ compose->height != orig_compose_h)) {
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
tpg_s_crop_compose(&dev->tpg, crop, compose);
return 0;
}
@@ -1276,7 +1284,14 @@
return -EINVAL;
if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
return -EINVAL;
- if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
+ if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
+ return -EINVAL;
+
+ /*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+ if (!vivid_validate_fb(a))
return -EINVAL;
dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index a2563c2..2299d5c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -512,7 +512,7 @@
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 87e3755..26408a9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3881,6 +3881,8 @@
goto err_free;
}
+ kref_init(&dev->ref);
+
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
@@ -3981,6 +3983,8 @@
}
if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) {
+ kref_init(&dev->dev_next->ref);
+
dev->dev_next->ts = SECONDARY_TS;
dev->dev_next->alt = -1;
dev->dev_next->is_audio_only = has_vendor_audio &&
@@ -4035,12 +4039,8 @@
em28xx_write_reg(dev, 0x0b, 0x82);
mdelay(100);
}
-
- kref_init(&dev->dev_next->ref);
}
- kref_init(&dev->ref);
-
request_modules(dev);
/*
@@ -4095,11 +4095,8 @@
em28xx_close_extension(dev);
- if (dev->dev_next) {
- em28xx_close_extension(dev->dev_next);
+ if (dev->dev_next)
em28xx_release_resources(dev->dev_next);
- }
-
em28xx_release_resources(dev);
if (dev->dev_next) {
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index b9e4512..2e5913b 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -504,6 +504,7 @@
u8 *data;
struct go7007 *go = i2c_get_adapdata(adapter);
struct go7007_usb *usb = go->hpi_context;
+ int err = -EIO;
audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1);
if (IS_ERR(audio))
@@ -532,11 +533,8 @@
V4L2_CID_HUE, -512, 511, 1, 0);
sd->ctrl_handler = &state->hdl;
if (state->hdl.error) {
- int err = state->hdl.error;
-
- v4l2_ctrl_handler_free(&state->hdl);
- kfree(state);
- return err;
+ err = state->hdl.error;
+ goto fail;
}
state->std = V4L2_STD_NTSC;
@@ -600,7 +598,7 @@
i2c_unregister_device(audio);
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
- return -EIO;
+ return err;
}
static int s2250_remove(struct i2c_client *client)
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 563128d..fd7d2a9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -308,7 +308,6 @@
dev->status = STATUS_STREAMING;
- INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
schedule_work(&dev->worker);
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
@@ -410,7 +409,7 @@
struct hdpvr_device *dev = video_drvdata(file);
struct hdpvr_buffer *buf = NULL;
struct urb *urb;
- unsigned int ret = 0;
+ int ret = 0;
int rem, cnt;
if (*pos)
@@ -1165,6 +1164,9 @@
bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP;
int res;
+ // initialize dev->worker
+ INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
+
dev->cur_std = V4L2_STD_525_60;
dev->width = 720;
dev->height = 480;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 3915d55..d22ce32 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2569,6 +2569,11 @@
} while (0);
mutex_unlock(&pvr2_unit_mtx);
+ INIT_WORK(&hdw->workpoll, pvr2_hdw_worker_poll);
+
+ if (hdw->unit_number == -1)
+ goto fail;
+
cnt1 = 0;
cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2");
cnt1 += cnt2;
@@ -2580,8 +2585,6 @@
if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
hdw->name[cnt1] = 0;
- INIT_WORK(&hdw->workpoll,pvr2_hdw_worker_poll);
-
pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
hdw->unit_number,hdw->name);
@@ -2607,6 +2610,7 @@
del_timer_sync(&hdw->encoder_run_timer);
del_timer_sync(&hdw->encoder_wait_timer);
flush_work(&hdw->workpoll);
+ v4l2_device_unregister(&hdw->v4l2_dev);
usb_free_urb(hdw->ctl_read_urb);
usb_free_urb(hdw->ctl_write_urb);
kfree(hdw->ctl_read_buffer);
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index 4e1698f..ce71750 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -403,7 +403,7 @@
/* Here is the only place where isoc get released */
stk1160_uninit_isoc(dev);
- stk1160_clear_queue(dev);
+ stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);
video_unregister_device(&dev->vdev);
v4l2_device_disconnect(&dev->v4l2_dev);
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 6a4eb61..1aa9534 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -258,7 +258,7 @@
stk1160_uninit_isoc(dev);
out_stop_hw:
usb_set_interface(dev->udev, 0, 0);
- stk1160_clear_queue(dev);
+ stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED);
mutex_unlock(&dev->v4l_lock);
@@ -306,7 +306,7 @@
stk1160_stop_hw(dev);
- stk1160_clear_queue(dev);
+ stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR);
stk1160_dbg("streaming stopped\n");
@@ -745,7 +745,7 @@
/********************************************************************/
/* Must be called with both v4l_lock and vb_queue_lock hold */
-void stk1160_clear_queue(struct stk1160 *dev)
+void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state)
{
struct stk1160_buffer *buf;
unsigned long flags;
@@ -756,7 +756,7 @@
buf = list_first_entry(&dev->avail_bufs,
struct stk1160_buffer, list);
list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, vb2_state);
stk1160_dbg("buffer [%p/%d] aborted\n",
buf, buf->vb.vb2_buf.index);
}
@@ -766,7 +766,7 @@
buf = dev->isoc_ctl.buf;
dev->isoc_ctl.buf = NULL;
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&buf->vb.vb2_buf, vb2_state);
stk1160_dbg("buffer [%p/%d] aborted\n",
buf, buf->vb.vb2_buf.index);
}
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index a31ea1c..a70963c 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -166,7 +166,7 @@
int stk1160_vb2_setup(struct stk1160 *dev);
int stk1160_video_register(struct stk1160 *dev);
void stk1160_video_unregister(struct stk1160 *dev);
-void stk1160_clear_queue(struct stk1160 *dev);
+void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state);
/* Provided by stk1160-video.c */
int stk1160_alloc_isoc(struct stk1160 *dev);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 753b8a9..b40a2b9 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -863,29 +863,31 @@
struct uvc_video_chain *chain = handle->chain;
const struct uvc_entity *selector = chain->selector;
struct uvc_entity *iterm = NULL;
+ struct uvc_entity *it;
u32 index = input->index;
- int pin = 0;
if (selector == NULL ||
(chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
if (index != 0)
return -EINVAL;
- list_for_each_entry(iterm, &chain->entities, chain) {
- if (UVC_ENTITY_IS_ITERM(iterm))
+ list_for_each_entry(it, &chain->entities, chain) {
+ if (UVC_ENTITY_IS_ITERM(it)) {
+ iterm = it;
break;
+ }
}
- pin = iterm->id;
} else if (index < selector->bNrInPins) {
- pin = selector->baSourceID[index];
- list_for_each_entry(iterm, &chain->entities, chain) {
- if (!UVC_ENTITY_IS_ITERM(iterm))
+ list_for_each_entry(it, &chain->entities, chain) {
+ if (!UVC_ENTITY_IS_ITERM(it))
continue;
- if (iterm->id == pin)
+ if (it->id == selector->baSourceID[index]) {
+ iterm = it;
break;
+ }
}
}
- if (iterm == NULL || iterm->id != pin)
+ if (iterm == NULL)
return -EINVAL;
memset(input, 0, sizeof(*input));
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index af48705..003c32f 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -161,6 +161,20 @@
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
return false;
+
+ /* sanity checks for the blanking timings */
+ if (!bt->interlaced &&
+ (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+ return false;
+ if (bt->hfrontporch > 2 * bt->width ||
+ bt->hsync > 1024 || bt->hbackporch > 1024)
+ return false;
+ if (bt->vfrontporch > 4096 ||
+ bt->vsync > 128 || bt->vbackporch > 4096)
+ return false;
+ if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
+ bt->il_vsync > 128 || bt->il_vbackporch > 4096))
+ return false;
return fnc == NULL || fnc(t, fnc_handle);
}
EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index b221b4e..ad14d52 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -585,19 +585,14 @@
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
-int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct v4l2_buffer *buf)
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
+ struct v4l2_buffer *buf)
{
- struct vb2_queue *vq;
- int ret = 0;
- unsigned int i;
-
- vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_querybuf(vq, buf);
-
/* Adjust MMAP memory offsets for the CAPTURE queue */
if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ unsigned int i;
+
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
@@ -605,8 +600,23 @@
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
+}
- return ret;
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
@@ -763,6 +773,9 @@
if (ret)
return ret;
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
/*
* If the capture queue is streaming, but streaming hasn't started
* on the device, but was asked to stop, mark the previously queued
@@ -784,9 +797,17 @@
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
@@ -795,9 +816,17 @@
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
+ int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
@@ -898,7 +927,7 @@
if ((!src_q->streaming || src_q->error ||
list_empty(&src_q->queued_list)) &&
(!dst_q->streaming || dst_q->error ||
- list_empty(&dst_q->queued_list)))
+ (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
return EPOLLERR;
spin_lock_irqsave(&src_q->done_lock, flags);
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index c267283..e749dcb 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -544,20 +544,27 @@
smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
- if (IS_ERR(ebi->smc.regmap))
- return PTR_ERR(ebi->smc.regmap);
+ if (IS_ERR(ebi->smc.regmap)) {
+ ret = PTR_ERR(ebi->smc.regmap);
+ goto put_node;
+ }
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
- if (IS_ERR(ebi->smc.layout))
- return PTR_ERR(ebi->smc.layout);
+ if (IS_ERR(ebi->smc.layout)) {
+ ret = PTR_ERR(ebi->smc.layout);
+ goto put_node;
+ }
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
- if (PTR_ERR(ebi->smc.clk) != -ENOENT)
- return PTR_ERR(ebi->smc.clk);
+ if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+ ret = PTR_ERR(ebi->smc.clk);
+ goto put_node;
+ }
ebi->smc.clk = NULL;
}
+ of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@@ -608,6 +615,10 @@
}
return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+ of_node_put(smc_np);
+ return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index ddb1879..5a059be 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1403,7 +1403,7 @@
temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
- if (!emif || !pd || !dev_info) {
+ if (!emif || !temp || !dev_info) {
dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
goto error;
}
@@ -1495,7 +1495,7 @@
{
struct emif_data *emif;
struct resource *res;
- int irq;
+ int irq, ret;
if (pdev->dev.of_node)
emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
@@ -1526,7 +1526,9 @@
emif_onetime_settings(emif);
emif_debugfs_init(emif);
disable_and_clear_all_interrupts(emif);
- setup_interrupts(emif, irq);
+ ret = setup_interrupts(emif, irq);
+ if (ret)
+ goto error;
/* One-time actions taken on probing the first device */
if (!emif1) {
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index d9f5437..1791614 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -134,6 +134,7 @@
for_each_child_of_node(np_ddr, np_tim) {
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_do_get_timings(np_tim, &timings[i])) {
+ of_node_put(np_tim);
devm_kfree(dev, timings);
goto default_timings;
}
@@ -282,6 +283,7 @@
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
devm_kfree(dev, timings);
+ of_node_put(np_tim);
goto default_timings;
}
i++;
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
index 575fadb..9eb8cc7 100644
--- a/drivers/memory/pl172.c
+++ b/drivers/memory/pl172.c
@@ -273,14 +273,12 @@
return ret;
}
-static int pl172_remove(struct amba_device *adev)
+static void pl172_remove(struct amba_device *adev)
{
struct pl172_data *pl172 = amba_get_drvdata(adev);
clk_disable_unprepare(pl172->clk);
amba_release_regions(adev);
-
- return 0;
}
static const struct amba_id pl172_ids[] = {
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index cc01979..1a6964f 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -416,6 +416,7 @@
if (init)
init(adev, child);
of_platform_device_create(child, NULL, &adev->dev);
+ of_node_put(child);
return 0;
@@ -427,14 +428,12 @@
return err;
}
-static int pl353_smc_remove(struct amba_device *adev)
+static void pl353_smc_remove(struct amba_device *adev)
{
struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);
clk_disable_unprepare(pl353_smc->memclk);
clk_disable_unprepare(pl353_smc->aclk);
-
- return 0;
}
static const struct amba_id pl353_ids[] = {
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 9019121..1dfb81d 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -163,25 +163,39 @@
/*
- * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
- * with proper width. Requires SMENR_SPIDE to be correctly set before!
+ * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
+ * proper width. Requires rpcif.xfer_size to be correctly set before!
*/
static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct rpcif *rpc = context;
- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
-
- if (spide == 0x8) {
+ switch (reg) {
+ case RPCIF_SMRDR0:
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
*val = readb(rpc->base + reg);
return 0;
- } else if (spide == 0xC) {
+
+ case 2:
*val = readw(rpc->base + reg);
return 0;
- } else if (spide != 0xF) {
+
+ case 4:
+ case 8:
+ *val = readl(rpc->base + reg);
+ return 0;
+
+ default:
return -EILSEQ;
}
+
+ case RPCIF_SMRDR1:
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
}
*val = readl(rpc->base + reg);
@@ -193,18 +207,34 @@
{
struct rpcif *rpc = context;
- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
-
- if (spide == 0x8) {
+ switch (reg) {
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
writeb(val, rpc->base + reg);
return 0;
- } else if (spide == 0xC) {
+
+ case 2:
writew(val, rpc->base + reg);
return 0;
- } else if (spide != 0xF) {
+
+ case 4:
+ case 8:
+ writel(val, rpc->base + reg);
+ return 0;
+
+ default:
return -EILSEQ;
}
+
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
+
+ case RPCIF_SMRDR0:
+ case RPCIF_SMRDR1:
+ return -EPERM;
}
writel(val, rpc->base + reg);
@@ -455,6 +485,7 @@
smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ rpc->xfer_size = nbytes;
memcpy(data, rpc->buffer + pos, nbytes);
if (nbytes == 8) {
@@ -519,6 +550,7 @@
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR,
rpc->smcr | RPCIF_SMCR_SPIE);
+ rpc->xfer_size = nbytes;
ret = wait_msg_xfer_end(rpc);
if (ret)
goto err_out;
@@ -592,6 +624,7 @@
struct platform_device *vdev;
struct device_node *flash;
const char *name;
+ int ret;
flash = of_get_next_child(pdev->dev.of_node, NULL);
if (!flash) {
@@ -615,7 +648,14 @@
return -ENOMEM;
vdev->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, vdev);
- return platform_device_add(vdev);
+
+ ret = platform_device_add(vdev);
+ if (ret) {
+ platform_device_put(vdev);
+ return ret;
+ }
+
+ return 0;
}
static int rpcif_remove(struct platform_device *pdev)
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 3d230f0..5cec30d 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1192,33 +1192,39 @@
dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_row)
- return -ENOMEM;
+ if (!dmc->timing_row) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_data)
- return -ENOMEM;
+ if (!dmc->timing_data) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_power)
- return -ENOMEM;
+ if (!dmc->timing_power) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
DDR_TYPE_LPDDR3,
&dmc->timings_arr_size);
if (!dmc->timings) {
- of_node_put(np_ddr);
dev_warn(dmc->dev, "could not get timings from DT\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
if (!dmc->min_tck) {
- of_node_put(np_ddr);
dev_warn(dmc->dev, "could not get tck from DT\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
/* Sorted array of OPPs with frequency ascending */
@@ -1232,13 +1238,14 @@
clk_period_ps);
}
- of_node_put(np_ddr);
/* Take the highest frequency's timings as 'bypass' */
dmc->bypass_timing_row = dmc->timing_row[idx - 1];
dmc->bypass_timing_data = dmc->timing_data[idx - 1];
dmc->bypass_timing_power = dmc->timing_power[idx - 1];
+put_node:
+ of_node_put(np_ddr);
return ret;
}
@@ -1327,7 +1334,6 @@
*/
static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
{
- int counters_size;
int ret, i;
dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
@@ -1337,8 +1343,8 @@
return dmc->num_counters;
}
- counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters;
- dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL);
+ dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+ sizeof(*dmc->counter), GFP_KERNEL);
if (!dmc->counter)
return -ENOMEM;
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index bc1f484..6df98c0 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1335,17 +1335,17 @@
msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
msb->logical_block_count = msb->zone_count * 496 - 2;
- msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
- msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
msb->lba_to_pba_table =
kmalloc_array(msb->logical_block_count, sizeof(u16),
GFP_KERNEL);
if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
!msb->erased_blocks_bitmap) {
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
- kfree(msb->erased_blocks_bitmap);
return -ENOMEM;
}
@@ -1953,7 +1953,8 @@
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
kfree(msb->cache);
msb->card = NULL;
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index a6bd213..14e4bbe 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -914,14 +914,14 @@
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL);
if (ret < 0)
- goto out;
+ goto out_unmap;
}
if (mem_sdio && (irq >= 0)) {
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_mmc, 1, mem_sdio, irq, NULL);
if (ret < 0)
- goto out;
+ goto out_unmap;
}
ret = 0;
@@ -935,8 +935,12 @@
ret = mfd_add_devices(&pdev->dev, 0,
asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL);
}
+ return ret;
- out:
+out_unmap:
+ if (asic->tmio_cnf)
+ iounmap(asic->tmio_cnf);
+out:
return ret;
}
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index e5c8bc9..9658204 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -46,14 +46,12 @@
}
clk_enable(davinci_vc->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- fifo_base = (dma_addr_t)res->start;
- davinci_vc->base = devm_ioremap_resource(&pdev->dev, res);
+ davinci_vc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(davinci_vc->base)) {
ret = PTR_ERR(davinci_vc->base);
goto fail;
}
+ fifo_base = (dma_addr_t)res->start;
davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev,
davinci_vc->base,
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index a016b39..5f1f6f3 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -69,7 +69,7 @@
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
return irq;
tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
@@ -84,6 +84,19 @@
return 0;
}
+static int mx25_tsadc_unset_irq(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static void mx25_tsadc_setup_clk(struct platform_device *pdev,
struct mx25_tsadc *tsadc)
{
@@ -171,18 +184,21 @@
platform_set_drvdata(pdev, tsadc);
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ mx25_tsadc_unset_irq(pdev);
+
+ return ret;
}
static int mx25_tsadc_remove(struct platform_device *pdev)
{
- struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (irq) {
- irq_set_chained_handler_and_data(irq, NULL, NULL);
- irq_domain_remove(tsadc->domain);
- }
+ mx25_tsadc_unset_irq(pdev);
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index ddd64f9..926653e 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -95,6 +95,7 @@
return 0;
err_del_irq_chip:
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
return ret;
}
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index e92eeeb..4cd5ecc 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -403,7 +403,7 @@
micro_reset_comm(micro);
irq = platform_get_irq(pdev, 0);
- if (!irq)
+ if (irq < 0)
return -EINVAL;
ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr,
IRQF_SHARED, "ipaq-micro",
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index 348439a..3900629 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -175,6 +175,7 @@
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"lp8788-irq", irqd);
if (ret) {
+ irq_domain_remove(lp->irqdm);
dev_err(lp->dev, "failed to create a thread for IRQ_N\n");
return ret;
}
@@ -188,4 +189,6 @@
{
if (lp->irq)
free_irq(lp->irq, lp->irqdm);
+ if (lp->irqdm)
+ irq_domain_remove(lp->irqdm);
}
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index 768d556..5c3d642 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -195,8 +195,16 @@
if (ret)
return ret;
- return mfd_add_devices(lp->dev, -1, lp8788_devs,
- ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(lp->dev, -1, lp8788_devs,
+ ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ if (ret)
+ goto err_exit_irq;
+
+ return 0;
+
+err_exit_irq:
+ lp8788_irq_exit(lp);
+ return ret;
}
static int lp8788_remove(struct i2c_client *cl)
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 3bbb29a..2411b7a 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -63,6 +63,8 @@
#define SPIBASE_BYT 0x54
#define SPIBASE_BYT_SZ 512
#define SPIBASE_BYT_EN BIT(1)
+#define BYT_BCR 0xfc
+#define BYT_BCR_WPD BIT(0)
#define SPIBASE_LPT 0x3800
#define SPIBASE_LPT_SZ 512
@@ -1083,12 +1085,57 @@
return ret;
}
+static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
+{
+ u32 val;
+
+ val = readl(base + BYT_BCR);
+ if (!(val & BYT_BCR_WPD)) {
+ val |= BYT_BCR_WPD;
+ writel(val, base + BYT_BCR);
+ val = readl(base + BYT_BCR);
+ }
+
+ return val & BYT_BCR_WPD;
+}
+
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+ unsigned int spi = PCI_DEVFN(13, 2);
+ struct pci_bus *bus = data;
+ u32 bcr;
+
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_bus_write_config_dword(bus, spi, BCR, bcr);
+ pci_bus_read_config_dword(bus, spi, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
static int lpc_ich_init_spi(struct pci_dev *dev)
{
struct lpc_ich_priv *priv = pci_get_drvdata(dev);
struct resource *res = &intel_spi_res[0];
struct intel_spi_boardinfo *info;
- u32 spi_base, rcba, bcr;
+ u32 spi_base, rcba;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1102,6 +1149,8 @@
if (spi_base & SPIBASE_BYT_EN) {
res->start = spi_base & ~(SPIBASE_BYT_SZ - 1);
res->end = res->start + SPIBASE_BYT_SZ - 1;
+
+ info->set_writeable = lpc_ich_byt_set_writeable;
}
break;
@@ -1112,8 +1161,8 @@
res->start = spi_base + SPIBASE_LPT;
res->end = res->start + SPIBASE_LPT_SZ - 1;
- pci_read_config_dword(dev, BCR, &bcr);
- info->writeable = !!(bcr & BCR_WPD);
+ info->set_writeable = lpc_ich_lpt_set_writeable;
+ info->data = dev;
}
break;
@@ -1134,8 +1183,8 @@
res->start = spi_base & 0xfffffff0;
res->end = res->start + SPIBASE_APL_SZ - 1;
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- info->writeable = !!(bcr & BCR_WPD);
+ info->set_writeable = lpc_ich_bxt_set_writeable;
+ info->data = bus;
}
pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index fec2096..a6661e0 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -419,9 +419,11 @@
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
of_node_put(fps_child);
+ of_node_put(fps_np);
return ret;
}
}
+ of_node_put(fps_np);
config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 1abe743..e281a92 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -323,8 +323,10 @@
adc1 |= MC13783_ADC1_ATOX;
dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
- mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
+ ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
mc13xxx_handler_adcdone, __func__, &adcdone_data);
+ if (ret)
+ goto out;
mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0);
mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 6d2f4a0..37ad72d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1720,7 +1720,12 @@
static int __init sm501_base_init(void)
{
- platform_driver_register(&sm501_plat_driver);
+ int ret;
+
+ ret = platform_driver_register(&sm501_plat_driver);
+ if (ret < 0)
+ return ret;
+
return pci_register_driver(&sm501_pci_driver);
}
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 70da0c4..58811c5 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -405,11 +405,8 @@
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- int ret;
- ret = pdata->disable(dev);
clk_disable_unprepare(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
clk_disable_unprepare(t7l66xb->clk32k);
@@ -420,8 +417,7 @@
mfd_remove_devices(&dev->dev);
kfree(t7l66xb);
- return ret;
-
+ return 0;
}
static struct platform_driver t7l66xb_platform_driver = {
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index d6cd553..69f9b03 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -232,9 +232,9 @@
clk_disable_unprepare(ssc->clk);
ssc->irq = platform_get_irq(pdev, 0);
- if (!ssc->irq) {
+ if (ssc->irq < 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
- return -ENXIO;
+ return ssc->irq;
}
mutex_lock(&user_lock);
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index de6d44a..3f514d7 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -266,7 +266,7 @@
if (!priv)
return -ENOMEM;
- ret = ida_simple_get(&alcor_pci_idr, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL);
if (ret < 0)
return ret;
priv->id = ret;
@@ -280,7 +280,8 @@
ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI);
if (ret) {
dev_err(&pdev->dev, "Cannot request region\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_free_ida;
}
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
@@ -324,6 +325,8 @@
error_release_regions:
pci_release_regions(pdev);
+error_free_ida:
+ ida_free(&alcor_pci_idr, priv->id);
return ret;
}
@@ -337,7 +340,7 @@
mfd_remove_devices(&pdev->dev);
- ida_simple_remove(&alcor_pci_idr, priv->id);
+ ida_free(&alcor_pci_idr, priv->id);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 5d15607..358b000 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1529,7 +1529,7 @@
pcr->remap_addr = ioremap(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
- goto free_handle;
+ goto free_idr;
}
pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
@@ -1591,6 +1591,10 @@
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
iounmap(pcr->remap_addr);
+free_idr:
+ spin_lock(&rtsx_pci_lock);
+ idr_remove(&rtsx_pci_idr, pcr->id);
+ spin_unlock(&rtsx_pci_lock);
free_handle:
kfree(handle);
free_pcr:
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59eda55..f150d87 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -631,16 +631,20 @@
ucr->pusb_dev = usb_dev;
- ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE,
- GFP_KERNEL, &ucr->iobuf_dma);
- if (!ucr->iobuf)
+ ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
+ if (!ucr->cmd_buf)
return -ENOMEM;
+ ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
+ if (!ucr->rsp_buf) {
+ ret = -ENOMEM;
+ goto out_free_cmd_buf;
+ }
+
usb_set_intfdata(intf, ucr);
ucr->vendor_id = id->idVendor;
ucr->product_id = id->idProduct;
- ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf;
mutex_init(&ucr->dev_mutex);
@@ -667,8 +671,12 @@
return 0;
out_init_fail:
- usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
- ucr->iobuf_dma);
+ usb_set_intfdata(ucr->pusb_intf, NULL);
+ kfree(ucr->rsp_buf);
+ ucr->rsp_buf = NULL;
+out_free_cmd_buf:
+ kfree(ucr->cmd_buf);
+ ucr->cmd_buf = NULL;
return ret;
}
@@ -681,8 +689,12 @@
mfd_remove_devices(&intf->dev);
usb_set_intfdata(ucr->pusb_intf, NULL);
- usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
- ucr->iobuf_dma);
+
+ kfree(ucr->cmd_buf);
+ ucr->cmd_buf = NULL;
+
+ kfree(ucr->rsp_buf);
+ ucr->rsp_buf = NULL;
}
#ifdef CONFIG_PM
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 4cb829d..2e4dcfe 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -349,6 +349,7 @@
out:
cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ bitmap_free(ctx->irq_bitmap);
afu_irq_name_free(ctx);
return -ENOMEM;
}
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 3e4a594..6a45664 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -940,14 +940,18 @@
u32 csraddr, csrval;
char *buf;
+ if (*offp)
+ return 0;
+
/* Copy data from User-space */
buf = kmalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, count, offp, ubuf, count);
- if (ret < 0)
+ if (copy_from_user(buf, ubuf, count)) {
+ ret = -EFAULT;
goto free_buf;
+ }
buf[count] = 0;
/* Find position of colon in the buffer */
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index d0471fe..2c3142b 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1349,17 +1349,18 @@
struct fastrpc_req_munmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_buf *buf, *b;
+ struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
spin_lock(&fl->lock);
- list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
- if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
+ if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
+ buf = iter;
break;
- buf = NULL;
+ }
}
spin_unlock(&fl->lock);
@@ -1547,7 +1548,12 @@
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
- sess = &cctx->session[cctx->sesscount];
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
+ dev_err(&pdev->dev, "too many sessions\n");
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ return -ENOSPC;
+ }
+ sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
@@ -1560,13 +1566,12 @@
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
- dup_sess = &cctx->session[cctx->sesscount];
+ dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
- cctx->sesscount++;
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 912ddfa..9716b07 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -859,6 +859,8 @@
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
rc = pci_enable_device(hdev->pdev);
+ if (rc < 0)
+ return rc;
} else if (value == 2) {
pci_save_state(hdev->pdev);
pci_disable_device(hdev->pdev);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 4948915..3e4d894 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1060,10 +1060,10 @@
{
if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
- return -ENOSPC;
+ return 1;
}
strcpy(config, opt);
- return 0;
+ return 1;
}
__setup("kgdbts=", kgdbts_option_setup);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a337f97..d39b813 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -231,6 +231,11 @@
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 109e8d4..cde2655 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -30,12 +30,12 @@
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
- return stack + 0;
+ return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
- unsigned char buf[32];
+ unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +43,12 @@
buf[i] = value & 0xff;
}
- return trick_compiler(buf);
+ /*
+ * Put the target buffer in the middle of stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +71,12 @@
bad_stack -= sizeof(unsigned long);
}
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 67bb6a2..afb2e78 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -107,6 +107,9 @@
#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
+#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */
+
+#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
/*
* MEI HW Section
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index fee6030..ca3067f 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -427,31 +427,26 @@
list_for_each_entry(cl, &dev->file_list, link) {
if (mei_cl_hbm_equal(cl, mei_hdr)) {
cl_dbg(dev, cl, "got a message\n");
- break;
+ ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
+ goto reset_slots;
}
}
/* if no recipient cl was found we assume corrupted header */
- if (&cl->link == &dev->file_list) {
- /* A message for not connected fixed address clients
- * should be silently discarded
- * On power down client may be force cleaned,
- * silently discard such messages
- */
- if (hdr_is_fixed(mei_hdr) ||
- dev->dev_state == MEI_DEV_POWER_DOWN) {
- mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
- ret = 0;
- goto reset_slots;
- }
- dev_err(dev->dev, "no destination client found 0x%08X\n",
- dev->rd_msg_hdr[0]);
- ret = -EBADMSG;
- goto end;
+ /* A message for not connected fixed address clients
+ * should be silently discarded
+ * On power down client may be force cleaned,
+ * silently discard such messages
+ */
+ if (hdr_is_fixed(mei_hdr) ||
+ dev->dev_state == MEI_DEV_POWER_DOWN) {
+ mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
+ ret = 0;
+ goto reset_slots;
}
-
- ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
-
+ dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
+ ret = -EBADMSG;
+ goto end;
reset_slots:
/* reset the number of slots and header */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3a45aaf..5324b65 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -113,6 +113,9 @@
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 4d1b44d..e094809 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -259,6 +259,8 @@
if (IS_ERR(ev_ctx))
return PTR_ERR(ev_ctx);
rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
+ if (rc)
+ eventfd_ctx_put(ev_ctx);
break;
case OCXL_IOCTL_GET_METADATA:
@@ -558,7 +560,9 @@
err_unregister:
ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
+ free_minor(info);
device_unregister(&info->dev);
+ return rc;
err_put:
ocxl_afu_put(afu);
free_minor(info);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index d384473..48eec5f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -331,6 +331,22 @@
return false;
}
+static int pci_endpoint_test_validate_xfer_params(struct device *dev,
+ struct pci_endpoint_test_xfer_param *param, size_t alignment)
+{
+ if (!param->size) {
+ dev_dbg(dev, "Data size is zero\n");
+ return -EINVAL;
+ }
+
+ if (param->size > SIZE_MAX - alignment) {
+ dev_dbg(dev, "Maximum transfer data size exceeded\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
@@ -362,9 +378,11 @@
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
@@ -496,9 +514,11 @@
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
@@ -594,9 +614,11 @@
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 56dd98a..95e56eb 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -9,43 +9,38 @@
static struct class *uacce_class;
static dev_t uacce_devt;
-static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);
+/*
+ * If the parent driver or the device disappears, the queue state is invalid and
+ * ops are not usable anymore.
+ */
+static bool uacce_queue_is_valid(struct uacce_queue *q)
+{
+ return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
+}
+
static int uacce_start_queue(struct uacce_queue *q)
{
- int ret = 0;
+ int ret;
- mutex_lock(&uacce_mutex);
-
- if (q->state != UACCE_Q_INIT) {
- ret = -EINVAL;
- goto out_with_lock;
- }
+ if (q->state != UACCE_Q_INIT)
+ return -EINVAL;
if (q->uacce->ops->start_queue) {
ret = q->uacce->ops->start_queue(q);
if (ret < 0)
- goto out_with_lock;
+ return ret;
}
q->state = UACCE_Q_STARTED;
-
-out_with_lock:
- mutex_unlock(&uacce_mutex);
-
- return ret;
+ return 0;
}
static int uacce_put_queue(struct uacce_queue *q)
{
struct uacce_device *uacce = q->uacce;
- mutex_lock(&uacce_mutex);
-
- if (q->state == UACCE_Q_ZOMBIE)
- goto out;
-
if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
uacce->ops->stop_queue(q);
@@ -54,8 +49,6 @@
uacce->ops->put_queue(q);
q->state = UACCE_Q_ZOMBIE;
-out:
- mutex_unlock(&uacce_mutex);
return 0;
}
@@ -65,20 +58,36 @@
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
+ long ret = -ENXIO;
+
+ /*
+ * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
+ * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
+ * gets called with mmap_lock held, by taking uacce->mutex instead of
+ * q->mutex. Doing this in uacce_fops_mmap() is not possible because
+ * uacce_fops_open() calls iommu_sva_bind_device(), which takes
+ * mmap_lock, while holding uacce->mutex.
+ */
+ mutex_lock(&uacce->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
switch (cmd) {
case UACCE_CMD_START_Q:
- return uacce_start_queue(q);
-
+ ret = uacce_start_queue(q);
+ break;
case UACCE_CMD_PUT_Q:
- return uacce_put_queue(q);
-
+ ret = uacce_put_queue(q);
+ break;
default:
- if (!uacce->ops->ioctl)
- return -EINVAL;
-
- return uacce->ops->ioctl(q, cmd, arg);
+ if (uacce->ops->ioctl)
+ ret = uacce->ops->ioctl(q, cmd, arg);
+ else
+ ret = -EINVAL;
}
+out_unlock:
+ mutex_unlock(&uacce->mutex);
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -136,6 +145,13 @@
if (!q)
return -ENOMEM;
+ mutex_lock(&uacce->mutex);
+
+ if (!uacce->parent) {
+ ret = -EINVAL;
+ goto out_with_mem;
+ }
+
ret = uacce_bind_queue(uacce, q);
if (ret)
goto out_with_mem;
@@ -152,10 +168,9 @@
filep->private_data = q;
uacce->inode = inode;
q->state = UACCE_Q_INIT;
-
- mutex_lock(&uacce->queues_lock);
+ mutex_init(&q->mutex);
list_add(&q->list, &uacce->queues);
- mutex_unlock(&uacce->queues_lock);
+ mutex_unlock(&uacce->mutex);
return 0;
@@ -163,18 +178,20 @@
uacce_unbind_queue(q);
out_with_mem:
kfree(q);
+ mutex_unlock(&uacce->mutex);
return ret;
}
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
- mutex_lock(&q->uacce->queues_lock);
- list_del(&q->list);
- mutex_unlock(&q->uacce->queues_lock);
+ mutex_lock(&uacce->mutex);
uacce_put_queue(q);
uacce_unbind_queue(q);
+ list_del(&q->list);
+ mutex_unlock(&uacce->mutex);
kfree(q);
return 0;
@@ -217,10 +234,9 @@
vma->vm_private_data = q;
qfr->type = type;
- mutex_lock(&uacce_mutex);
-
- if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
- ret = -EINVAL;
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q)) {
+ ret = -ENXIO;
goto out_with_lock;
}
@@ -259,12 +275,12 @@
}
q->qfrs[type] = qfr;
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
return ret;
out_with_lock:
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
kfree(qfr);
return ret;
}
@@ -273,12 +289,20 @@
{
struct uacce_queue *q = file->private_data;
struct uacce_device *uacce = q->uacce;
+ __poll_t ret = 0;
+
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
poll_wait(file, &q->wait, wait);
- if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
- return EPOLLIN | EPOLLRDNORM;
- return 0;
+ if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
+ ret = EPOLLIN | EPOLLRDNORM;
+
+out_unlock:
+ mutex_unlock(&q->mutex);
+ return ret;
}
static const struct file_operations uacce_fops = {
@@ -431,7 +455,7 @@
goto err_with_uacce;
INIT_LIST_HEAD(&uacce->queues);
- mutex_init(&uacce->queues_lock);
+ mutex_init(&uacce->mutex);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
@@ -489,13 +513,23 @@
if (uacce->inode)
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+ /*
+ * uacce_fops_open() may be running concurrently, even after we remove
+ * the cdev. Holding uacce->mutex ensures that open() does not obtain a
+ * removed uacce device.
+ */
+ mutex_lock(&uacce->mutex);
/* ensure no open queue remains */
- mutex_lock(&uacce->queues_lock);
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+ /*
+ * Taking q->mutex ensures that fops do not use the defunct
+ * uacce->ops after the queue is disabled.
+ */
+ mutex_lock(&q->mutex);
uacce_put_queue(q);
+ mutex_unlock(&q->mutex);
uacce_unbind_queue(q);
}
- mutex_unlock(&uacce->queues_lock);
/* disable sva now since no opened queues */
if (uacce->flags & UACCE_DEV_SVA)
@@ -504,6 +538,13 @@
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
+ /*
+ * uacce exists as long as there are open fds, but ops will be freed
+ * now. Ensure that bugs cause NULL deref rather than use-after-free.
+ */
+ uacce->ops = NULL;
+ uacce->parent = NULL;
+ mutex_unlock(&uacce->mutex);
put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index a49782d..d4d388f 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -852,6 +852,7 @@
u32 context_id = vmci_get_context_id();
struct vmci_event_qp ev;
+ memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
@@ -1465,6 +1466,7 @@
* kernel.
*/
+ memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 99b981a..6622e32 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -169,7 +169,7 @@
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
@@ -1069,6 +1069,11 @@
nr = blk_rq_sectors(req);
do {
+ unsigned int erase_arg = card->erase_arg;
+
+ if (mmc_card_broken_sd_discard(card))
+ erase_arg = SD_ERASE_ARG;
+
err = 0;
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1079,7 +1084,7 @@
card->ext_csd.generic_cmd6_time);
}
if (!err)
- err = mmc_erase(card, from, nr, card->erase_arg);
+ err = mmc_erase(card, from, nr, erase_arg);
} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (err)
status = BLK_STS_IOERR;
@@ -1247,7 +1252,7 @@
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr_p,
+ int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1311,12 +1316,12 @@
brq->data.blocks--;
/*
- * After a read error, we redo the request one sector
+ * After a read error, we redo the request one (native) sector
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
- if (disable_multi)
- brq->data.blocks = 1;
+ if (recovery_mode)
+ brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/*
* Some controllers have HW issues while operating
@@ -1442,8 +1447,7 @@
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
- else
- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
@@ -1534,7 +1538,7 @@
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
@@ -1543,7 +1547,7 @@
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
- mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
@@ -1634,7 +1638,7 @@
#define MMC_READ_SINGLE_RETRIES 2
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1642,6 +1646,7 @@
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
+ size_t bytes_per_read = queue_physical_block_size(mq->queue);
do {
u32 status;
@@ -1676,13 +1681,13 @@
else
error = BLK_STS_OK;
- } while (blk_update_request(req, error, 512));
+ } while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, 512);
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1823,10 +1828,9 @@
return;
}
- /* FIXME: Missing single sector read for large sector size */
- if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
- brq->data.blocks > 1) {
- /* Read one sector at a time */
+ if (rq_data_dir(req) == READ && brq->data.blocks >
+ queue_physical_block_size(mq->queue) >> 9) {
+ /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 7bd392d..5c69861 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -70,6 +70,7 @@
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_SANDISK_SD 0x3
#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
@@ -222,4 +223,9 @@
return c->quirks & MMC_QUIRK_BROKEN_HPI;
}
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index eb82f6a..7d9ec91 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1128,7 +1128,13 @@
mmc_power_cycle(host, ocr);
} else {
bit = fls(ocr) - 1;
- ocr &= 3 << bit;
+ /*
+ * The bit variable represents the highest voltage bit set in
+ * the OCR register.
+ * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+ * we must shift the mask '3' with (bit - 1).
+ */
+ ocr &= 3 << (bit - 1);
if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 864c8c2..03e2f96 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -513,6 +513,16 @@
EXPORT_SYMBOL(mmc_alloc_host);
+static int mmc_validate_host_caps(struct mmc_host *host)
+{
+ if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
+ dev_warn(host->parent, "missing ->enable_sdio_irq() ops\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
@@ -525,8 +535,9 @@
{
int err;
- WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
- !host->ops->enable_sdio_irq);
+ err = mmc_validate_host_caps(host);
+ if (err)
+ return err;
err = device_add(&host->class_dev);
if (err)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7494d59..87807ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1378,13 +1378,17 @@
goto out_err;
}
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ */
mmc_set_timing(host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
err = mmc_switch_status(card, true);
if (err)
goto out_err;
- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1442,7 +1446,7 @@
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned int old_timing, old_signal_voltage;
+ unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
@@ -1473,8 +1477,17 @@
false, true);
if (err)
goto err;
+
+ /*
+ * Bump to HS timing and frequency. Some cards don't handle
+ * SEND_STATUS reliably at the initial frequency.
+ * NB: We can't move to full (HS200) speeds until after we've
+ * successfully switched over.
+ */
old_timing = host->ios.timing;
+ old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
@@ -1487,8 +1500,10 @@
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
- if (err == -EBADMSG)
+ if (err == -EBADMSG) {
+ mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
+ }
}
err:
if (err) {
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index d68e6e5..c8c0f50 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -99,6 +99,12 @@
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /*
+ * Some SD cards reports discard support while they don't
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bac343a..868b121 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -853,7 +853,8 @@
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -932,16 +933,17 @@
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
-
- /*
- * Fetch switch information from card.
- */
- err = mmc_read_switch(card);
- if (err)
- return err;
}
/*
+ * Fetch switch information from card. Note, sd3_bus_mode can change if
+ * voltage switch outcome changes, so do this always.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
@@ -1089,26 +1091,15 @@
if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
- /*
- * Re-read switch information in case it has changed since
- * oldcard was initialized.
- */
- if (oldcard) {
- err = mmc_read_switch(card);
- if (err)
- goto free_card;
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
}
- if (mmc_sd_card_using_v18(card)) {
- if (mmc_host_set_uhs_voltage(host) ||
- mmc_sd_init_uhs_card(card)) {
- v18_fixup_failed = true;
- mmc_power_cycle(host, ocr);
- if (!oldcard)
- mmc_remove_card(card);
- goto retry;
- }
- goto done;
- }
+ goto cont;
}
/* Initialization sequence for UHS-I cards */
@@ -1143,7 +1134,7 @@
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+cont:
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
@@ -1161,7 +1152,7 @@
err = -EINVAL;
goto free_card;
}
-done:
+
host->card = card;
return 0;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 3d70902..a448535 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -292,7 +292,8 @@
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+ if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+ sdio_free_func_cis(func);
kfree(func->info);
kfree(func->tmpbuf);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 30ff42f..82e1fbd 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -1079,9 +1079,10 @@
config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
- depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
+ depends on MMC_SDHCI_PLTFM && OF
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
+ select REGMAP_MMIO
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's AM654 SOCs. The controller supports
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index bd00515..56a3bf5 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1097,8 +1097,9 @@
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
-out_clk:
+
clk_disable_unprepare(host->clk);
+out_clk:
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 2c4b2df..12dca91 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -277,6 +277,7 @@
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
+ of_node_put(cn);
goto error;
}
i++;
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bb..202b1d6 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -142,8 +142,10 @@
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
- if (ret)
+ if (ret) {
+ of_node_put(child_node);
goto error;
+ }
}
i++;
}
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 90cd179..647928a 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1375,8 +1375,12 @@
static int davinci_mmcsd_resume(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
+ int ret;
- clk_enable(host->clk);
+ ret = clk_enable(host->clk);
+ if (ret)
+ return ret;
+
mmc_davinci_reset_ctrl(host, 0);
return 0;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index a1f92fe..aa3dfb9 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -236,6 +236,26 @@
return PTR_ERR(host->dma_rx);
}
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx) {
+ struct device *dev = host->dma_tx->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
+ if (host->dma_rx) {
+ struct device *dev = host->dma_rx->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
return 0;
}
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 091e0e0..bccc85b 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1161,8 +1161,10 @@
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index a5e05ed..9d35453 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -34,7 +34,7 @@
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
- if (hsq->mrq) {
+ if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 9bde0de..b5684e5 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2203,7 +2203,7 @@
return ret;
}
-static int mmci_remove(struct amba_device *dev)
+static void mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
@@ -2231,8 +2231,6 @@
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
}
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index a75d3dd..4cceb9b 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -62,8 +62,8 @@
* excepted the last element which has no constraint on idmasize
*/
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
- !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
dev_err(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
@@ -71,7 +71,7 @@
}
}
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
dev_err(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index ea67a7e..c16300b 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -527,9 +527,6 @@
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -654,16 +651,8 @@
dmaengine_slave_config(host->dma_chan_rx, &cfg);
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index f5c965d..d71c113 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2293,6 +2293,9 @@
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2693,11 +2696,14 @@
{
struct mmc_host *mmc = dev_get_drvdata(dev);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
+ writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
}
return pm_runtime_force_suspend(dev);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 316393c..55868b6 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 782879d..ac01fb5 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -390,10 +390,10 @@
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
- /* Set the sampling clock selection range of HS400 mode */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
- 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+ sd_scc_read32(host, priv,
+ SH_MOBILE_SDHI_SCC_DTCNTL));
/* Avoid bad TAP */
if (bad_taps & BIT(priv->tap_set)) {
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index e00167b..b5cb83b 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -37,10 +37,7 @@
bool double_clk;
bool eject;
bool initial_mode;
- int power_state;
-#define SDMMC_POWER_ON 1
-#define SDMMC_POWER_OFF 0
-
+ int prev_power_state;
int sg_count;
s32 cookie;
int cookie_sg_count;
@@ -902,14 +899,21 @@
return err;
}
-static int sd_power_on(struct realtek_pci_sdmmc *host)
+static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
{
struct rtsx_pcr *pcr = host->pcr;
int err;
- if (host->power_state == SDMMC_POWER_ON)
+ if (host->prev_power_state == MMC_POWER_ON)
return 0;
+ if (host->prev_power_state == MMC_POWER_UP) {
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
+ goto finish;
+ }
+
+ msleep(100);
+
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
@@ -928,11 +932,17 @@
if (err < 0)
return err;
+ mdelay(1);
+
err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
if (err < 0)
return err;
- host->power_state = SDMMC_POWER_ON;
+ /* send at least 74 clocks */
+ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
+
+finish:
+ host->prev_power_state = power_mode;
return 0;
}
@@ -941,7 +951,7 @@
struct rtsx_pcr *pcr = host->pcr;
int err;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
rtsx_pci_init_cmd(pcr);
@@ -967,7 +977,7 @@
if (power_mode == MMC_POWER_OFF)
err = sd_power_off(host);
else
- err = sd_power_on(host);
+ err = sd_power_on(host, power_mode);
return err;
}
@@ -1404,10 +1414,11 @@
host = mmc_priv(mmc);
host->pcr = pcr;
+ mmc->ios.power_delay_ms = 5;
host->mmc = mmc;
host->pdev = pdev;
host->cookie = -1;
- host->power_state = SDMMC_POWER_OFF;
+ host->prev_power_state = MMC_POWER_OFF;
INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index f24623a..4d42b18 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -12,28 +12,55 @@
#include <linux/bitops.h>
#include <linux/delay.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
#define SDHCI_VENDOR 0x78
#define SDHCI_VENDOR_ENHANCED_STRB 0x1
+#define SDHCI_VENDOR_GATE_SDCLK_EN 0x2
-#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
-#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
+#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
+#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
+#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
+
+#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
+#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
struct sdhci_brcmstb_priv {
void __iomem *cfg_regs;
- bool has_cqe;
+ unsigned int flags;
};
struct brcmstb_match_priv {
void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
struct sdhci_ops *ops;
- unsigned int flags;
+ const unsigned int flags;
};
+static inline void enable_clock_gating(struct sdhci_host *host)
+{
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_VENDOR);
+ reg |= SDHCI_VENDOR_GATE_SDCLK_EN;
+ sdhci_writel(host, reg, SDHCI_VENDOR);
+}
+
+void brcmstb_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_and_cqhci_reset(host, mask);
+
+ /* Reset will clear this, so re-enable it */
+ if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)
+ enable_clock_gating(host);
+}
+
static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -129,22 +156,23 @@
static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
.set_clock = sdhci_brcmstb_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = brcmstb_reset,
.set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
};
static struct brcmstb_match_priv match_priv_7425 = {
- .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
- BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT |
+ BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
static struct brcmstb_match_priv match_priv_7445 = {
- .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
static const struct brcmstb_match_priv match_priv_7216 = {
+ .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE,
.hs400es = sdhci_brcmstb_hs400es,
.ops = &sdhci_brcmstb_ops_7216,
};
@@ -176,7 +204,7 @@
bool dma64;
int ret;
- if (!priv->has_cqe)
+ if ((priv->flags & BRCMSTB_PRIV_FLAGS_HAS_CQE) == 0)
return sdhci_add_host(host);
dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
@@ -225,7 +253,6 @@
struct sdhci_brcmstb_priv *priv;
struct sdhci_host *host;
struct resource *iomem;
- bool has_cqe = false;
struct clk *clk;
int res;
@@ -244,10 +271,6 @@
return res;
memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
- if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
- has_cqe = true;
- match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
- }
brcmstb_pdata.ops = match_priv->ops;
host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
sizeof(struct sdhci_brcmstb_priv));
@@ -258,7 +281,10 @@
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
- priv->has_cqe = has_cqe;
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ priv->flags |= BRCMSTB_PRIV_FLAGS_HAS_CQE;
+ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+ }
/* Map in the non-standard CFG registers */
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -274,6 +300,14 @@
goto err;
/*
+ * Automatic clock gating does not work for SD cards that may
+ * perform a voltage switch, so only enable it for non-removable devices.
+ */
+ if ((match_priv->flags & BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE) &&
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ priv->flags |= BRCMSTB_PRIV_FLAGS_GATE_CLOCK;
+
+ /*
* If the chip has enhanced strobe and it's enabled, add
* callback
*/
@@ -287,14 +321,14 @@
* properties through mmc_of_parse().
*/
host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
+ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_NO_64BIT)
host->caps &= ~SDHCI_CAN_64BIT;
host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
- if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
res = sdhci_brcmstb_add_host(host, priv);
diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h
new file mode 100644
index 0000000..cf8e7ba
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cqhci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022 The Chromium OS Authors
+ *
+ * Support that applies to the combination of SDHCI and CQHCI, while not
+ * expressing a dependency between the two modules.
+ */
+
+#ifndef __MMC_HOST_SDHCI_CQHCI_H__
+#define __MMC_HOST_SDHCI_CQHCI_H__
+
+#include "cqhci.h"
+#include "sdhci.h"
+
+static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+
+ sdhci_reset(host, mask);
+}
+
+#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */
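As an illustrative sketch (not part of the patch itself): the new sdhci_and_cqhci_reset() helper deactivates CQHCI before issuing the SDHCI reset whenever a full reset is requested on a CQE-capable host. Drivers adopt it either by pointing their .reset op directly at the helper (as sdhci_am654 does further down) or by wrapping it when state cleared by the reset must be restored, as sdhci-brcmstb does above. A minimal wrapper, with hypothetical names, could look like:

static void example_reset(struct sdhci_host *host, u8 mask)
{
	/* Quiesce CQHCI (if active) before the controller reset */
	sdhci_and_cqhci_reset(host, mask);

	/* Re-program any vendor register cleared by SDHCI_RESET_ALL here */
}

static struct sdhci_ops example_ops = {
	.reset = example_reset,
};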
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index a4bd85b..1f1bdd3 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/mmc-esdhc-imx.h>
#include <linux/pm_runtime.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#include "cqhci.h"
@@ -294,22 +295,6 @@
struct pm_qos_request pm_qos_req;
};
-static const struct platform_device_id imx_esdhc_devtype[] = {
- {
- .name = "sdhci-esdhc-imx25",
- .driver_data = (kernel_ulong_t) &esdhc_imx25_data,
- }, {
- .name = "sdhci-esdhc-imx35",
- .driver_data = (kernel_ulong_t) &esdhc_imx35_data,
- }, {
- .name = "sdhci-esdhc-imx51",
- .driver_data = (kernel_ulong_t) &esdhc_imx51_data,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
-
static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, },
{ .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
@@ -1243,7 +1228,7 @@
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -1545,72 +1530,6 @@
}
#endif
-static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
- struct sdhci_host *host,
- struct pltfm_imx_data *imx_data)
-{
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- int err;
-
- if (!host->mmc->parent->platform_data) {
- dev_err(mmc_dev(host->mmc), "no board data!\n");
- return -EINVAL;
- }
-
- imx_data->boarddata = *((struct esdhc_platform_data *)
- host->mmc->parent->platform_data);
- /* write_protect */
- if (boarddata->wp_type == ESDHC_WP_GPIO) {
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request write-protect gpio!\n");
- return err;
- }
- }
-
- /* card_detect */
- switch (boarddata->cd_type) {
- case ESDHC_CD_GPIO:
- err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
- if (err) {
- dev_err(mmc_dev(host->mmc),
- "failed to request card-detect gpio!\n");
- return err;
- }
- fallthrough;
-
- case ESDHC_CD_CONTROLLER:
- /* we have a working card_detect back */
- host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- break;
-
- case ESDHC_CD_PERMANENT:
- host->mmc->caps |= MMC_CAP_NONREMOVABLE;
- break;
-
- case ESDHC_CD_NONE:
- break;
- }
-
- switch (boarddata->max_bus_width) {
- case 8:
- host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
- break;
- case 4:
- host->mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 1:
- default:
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
- break;
- }
-
- return 0;
-}
-
static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -1630,8 +1549,7 @@
imx_data = sdhci_pltfm_priv(pltfm_host);
- imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
- pdev->id_entry->driver_data;
+ imx_data->socdata = of_id->data;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
@@ -1692,6 +1610,10 @@
host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
}
+ err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+ if (err)
+ goto disable_ahb_clk;
+
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
@@ -1699,13 +1621,15 @@
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
+ imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
- if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+ if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
+ imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
host->mmc->caps2 |= MMC_CAP2_HS400_ES;
host->mmc_host_ops.hs400_enhanced_strobe =
esdhc_hs400_enhanced_strobe;
@@ -1727,13 +1651,6 @@
goto disable_ahb_clk;
}
- if (of_id)
- err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
- else
- err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
- if (err)
- goto disable_ahb_clk;
-
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);
@@ -1944,7 +1861,6 @@
.of_match_table = imx_esdhc_dt_ids,
.pm = &sdhci_esdhc_pmops,
},
- .id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = sdhci_esdhc_imx_remove,
};
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 588b9a5..ad2e73f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -16,6 +16,7 @@
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -2181,6 +2182,7 @@
static const struct of_device_id sdhci_msm_dt_match[] = {
{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
+ {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
@@ -2228,6 +2230,43 @@
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
+static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
+{
+ struct reset_control *reset;
+ int ret = 0;
+
+ reset = reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "unable to acquire core_reset\n");
+
+ if (!reset)
+ return ret;
+
+ ret = reset_control_assert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset assert failed\n");
+ }
+
+ /*
+ * The hardware requires a delay between assert and deassert of
+ * at least 3-4 sleep clock (32.768 kHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side, add a 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ reset_control_put(reset);
+ return dev_err_probe(dev, ret, "core_reset deassert failed\n");
+ }
+
+ usleep_range(200, 210);
+ reset_control_put(reset);
+
+ return ret;
+}
static int sdhci_msm_probe(struct platform_device *pdev)
{
@@ -2276,6 +2315,10 @@
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+ ret = sdhci_msm_gcc_reset(&pdev->dev, host);
+ if (ret)
+ goto pltfm_free;
+
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {
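As a quick check of the delay chosen in sdhci_msm_gcc_reset() above: four sleep-clock cycles at 32.768 kHz come to 4 / 32768 s, roughly 122 us, so both the ~125 us figure in the comment and the usleep_range(200, 210) rounding leave margin. A small self-contained sketch of the arithmetic (userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	const double sleep_clk_hz = 32768.0;		/* 32.768 kHz sleep clock */
	const double min_delay_us = 4.0 / sleep_clk_hz * 1e6;

	/* Prints ~122.07; the driver rounds up to usleep_range(200, 210) */
	printf("minimum assert/deassert delay: %.2f us\n", min_delay_us);
	return 0;
}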
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index fc38db6..9da49dc 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,6 +25,7 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
@@ -359,7 +360,7 @@
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d1a1c54..0452c31 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -100,8 +100,13 @@
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- if (timing == MMC_TIMING_MMC_DDR52)
- sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ u8 mc1r;
+
+ if (timing == MMC_TIMING_MMC_DDR52) {
+ mc1r = sdhci_readb(host, SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_DDR;
+ sdhci_writeb(host, mc1r, SDMMC_MC1R);
+ }
sdhci_set_uhs_signaling(host, timing);
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 343648f..d533749 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -904,6 +904,7 @@
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
+ of_node_put(scfg_node);
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index a78b060..8b02fe3 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -967,6 +967,12 @@
dmi_match(DMI_SYS_VENDOR, "IRBIS"));
}
+static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
+{
+ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
+ dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
+}
+
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
@@ -975,9 +981,11 @@
slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
- slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
- slot->host->mmc_host_ops.hs400_enhanced_strobe =
- intel_hs400_enhanced_strobe;
+ if (!jsl_broken_hs400es(slot)) {
+ slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+ slot->host->mmc_host_ops.hs400_enhanced_strobe =
+ intel_hs400_enhanced_strobe;
+ }
slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}
@@ -1791,6 +1799,8 @@
}
}
+ pci_dev_put(smbus_dev);
+
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 94e3f72..7223479 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -31,6 +31,7 @@
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
+#define O2_SD_MISC_CTRL2 0xF0
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_MISC_CTRL 0x1C0
@@ -147,6 +148,8 @@
if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
sdhci_o2_enable_internal_clock(host);
+ else
+ sdhci_o2_wait_card_detect_stable(host);
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
@@ -820,6 +823,12 @@
/* Set Tuning Windows to 5 */
pci_write_config_byte(chip->pdev,
O2_SD_TUNING_CTRL, 0x55);
+ /* Adjust 1st and 2nd CD debounce time */
+ pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
+ scratch_32 &= 0xFFE7FFFF;
+ scratch_32 |= 0x00180000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
+ pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 9cd8862..8575f45 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -296,7 +296,7 @@
static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
{
- return 400000;
+ return 100000;
}
static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index d50b691..d8fd2b5 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -24,6 +24,7 @@
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -361,7 +362,7 @@
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl, clk_ctrl, pad_ctrl;
- sdhci_reset(host, mask);
+ sdhci_and_cqhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
@@ -760,7 +761,7 @@
*/
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
- tegra_host->curr_clk_rate = host_clk;
+ tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
else
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 0e5234a..d509198 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -240,16 +240,6 @@
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
-
- /*
- * For some reason the controller's Host Control2 register reports
- * the bit representing 1.8V signaling as 0 when read after it was
- * written as 1. Subsequent read reports 1.
- *
- * Since this may cause some issues, do an empty read of the Host
- * Control2 register here to circumvent this.
- */
- sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static const struct sdhci_ops sdhci_xenon_ops = {
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index a64ea14..24cd6d3 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -15,6 +15,7 @@
#include <linux/sys_soc.h>
#include "cqhci.h"
+#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -147,6 +148,9 @@
int drv_strength;
int strb_sel;
u32 flags;
+ u32 quirks;
+
+#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
};
struct sdhci_am654_driver_data {
@@ -369,6 +373,21 @@
}
}
+static void sdhci_am654_reset(struct sdhci_host *host, u8 mask)
+{
+ u8 ctrl;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_and_cqhci_reset(host, mask);
+
+ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+}
+
static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -446,7 +465,7 @@
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_and_cqhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
@@ -476,7 +495,7 @@
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_and_cqhci_reset,
};
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
@@ -500,7 +519,7 @@
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
.irq = sdhci_am654_cqhci_irq,
- .reset = sdhci_reset,
+ .reset = sdhci_am654_reset,
};
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
@@ -719,6 +738,9 @@
device_property_read_u32(dev, "ti,clkbuf-sel",
&sdhci_am654->clkbuf_sel);
+ if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
+ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
+
sdhci_get_of_property(pdev);
return 0;
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index cf10949..8df722e 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -849,7 +849,7 @@
if (IS_ERR(priv->clk_sdmmc)) {
dev_err(&pdev->dev, "Error getting clock\n");
ret = PTR_ERR(priv->clk_sdmmc);
- goto fail5;
+ goto fail5_and_a_half;
}
ret = clk_prepare_enable(priv->clk_sdmmc);
@@ -866,6 +866,9 @@
return 0;
fail6:
clk_put(priv->clk_sdmmc);
+fail5_and_a_half:
+ dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
+ priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
free_irq(dma_irq, priv);
fail4:
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 96a27e0..9bd65f3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,10 @@
#define CFI_SR_WBASB BIT(3)
#define CFI_SR_SLSB BIT(1)
+enum cfi_quirks {
+ CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
+};
+
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
@@ -432,6 +436,15 @@
mtd->name);
}
+static void fixup_quirks(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01)
+ cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
+}
+
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -470,6 +483,7 @@
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -798,46 +812,10 @@
}
/*
- * Return true if the chip is ready.
- *
- * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
- * non-suspended sector) and is indicated by no toggle bits toggling.
- *
- * Note that anything more complicated than checking if no bits are toggling
- * (including checking DQ5 for an error status) is tricky to get working
- * correctly and is therefore not done (particularly with interleaved chips
- * as each chip must be checked independently of the others).
- */
-static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
- unsigned long addr)
-{
- struct cfi_private *cfi = map->fldrv_priv;
- map_word d, t;
-
- if (cfi_use_status_reg(cfi)) {
- map_word ready = CMD(CFI_SR_DRB);
- /*
- * For chips that support status register, check device
- * ready bit
- */
- cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
- cfi->device_type, NULL);
- d = map_read(map, addr);
-
- return map_word_andequal(map, d, ready, ready);
- }
-
- d = map_read(map, addr);
- t = map_read(map, addr);
-
- return map_word_equal(map, d, t);
-}
-
-/*
* Return true if the chip is ready and has the correct value.
*
* Ready is one of: read mode, query mode, erase-suspend-read mode (in any
- * non-suspended sector) and it is indicated by no bits toggling.
+ * non-suspended sector) and is indicated by no toggle bits toggling.
*
* Error are indicated by toggling bits or bits held with the wrong value,
* or with bits toggling.
@@ -846,33 +824,48 @@
* (including checking DQ5 for an error status) is tricky to get working
* correctly and is therefore not done (particularly with interleaved chips
* as each chip must be checked independently of the others).
- *
*/
-static int __xipram chip_good(struct map_info *map, struct flchip *chip,
- unsigned long addr, map_word expected)
+static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
+ unsigned long addr, map_word *expected)
{
struct cfi_private *cfi = map->fldrv_priv;
- map_word oldd, curd;
+ map_word d, t;
+ int ret;
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB);
-
/*
* For chips that support status register, check device
* ready bit
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
- curd = map_read(map, addr);
+ t = map_read(map, addr);
- return map_word_andequal(map, curd, ready, ready);
+ return map_word_andequal(map, t, ready, ready);
}
- oldd = map_read(map, addr);
- curd = map_read(map, addr);
+ d = map_read(map, addr);
+ t = map_read(map, addr);
- return map_word_equal(map, oldd, curd) &&
- map_word_equal(map, curd, expected);
+ ret = map_word_equal(map, d, t);
+
+ if (!ret || !expected)
+ return ret;
+
+ return map_word_equal(map, t, *expected);
+}
+
+static int __xipram chip_good(struct map_info *map, struct flchip *chip,
+ unsigned long addr, map_word *expected)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word *datum = expected;
+
+ if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
+ datum = NULL;
+
+ return chip_ready(map, chip, addr, datum);
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
@@ -889,7 +882,7 @@
case FL_STATUS:
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
@@ -927,7 +920,7 @@
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
@@ -1458,7 +1451,7 @@
/* wait for chip to become ready */
timeo = jiffies + msecs_to_jiffies(2);
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
@@ -1694,7 +1687,7 @@
* "chip_good" to avoid the failure due to scheduling.
*/
if (time_after(jiffies, timeo) &&
- !chip_good(map, chip, adr, datum)) {
+ !chip_good(map, chip, adr, &datum)) {
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
xip_disable(map, chip, adr);
@@ -1702,7 +1695,7 @@
break;
}
- if (chip_good(map, chip, adr, datum)) {
+ if (chip_good(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -1974,14 +1967,14 @@
* "chip_good" to avoid the failure due to scheduling.
*/
if (time_after(jiffies, timeo) &&
- !chip_good(map, chip, adr, datum)) {
+ !chip_good(map, chip, adr, &datum)) {
pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
__func__, adr);
ret = -EIO;
break;
}
- if (chip_good(map, chip, adr, datum)) {
+ if (chip_good(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -2190,7 +2183,7 @@
* If the driver thinks the chip is idle, and no toggle bits
* are changing, then the chip is actually idle for sure.
*/
- if (chip->state == FL_READY && chip_ready(map, chip, adr))
+ if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
return 0;
/*
@@ -2207,7 +2200,7 @@
/* wait for the chip to become ready */
for (i = 0; i < jiffies_to_usecs(timeo); i++) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
return 0;
udelay(1);
@@ -2271,13 +2264,13 @@
map_write(map, datum, adr);
for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
udelay(1);
}
- if (!chip_good(map, chip, adr, datum) ||
+ if (!chip_ready(map, chip, adr, &datum) ||
cfi_check_err_status(map, chip, adr)) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
@@ -2419,6 +2412,7 @@
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
+ map_word datum = map_word_ff(map);
adr = cfi->addr_unlock1;
@@ -2473,7 +2467,7 @@
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (chip_ready(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -2518,6 +2512,7 @@
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
+ map_word datum = map_word_ff(map);
adr += chip->start;
@@ -2572,7 +2567,7 @@
chip->erase_suspended = 0;
}
- if (chip_good(map, chip, adr, map_word_ff(map))) {
+ if (chip_ready(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
@@ -2766,7 +2761,7 @@
*/
timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
for (;;) {
- if (chip_ready(map, chip, adr))
+ if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
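The consolidation above folds chip_good() into chip_ready(): passing expected == NULL only checks that no bits toggle between two reads (device ready), while passing &datum additionally requires the stable value to match, unless the CFI_QUIRK_DQ_TRUE_DATA quirk suppresses the data comparison. A simplified, self-contained sketch of that calling convention, using stand-in types rather than the kernel's map_word/map_info:

#include <stdio.h>

typedef unsigned long word_t;

/* expected == NULL: ready when two reads match (no toggling bits).
 * expected != NULL: additionally require the stable value to match. */
static int chip_ready_sketch(const volatile word_t *addr, const word_t *expected)
{
	word_t d = *addr;
	word_t t = *addr;

	if (d != t)
		return 0;			/* still toggling, so busy */
	return expected ? (t == *expected) : 1;
}

int main(void)
{
	volatile word_t cell = 0xff;
	word_t want = 0xff;

	/* prints "1 1": ready, and ready with the expected datum */
	printf("%d %d\n", chip_ready_sketch(&cell, NULL),
	       chip_ready_sketch(&cell, &want));
	return 0;
}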
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index a030792..fa42473 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1975,9 +1975,14 @@
dev_err(dev, "No I/O memory resource defined\n");
return ret;
}
- base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+ if (!base) {
+ dev_err(dev, "devm_ioremap dev failed\n");
+ return ret;
+ }
+
cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade),
GFP_KERNEL);
if (!cascade)
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 1888523..9bee99f 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2115,10 +2115,12 @@
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
- return mtd_device_register(&fsm->mtd, NULL, 0);
-
+ ret = mtd_device_register(&fsm->mtd, NULL, 0);
+ if (ret) {
err_clk_unprepare:
- clk_disable_unprepare(fsm->clk);
+ clk_disable_unprepare(fsm->clk);
+ }
+
return ret;
}
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index ad7cd9c..a1b8b7b 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -93,6 +93,7 @@
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
@@ -207,6 +208,7 @@
versatile_flashprot = (enum versatile_flashprot)devid->data;
rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c
index 8b6f4da..a4b8b65 100644
--- a/drivers/mtd/nand/onenand/generic.c
+++ b/drivers/mtd/nand/onenand/generic.c
@@ -53,7 +53,12 @@
}
info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
- info->onenand.irq = platform_get_irq(pdev, 0);
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ goto out_iounmap;
+
+ info->onenand.irq = err;
info->mtd.dev.parent = &pdev->dev;
info->mtd.priv = &info->onenand;
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 0ee3192..6a0d48c 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -91,7 +91,7 @@
#define DATA_INTERFACE_REG 0x6C
#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
-#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (X))
+#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
#define DIFACE_SDR 0
#define DIFACE_NVDDR BIT(9)
@@ -283,17 +283,17 @@
/* Update clock frequency */
if (nfc->cur_clk != anand->clk) {
- clk_disable_unprepare(nfc->controller_clk);
- ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
if (ret) {
dev_err(nfc->dev, "Failed to change clock rate\n");
return ret;
}
- ret = clk_prepare_enable(nfc->controller_clk);
+ ret = clk_prepare_enable(nfc->bus_clk);
if (ret) {
dev_err(nfc->dev,
- "Failed to re-enable the controller clock\n");
+ "Failed to re-enable the bus clock\n");
return ret;
}
@@ -884,21 +884,60 @@
struct anand *anand = to_anand(chip);
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct device_node *np = nfc->dev->of_node;
+ const struct nand_sdr_timings *sdr;
+ const struct nand_nvddr_timings *nvddr;
+
+ if (nand_interface_is_nvddr(conf)) {
+ nvddr = nand_get_nvddr_timings(conf);
+ if (IS_ERR(nvddr))
+ return PTR_ERR(nvddr);
+
+ /*
+ * The controller only supports data payload requests which are
+ * a multiple of 4. In practice, most data accesses are 4-byte
+ * aligned and this is not an issue. However, rounding up will
+ * simply be refused by the controller if we reached the end of
+ * the device *and* we are using the NV-DDR interface(!). In
+ * this situation, unaligned data requests ending at the device
+ * boundary will confuse the controller and cannot be performed.
+ *
+ * This is something that happens in nand_read_subpage() when
+ * selecting software ECC support and must be avoided.
+ */
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT)
+ return -ENOTSUPP;
+ } else {
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+ }
if (target < 0)
return 0;
- anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode);
- anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ if (nand_interface_is_sdr(conf))
+ anand->timings = DIFACE_SDR |
+ DIFACE_SDR_MODE(conf->timings.mode);
+ else
+ anand->timings = DIFACE_NVDDR |
+ DIFACE_DDR_MODE(conf->timings.mode);
+
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
+ }
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
* with f > 90MHz (default clock is 100MHz) but signals are unstable
* with higher modes. Hence we decrease a little bit the clock rate to
- * 80MHz when using modes 2-5 with this SoC.
+ * 80MHz when using SDR modes 2-5 with this SoC.
*/
if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
- conf->timings.mode >= 2)
+ nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
return 0;
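The NV-DDR branch above derives the core clock directly from the ONFI tCK_min value, which is given in picoseconds: clk = 10^12 / tCK_min. Using the tCK_min values from the onfi_nvddr_timings table added later in this patch, that gives 20 MHz for mode 0 up to 100 MHz for mode 5. A small userspace sketch of the same computation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t ps_per_sec = 1000000000000ULL;	/* 10^12 ps per second */
	const uint32_t tCK_min_ps[] = { 50000, 30000, 20000, 15000, 12000, 10000 };

	for (int mode = 0; mode < 6; mode++)
		printf("NV-DDR mode %d: tCK_min %u ps -> %llu Hz\n", mode,
		       tCK_min_ps[mode],
		       (unsigned long long)(ps_per_sec / tCK_min_ps[mode]));
	return 0;
}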
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 8aab101..0d84f81 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -405,6 +405,7 @@
dma_async_issue_pending(nc->dmac);
wait_for_completion(&finished);
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
return 0;
@@ -1246,7 +1247,7 @@
nc = to_nand_controller(nand->base.controller);
/* DDR interface not supported. */
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return -ENOTSUPP;
/*
@@ -2057,13 +2058,15 @@
nc->mck = of_clk_get(dev->parent->of_node, 0);
if (IS_ERR(nc->mck)) {
dev_err(dev, "Failed to retrieve MCK clk\n");
- return PTR_ERR(nc->mck);
+ ret = PTR_ERR(nc->mck);
+ goto out_release_dma;
}
np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
if (!np) {
dev_err(dev, "Missing or invalid atmel,smc property\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_release_dma;
}
nc->smc = syscon_node_to_regmap(np);
@@ -2071,10 +2074,16 @@
if (IS_ERR(nc->smc)) {
ret = PTR_ERR(nc->smc);
dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
- return ret;
+ goto out_release_dma;
}
return 0;
+
+out_release_dma:
+ if (nc->dmac)
+ dma_release_channel(nc->dmac);
+
+ return ret;
}
static int
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index b46786c..4fdb392 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2983,11 +2983,10 @@
if (IS_ERR(cdns_ctrl->reg))
return PTR_ERR(cdns_ctrl->reg);
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
- cdns_ctrl->io.dma = res->start;
- cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
+ cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+ cdns_ctrl->io.dma = res->start;
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 20c085a..de7e722 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -74,22 +74,21 @@
return ret;
}
- denali->reg = ioremap(csr_base, csr_len);
+ denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
- denali->host = ioremap(mem_base, mem_len);
+ denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
- ret = -ENOMEM;
- goto out_unmap_reg;
+ return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
- goto out_unmap_host;
+ return ret;
nsels = denali->nbanks;
@@ -117,10 +116,6 @@
out_remove_denali:
denali_remove(denali);
-out_unmap_host:
- iounmap(denali->host);
-out_unmap_reg:
- iounmap(denali->reg);
return ret;
}
@@ -129,8 +124,6 @@
struct denali_controller *denali = pci_get_drvdata(dev);
denali_remove(denali);
- iounmap(denali->reg);
- iounmap(denali->host);
}
static struct pci_driver denali_pci_driver = {
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index b2af7f8..c174b6d 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -727,36 +727,40 @@
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
unsigned int al;
- switch (chip->ecc.engine_type) {
/*
* if ECC was not chosen in DT, decide whether to use HW or SW ECC from
* CS Base Register
*/
- case NAND_ECC_ENGINE_TYPE_NONE:
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) {
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
BR_DECC_CHK_GEN) {
- chip->ecc.read_page = fsl_elbc_read_page;
- chip->ecc.write_page = fsl_elbc_write_page;
- chip->ecc.write_subpage = fsl_elbc_write_subpage;
-
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
} else {
/* otherwise fall back to default software ECC */
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
+ }
+
+ switch (chip->ecc.engine_type) {
+ /* if HW ECC was chosen, setup ecc and oob layout */
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
break;
- /* if SW ECC was chosen in DT, we do not need to set anything here */
+ /* if none or SW ECC was chosen, we do not need to set anything here */
+ case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
- /* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */
default:
return -EINVAL;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index cb76311..200d3ab 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -646,14 +646,16 @@
const struct nand_sdr_timings *sdr)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
+ struct resources *r = &this->resources;
unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
unsigned int period_ps, reference_period_ps;
unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
- u16 busy_timeout_cycles;
+ unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
+ u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
@@ -669,13 +671,16 @@
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
}
+ hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
+
/* SDR core timings are given in picoseconds */
period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
- busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
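The hunk above widens busy_timeout_cycles from u16 to unsigned int and bases it on the worst of tBERS_max/tPROG_max instead of tWB_max + tR_max. The wider type matters because erase and program worst cases are in the millisecond range: with a hypothetical 10 ms worst case and a 100 MHz GPMI clock (10000 ps period), the count is 1,000,000 cycles, which does not fit in 16 bits. A self-contained illustration of that arithmetic (the round-up mirrors what TO_CYCLES() is expected to do; the numbers are examples, not datasheet values):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const uint64_t period_ps = 10000;		  /* 100 MHz clock, example */
	const uint64_t busy_timeout_ps = 10000000000ULL; /* 10 ms worst case, example */
	const uint64_t cycles = DIV_ROUND_UP(busy_timeout_ps, period_ps);

	/* 1000000 cycles: far beyond the 65535 limit of a u16 counter */
	printf("busy timeout: %llu cycles\n", (unsigned long long)cycles);
	return 0;
}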
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index d00c916..dce35f8 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2672,7 +2672,7 @@
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
- if (!of_property_read_bool(np, "marvell,nand-keep-config"))
+ if (of_property_read_bool(np, "marvell,nand-keep-config"))
chip->options |= NAND_KEEP_TIMINGS;
mtd = nand_to_mtd(chip);
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 817bddc..38f4900 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -454,7 +454,7 @@
if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
- *correct_bitmap |= 1 >> i;
+ *correct_bitmap |= BIT_ULL(i);
continue;
}
if ((nand->options & NAND_NEED_SCRAMBLING) &&
@@ -800,7 +800,7 @@
u8 *data = buf + i * ecc->size;
u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
- if (correct_bitmap & (1 << i))
+ if (correct_bitmap & BIT_ULL(i))
continue;
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 2,
@@ -1307,7 +1307,6 @@
if (ret)
return ret;
- meson_nfc_free_buffer(&meson_chip->nand);
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 75f1fa3..c115e03 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -43,6 +43,7 @@
struct mtk_ecc_caps {
u32 err_mask;
+ u32 err_shift;
const u8 *ecc_strength;
const u32 *ecc_regs;
u8 num_ecc_strength;
@@ -76,7 +77,7 @@
};
static const u8 ecc_strength_mt7622[] = {
- 4, 6, 8, 10, 12, 14, 16
+ 4, 6, 8, 10, 12
};
enum mtk_ecc_regs {
@@ -221,7 +222,7 @@
for (i = 0; i < sectors; i++) {
offset = (i >> 2) << 2;
err = readl(ecc->regs + ECC_DECENUM0 + offset);
- err = err >> ((i % 4) * 8);
+ err = err >> ((i % 4) * ecc->caps->err_shift);
err &= ecc->caps->err_mask;
if (err == ecc->caps->err_mask) {
/* uncorrectable errors */
@@ -449,6 +450,7 @@
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
+ .err_shift = 8,
.ecc_strength = ecc_strength_mt2701,
.ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
@@ -459,6 +461,7 @@
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
+ .err_shift = 8,
.ecc_strength = ecc_strength_mt2712,
.ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
@@ -468,10 +471,11 @@
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
- .err_mask = 0x3f,
+ .err_mask = 0x1f,
+ .err_shift = 5,
.ecc_strength = ecc_strength_mt7622,
.ecc_regs = mt7622_ecc_regs,
- .num_ecc_strength = 7,
+ .num_ecc_strength = 5,
.ecc_mode_shift = 4,
.parity_bits = 13,
.pg_irq_sel = 0,
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 1f0d542..c41c0ff 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -297,16 +297,19 @@
*
* Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
-static int nand_get_device(struct nand_chip *chip)
+static void nand_get_device(struct nand_chip *chip)
{
- mutex_lock(&chip->lock);
- if (chip->suspended) {
+ /* Wait until the device is resumed. */
+ while (1) {
+ mutex_lock(&chip->lock);
+ if (!chip->suspended) {
+ mutex_lock(&chip->controller->lock);
+ return;
+ }
mutex_unlock(&chip->lock);
- return -EBUSY;
- }
- mutex_lock(&chip->controller->lock);
- return 0;
+ wait_event(chip->resume_wq, !chip->suspended);
+ }
}
/**
@@ -531,9 +534,7 @@
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
ret = nand_markbad_bbm(chip, ofs);
nand_release_device(chip);
@@ -3534,9 +3535,7 @@
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
@@ -4119,13 +4118,11 @@
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
+ int ret = 0;
ops->retlen = 0;
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -4181,9 +4178,7 @@
return -EINVAL;
/* Grab the lock and see if the device is available */
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -4270,7 +4265,7 @@
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- WARN_ON(nand_get_device(chip));
+ nand_get_device(chip);
/* Release it and go back */
nand_release_device(chip);
}
@@ -4287,9 +4282,7 @@
int ret;
/* Select the NAND device */
- ret = nand_get_device(chip);
- if (ret)
- return ret;
+ nand_get_device(chip);
nand_select_target(chip, chipnr);
@@ -4360,6 +4353,8 @@
__func__);
}
mutex_unlock(&chip->lock);
+
+ wake_up_all(&chip->resume_wq);
}
/**
@@ -5068,6 +5063,7 @@
chip->cur_cs = -1;
mutex_init(&chip->lock);
+ init_waitqueue_head(&chip->resume_wq);
/* Enforce the right timings for reset/detection */
chip->current_interface_config = nand_get_reset_interface_config();
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index 94d8326..481b56d 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -292,6 +292,261 @@
},
};
+static const struct nand_interface_config onfi_nvddr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 0,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 10000,
+ .tCALH_min = 10000,
+ .tCALS_min = 10000,
+ .tCAS_min = 10000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCK_min = 50000,
+ .tCS_min = 35000,
+ .tDH_min = 5000,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 5000,
+ .tDS_min = 5000,
+ .tDSC_min = 50000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 6000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 1,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 5000,
+ .tCALH_min = 5000,
+ .tCALS_min = 5000,
+ .tCAS_min = 5000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCK_min = 30000,
+ .tCS_min = 25000,
+ .tDH_min = 2500,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 2500,
+ .tDS_min = 3000,
+ .tDSC_min = 30000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 3000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 2,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 4000,
+ .tCALH_min = 4000,
+ .tCALS_min = 4000,
+ .tCAS_min = 4000,
+ .tCEH_min = 20000,
+ .tCH_min = 4000,
+ .tCK_min = 20000,
+ .tCS_min = 15000,
+ .tDH_min = 1700,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1700,
+ .tDS_min = 2000,
+ .tDSC_min = 20000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 2000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 3,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 3000,
+ .tCALH_min = 3000,
+ .tCALS_min = 3000,
+ .tCAS_min = 3000,
+ .tCEH_min = 20000,
+ .tCH_min = 3000,
+ .tCK_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 1300,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1300,
+ .tDS_min = 1500,
+ .tDSC_min = 15000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1500,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 4,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2500,
+ .tCALH_min = 2500,
+ .tCALS_min = 2500,
+ .tCAS_min = 2500,
+ .tCEH_min = 20000,
+ .tCH_min = 2500,
+ .tCK_min = 12000,
+ .tCS_min = 15000,
+ .tDH_min = 1100,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 1000,
+ .tDS_min = 1100,
+ .tDSC_min = 12000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1200,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_NVDDR_IFACE,
+ .timings.mode = 5,
+ .timings.nvddr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tAC_min = 3000,
+ .tAC_max = 25000,
+ .tADL_min = 400000,
+ .tCAD_min = 45000,
+ .tCAH_min = 2000,
+ .tCALH_min = 2000,
+ .tCALS_min = 2000,
+ .tCAS_min = 2000,
+ .tCEH_min = 20000,
+ .tCH_min = 2000,
+ .tCK_min = 10000,
+ .tCS_min = 15000,
+ .tDH_min = 900,
+ .tDQSCK_min = 3000,
+ .tDQSCK_max = 25000,
+ .tDQSD_min = 0,
+ .tDQSD_max = 18000,
+ .tDQSHZ_max = 20000,
+ .tDQSQ_max = 850,
+ .tDS_min = 900,
+ .tDSC_min = 10000,
+ .tFEAT_max = 1000000,
+ .tITC_max = 1000000,
+ .tQHS_max = 1000,
+ .tRHW_min = 100000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWHR_min = 80000,
+ .tWRCK_min = 20000,
+ .tWW_min = 100000,
+ },
+ },
+};
+
/* All NAND chips share the same reset data interface: SDR mode 0 */
const struct nand_interface_config *nand_get_reset_interface_config(void)
{
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 13df4bd..8f89e2d 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -384,7 +384,8 @@
dma_addr_t dma_addr;
dma_cookie_t cookie;
uint32_t reg;
- int ret;
+ int ret = 0;
+ unsigned long time_left;
if (dir == DMA_FROM_DEVICE) {
chan = flctl->chan_fifo0_rx;
@@ -425,13 +426,14 @@
goto out;
}
- ret =
+ time_left =
wait_for_completion_timeout(&flctl->dma_complete,
msecs_to_jiffies(3000));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
}
out:
@@ -441,7 +443,7 @@
dma_unmap_single(chan->device->dev, dma_addr, len, dir);
- /* ret > 0 is success */
+ /* ret == 0 is success */
return ret;
}
@@ -465,7 +467,7 @@
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
goto convert; /* DMA success */
/* do polling transfer */
@@ -524,7 +526,7 @@
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
return; /* DMA success */
/* do polling transfer */
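The sh_flctl change above separates the wait_for_completion_timeout() result from the DMA error code: that helper returns the remaining jiffies (non-zero) on success and 0 on timeout, so storing it in a signed int that also carries negative error codes conflates the two. Sketched as a kernel-style fragment (not standalone, names as in the driver), the corrected idiom is:

	unsigned long time_left;
	int ret = 0;

	time_left = wait_for_completion_timeout(&flctl->dma_complete,
						msecs_to_jiffies(3000));
	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		ret = -ETIMEDOUT;	/* timeout is the only failure reported here */
	}
	/* ret == 0 now means success, matching the !flctl_dma_fifo0_transfer() callers */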
diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c
index 6012a10..13daf9b 100644
--- a/drivers/mtd/parsers/bcm47xxpart.c
+++ b/drivers/mtd/parsers/bcm47xxpart.c
@@ -233,11 +233,11 @@
}
/* Read middle of the block */
- err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+ err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read,
(uint8_t *)buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
- offset, err);
+ offset + (blocksize / 2), err);
continue;
}
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index 3ccd636..4f3bcc5 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -58,6 +58,7 @@
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
if (ret)
return;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b9f2724..2fedae6 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1098,9 +1098,9 @@
{
struct sm_ftl *ftl = dev->priv;
- mutex_lock(&ftl->mutex);
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
+ mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 555fe55..8a3c1f3 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
@@ -16,12 +16,30 @@
#define BCR 0xdc
#define BCR_WPD BIT(0)
+static bool intel_spi_pci_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+ u32 bcr;
+
+ /* Try to make the chip read/write */
+ pci_read_config_dword(pdev, BCR, &bcr);
+ if (!(bcr & BCR_WPD)) {
+ bcr |= BCR_WPD;
+ pci_write_config_dword(pdev, BCR, bcr);
+ pci_read_config_dword(pdev, BCR, &bcr);
+ }
+
+ return bcr & BCR_WPD;
+}
+
static const struct intel_spi_boardinfo bxt_info = {
.type = INTEL_SPI_BXT,
+ .set_writeable = intel_spi_pci_set_writeable,
};
static const struct intel_spi_boardinfo cnl_info = {
.type = INTEL_SPI_CNL,
+ .set_writeable = intel_spi_pci_set_writeable,
};
static int intel_spi_pci_probe(struct pci_dev *pdev,
@@ -29,7 +47,6 @@
{
struct intel_spi_boardinfo *info;
struct intel_spi *ispi;
- u32 bcr;
int ret;
ret = pcim_enable_device(pdev);
@@ -41,15 +58,7 @@
if (!info)
return -ENOMEM;
- /* Try to make the chip read/write */
- pci_read_config_dword(pdev, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
- }
- info->writeable = !!(bcr & BCR_WPD);
-
+ info->data = pdev;
ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
if (IS_ERR(ispi))
return PTR_ERR(ispi);
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c
index b54a56a..6c802db 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.c
@@ -53,17 +53,17 @@
#define FRACC 0x50
#define FREG(n) (0x54 + ((n) * 4))
-#define FREG_BASE_MASK 0x3fff
+#define FREG_BASE_MASK GENMASK(14, 0)
#define FREG_LIMIT_SHIFT 16
-#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT)
+#define FREG_LIMIT_MASK GENMASK(30, 16)
/* Offset is from @ispi->pregs */
#define PR(n) ((n) * 4)
#define PR_WPE BIT(31)
#define PR_LIMIT_SHIFT 16
-#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
+#define PR_LIMIT_MASK GENMASK(30, 16)
#define PR_RPE BIT(15)
-#define PR_BASE_MASK 0x3fff
+#define PR_BASE_MASK GENMASK(14, 0)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
@@ -117,7 +117,7 @@
#define ERASE_OPCODE_SHIFT 8
#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT 16
-#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -132,7 +132,6 @@
* @sregs: Start of software sequencer registers
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
- * @writeable: Is the chip writeable
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
@@ -150,7 +149,6 @@
void __iomem *sregs;
size_t nregions;
size_t pr_num;
- bool writeable;
bool locked;
bool swseq_reg;
bool swseq_erase;
@@ -305,6 +303,14 @@
INTEL_SPI_TIMEOUT * 1000);
}
+static bool intel_spi_set_writeable(struct intel_spi *ispi)
+{
+ if (!ispi->info->set_writeable)
+ return false;
+
+ return ispi->info->set_writeable(ispi->base, ispi->info->data);
+}
+
static int intel_spi_init(struct intel_spi *ispi)
{
u32 opmenu0, opmenu1, lvscc, uvscc, val;
@@ -317,19 +323,6 @@
ispi->nregions = BYT_FREG_NUM;
ispi->pr_num = BYT_PR_NUM;
ispi->swseq_reg = true;
-
- if (writeable) {
- /* Disable write protection */
- val = readl(ispi->base + BYT_BCR);
- if (!(val & BYT_BCR_WPD)) {
- val |= BYT_BCR_WPD;
- writel(val, ispi->base + BYT_BCR);
- val = readl(ispi->base + BYT_BCR);
- }
-
- ispi->writeable = !!(val & BYT_BCR_WPD);
- }
-
break;
case INTEL_SPI_LPT:
@@ -359,6 +352,12 @@
return -EINVAL;
}
+ /* Try to disable write protection if user asked to do so */
+ if (writeable && !intel_spi_set_writeable(ispi)) {
+ dev_warn(ispi->dev, "can't disable chip write protection\n");
+ writeable = false;
+ }
+
/* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
@@ -885,9 +884,12 @@
/*
* If any of the regions have protection bits set, make the
* whole partition read-only to be on the safe side.
+ *
+ * Also, if the user did not ask for the chip to be writeable,
+ * mask the bit too.
*/
- if (intel_spi_is_protected(ispi, base, limit))
- ispi->writeable = false;
+ if (!writeable || intel_spi_is_protected(ispi, base, limit))
+ part->mask_flags |= MTD_WRITEABLE;
end = (limit << 12) + 4096;
if (end > part->size)
@@ -928,7 +930,6 @@
ispi->dev = dev;
ispi->info = info;
- ispi->writeable = info->writeable;
ret = intel_spi_init(ispi);
if (ret)
@@ -946,10 +947,6 @@
intel_spi_fill_partition(ispi, &part);
- /* Prevent writes if not explicitly enabled */
- if (!ispi->writeable || !writeable)
- ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
-
ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 2b26a87..e8146a4 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -827,6 +827,15 @@
if (ret)
return ret;
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr1 != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
if (nor->flags & SNOR_F_NO_READ_CR)
return 0;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e85b04e..4153e0d 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -350,9 +350,6 @@
* we still can use 'ubi->ubi_num'.
*/
ubi = container_of(dev, struct ubi_device, dev);
- ubi = ubi_get_device(ubi->ubi_num);
- if (!ubi)
- return -ENODEV;
if (attr == &dev_eraseblock_size)
ret = sprintf(buf, "%d\n", ubi->leb_size);
@@ -381,7 +378,6 @@
else
ret = -EINVAL;
- ubi_put_device(ubi);
return ret;
}
@@ -980,9 +976,6 @@
goto out_detach;
}
- /* Make device "available" before it becomes accessible via sysfs */
- ubi_devices[ubi_num] = ubi;
-
err = uif_init(ubi);
if (err)
goto out_detach;
@@ -1027,6 +1020,7 @@
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
+ ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1035,7 +1029,6 @@
out_uif:
uif_close(ubi);
out_detach:
- ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
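Annotation: the build.c hunks delay publishing the device in the global ubi_devices[] array until attach has fully succeeded, which is what lets the sysfs show path drop ubi_get_device()/ubi_put_device(). A minimal sketch of the publish-last pattern is shown below, assuming a hypothetical registry array and init steps rather than the real UBI code paths.

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int id;
	char *buf;	/* resource acquired during init */
};

#define MAX_DEVS 4
static struct dev *registry[MAX_DEVS];	/* readers only see fully built devs */

static int attach(int id)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return -1;
	d->id = id;

	d->buf = malloc(64);	/* stand-in for uif_init() and friends */
	if (!d->buf) {
		free(d);	/* nothing was published, plain cleanup */
		return -1;
	}

	/* Publish only once every init step has succeeded. */
	registry[id] = d;
	return 0;
}

int main(void)
{
	if (attach(0))
		return 1;
	printf("dev %d attached\n", registry[0]->id);
	return 0;
}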
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9..053ab52 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -97,6 +97,33 @@
return e;
}
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling wear leveling pool
+ *
+ * This helper function checks whether there are enough free PEBs (after
+ * deducting the fastmap PEBs) to fill fm_pool and fm_wl_pool; the rule
+ * applies once at least one free PEB has been placed into fm_wl_pool.
+ * For the wear-leveling pool, UBI should also reserve free PEBs for bad
+ * PEB handling, because there may not be enough free PEBs left for user
+ * volumes after new bad PEBs are produced.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+ int fm_used = 0; /* fastmap non-anchor PEBs */
+ int beb_rsvd_pebs;
+
+ if (!ubi->free.rb_node)
+ return false;
+
+ beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+ if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+ fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+ return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
@@ -120,21 +147,17 @@
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
}
- if (ubi->fm_next_anchor) {
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->free_count++;
- }
- /* All available PEBs are in ubi->free, now is the time to get
+ /*
+ * All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
- if (!ubi->free.rb_node)
+ if (!has_enough_free_count(ubi, false))
break;
e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@
enough++;
if (wl_pool->size < wl_pool->max_size) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ if (!has_enough_free_count(ubi, true))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -286,20 +308,26 @@
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
- /* Do we have a next anchor? */
- if (!ubi->fm_next_anchor) {
- ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
- if (!ubi->fm_next_anchor)
- /* Tell wear leveling to produce a new anchor PEB */
- ubi->fm_do_produce_anchor = 1;
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
}
- /* Do wear leveling to get a new anchor PEB or check the
- * existing next anchor candidate.
- */
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ ubi->fm_do_produce_anchor = 1;
+ /* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -381,11 +409,6 @@
ubi->fm_anchor = NULL;
}
- if (ubi->fm_next_anchor) {
- return_unused_peb(ubi, ubi->fm_next_anchor);
- ubi->fm_next_anchor = NULL;
- }
-
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
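Annotation: has_enough_free_count() above gates pool refills on the number of free PEBs after subtracting the fastmap's own PEBs and, for the wear-leveling pool, the PEBs reserved for bad-block handling. A standalone sketch of the same arithmetic follows; the ubi_state fields and sample numbers are hypothetical, and the free_count check stands in for the empty free-tree test.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical snapshot of the fields the check needs. */
struct ubi_state {
	int free_count;		/* PEBs currently on the free tree */
	int beb_rsvd_pebs;	/* reserved for bad-block handling */
	int fm_size;		/* fastmap size in bytes */
	int leb_size;		/* logical eraseblock size in bytes */
	bool wl_pool_seeded;	/* fm_wl_pool.size > 0 */
	bool ro_or_disabled;	/* ro_mode || fm_disabled */
};

static bool has_enough_free_count(const struct ubi_state *s, bool is_wl_pool)
{
	int fm_used = 0;	/* fastmap non-anchor PEBs */
	int rsvd = is_wl_pool ? s->beb_rsvd_pebs : 0;

	if (s->free_count == 0)
		return false;

	if (s->wl_pool_seeded && !s->ro_or_disabled)
		fm_used = s->fm_size / s->leb_size - 1;

	return s->free_count - rsvd > fm_used;
}

int main(void)
{
	struct ubi_state s = {
		.free_count = 5, .beb_rsvd_pebs = 4,
		.fm_size = 2 * 126976, .leb_size = 126976,
		.wl_pool_seeded = true, .ro_or_disabled = false,
	};

	/* fm_used = 1 here, so the plain pool passes (5 > 1) while the WL
	 * pool, which must also leave 4 PEBs for bad blocks, does not
	 * (5 - 4 > 1 is false).
	 */
	printf("fm pool:    %d\n", has_enough_free_count(&s, false));
	printf("fm_wl pool: %d\n", has_enough_free_count(&s, true));
	return 0;
}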
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 022af59..6e95c4b 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -468,7 +468,9 @@
if (err == UBI_IO_FF_BITFLIPS)
scrub = 1;
- add_aeb(ai, free, pnum, ec, scrub);
+ ret = add_aeb(ai, free, pnum, ec, scrub);
+ if (ret)
+ goto out;
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -638,8 +640,10 @@
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 0);
+ ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ if (ret)
+ goto fail;
}
/* read EC values from used list */
@@ -649,8 +653,10 @@
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 0);
+ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ if (ret)
+ goto fail;
}
/* read EC values from scrub list */
@@ -660,8 +666,10 @@
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 1);
+ ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ if (ret)
+ goto fail;
}
/* read EC values from erase list */
@@ -671,8 +679,10 @@
if (fm_pos >= fm_size)
goto fail_bad;
- add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
- be32_to_cpu(fmec->ec), 1);
+ ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ if (ret)
+ goto fail;
}
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
@@ -1220,17 +1230,6 @@
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
- if (ubi->fm_next_anchor) {
- fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
- fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
- set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
- fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
- free_peb_count++;
- fm_pos += sizeof(*fec);
- ubi_assert(fm_pos <= ubi->fm_size);
- }
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c2da771..da0bee1 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -491,8 +491,7 @@
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
@@ -603,7 +602,6 @@
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
- struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 139ee13..6ea95ad 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -56,16 +56,11 @@
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
- struct ubi_device *ubi;
-
- ubi = ubi_get_device(vol->ubi->ubi_num);
- if (!ubi)
- return -ENODEV;
+ struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
if (!ubi->volumes[vol->vol_id]) {
spin_unlock(&ubi->volumes_lock);
- ubi_put_device(ubi);
return -ENODEV;
}
/* Take a reference to prevent volume removal */
@@ -103,7 +98,6 @@
vol->ref_count -= 1;
ubi_assert(vol->ref_count >= 0);
spin_unlock(&ubi->volumes_lock);
- ubi_put_device(ubi);
return ret;
}
@@ -315,7 +309,6 @@
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7847de7..820b5c1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -688,16 +688,16 @@
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
- if (e1 && ubi->fm_next_anchor &&
- (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ if (e1 && ubi->fm_anchor &&
+ (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
- /* fm_next_anchor is no longer considered a good anchor
- * candidate.
+ /*
+ * fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
- wl_tree_add(ubi->fm_next_anchor, &ubi->free);
- ubi->fm_next_anchor = NULL;
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->fm_anchor = NULL;
ubi->free_count++;
}
@@ -1086,12 +1086,13 @@
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+ if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
- /* Abort anchor production, if needed it will be
+ /*
+ * Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
- ubi->fm_next_anchor = e;
+ ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index cb865b7..f208080 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -80,7 +80,6 @@
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_LIB_BLAKE2S
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 98df38f..12d0854 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -332,7 +332,7 @@
dev->irq = 9;
if (arcrimi_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -349,7 +349,7 @@
iounmap(lp->mem_start);
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 22a49c6..5d4a4c7 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -298,6 +298,10 @@
int excnak_pending; /* We just got an excessive nak interrupt */
+ /* RESET flag handling */
+ int reset_in_progress;
+ struct work_struct reset_work;
+
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
__be16 aborted_seq;
@@ -350,7 +354,9 @@
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index e04efc0..d76dd7d 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -387,10 +387,44 @@
struct arcnet_local *lp = from_timer(lp, t, timer);
struct net_device *dev = lp->dev;
- if (!netif_carrier_ok(dev)) {
+ spin_lock_irq(&lp->lock);
+
+ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
}
+
+ spin_unlock_irq(&lp->lock);
+}
+
+static void reset_device_work(struct work_struct *work)
+{
+ struct arcnet_local *lp;
+ struct net_device *dev;
+
+ lp = container_of(work, struct arcnet_local, reset_work);
+ dev = lp->dev;
+
+ /* Do not bring the network interface back up if an ifdown
+ * was already done.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ return;
+
+ rtnl_lock();
+
+ /* Do another check, in case of an ifdown that was triggered in
+ * the small race window between the exit condition above and
+ * acquiring RTNL.
+ */
+ if (!netif_running(dev) || !lp->reset_in_progress)
+ goto out;
+
+ dev_close(dev);
+ dev_open(dev, NULL);
+
+out:
+ rtnl_unlock();
}
static void arcnet_reply_tasklet(unsigned long data)
@@ -452,12 +486,25 @@
lp->dev = dev;
spin_lock_init(&lp->lock);
timer_setup(&lp->timer, arcnet_timer, 0);
+ INIT_WORK(&lp->reset_work, reset_device_work);
}
return dev;
}
EXPORT_SYMBOL(alloc_arcdev);
+void free_arcdev(struct net_device *dev)
+{
+ struct arcnet_local *lp = netdev_priv(dev);
+
+ /* Do not cancel this at ->ndo_close(), as the workqueue itself
+ * indirectly calls the ifdown path through dev_close().
+ */
+ cancel_work_sync(&lp->reset_work);
+ free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
/* Open/initialize the board. This is called sometime after booting when
* the 'ifconfig' program is run.
*
@@ -587,6 +634,10 @@
/* shut down the card */
lp->hw.close(dev);
+
+ /* reset counters */
+ lp->reset_in_progress = 0;
+
module_put(lp->hw.owner);
return 0;
}
@@ -820,6 +871,9 @@
spin_lock_irqsave(&lp->lock, flags);
+ if (lp->reset_in_progress)
+ goto out;
+
/* RESET flag was enabled - if device is not running, we must
* clear it right away (but nothing else).
*/
@@ -852,11 +906,14 @@
if (status & RESETflag) {
arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
status);
- arcnet_close(dev);
- arcnet_open(dev);
+
+ lp->reset_in_progress = 1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ schedule_work(&lp->reset_work);
/* get out of the interrupt handler! */
- break;
+ goto out;
}
/* RX is inhibited - we must have received something.
* Prepare to receive into the next buffer.
@@ -1052,6 +1109,7 @@
udelay(1);
lp->hw.intmask(dev, lp->intmask);
+out:
spin_unlock_irqrestore(&lp->lock, flags);
return retval;
}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index f983c4c..be618e4 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -169,7 +169,7 @@
dev->irq = 9;
if (com20020isa_probe(dev)) {
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -182,7 +182,7 @@
unregister_netdev(my_dev);
free_irq(my_dev->irq, my_dev);
release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(my_dev);
+ free_arcdev(my_dev);
}
#ifndef MODULE
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 9f44e2e..b4f8798 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -294,7 +294,7 @@
unregister_netdev(dev);
free_irq(dev->irq, dev);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index cf607ff..e0c7720 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -113,6 +113,7 @@
struct com20020_dev *info;
struct net_device *dev;
struct arcnet_local *lp;
+ int ret = -ENOMEM;
dev_dbg(&p_dev->dev, "com20020_attach()\n");
@@ -142,12 +143,18 @@
info->dev = dev;
p_dev->priv = info;
- return com20020_config(p_dev);
+ ret = com20020_config(p_dev);
+ if (ret)
+ goto fail_config;
+ return 0;
+
+fail_config:
+ free_arcdev(dev);
fail_alloc_dev:
kfree(info);
fail_alloc_info:
- return -ENOMEM;
+ return ret;
} /* com20020_attach */
static void com20020_detach(struct pcmcia_device *link)
@@ -177,7 +184,7 @@
dev = info->dev;
if (dev) {
dev_dbg(&link->dev, "kfree...\n");
- free_netdev(dev);
+ free_arcdev(dev);
}
dev_dbg(&link->dev, "kfree2...\n");
kfree(info);
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index cf214b7..3856b44 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -396,7 +396,7 @@
err = com90io_probe(dev);
if (err) {
- free_netdev(dev);
+ free_arcdev(dev);
return err;
}
@@ -419,7 +419,7 @@
free_irq(dev->irq, dev);
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
- free_netdev(dev);
+ free_arcdev(dev);
}
module_init(com90io_init)
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 3dc3d53..d8dfb9e 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -554,7 +554,7 @@
err_release_mem:
release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
- free_netdev(dev);
+ free_arcdev(dev);
return -EIO;
}
@@ -672,7 +672,7 @@
release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
release_mem_region(dev->mem_start,
dev->mem_end - dev->mem_start + 1);
- free_netdev(dev);
+ free_arcdev(dev);
}
}
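Annotation: the arcnet hunks move the reset handling out of the interrupt handler into a work item, and reset_device_work() re-checks the device state after taking RTNL because an ifdown may race in before the lock is acquired. Below is a minimal pthread sketch of that check/re-check structure only; the mutex stands in for RTNL and the two flags for netif_running() and reset_in_progress, so this is not the real locking model.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device state; the mutex stands in for RTNL. */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static bool running = true;
static bool reset_in_progress;

static void *reset_work(void *arg)
{
	(void)arg;

	/* Cheap early exit outside the lock, as in reset_device_work(). */
	if (!running || !reset_in_progress)
		return NULL;

	pthread_mutex_lock(&cfg_lock);

	/* Re-check: an "ifdown" may have raced in before we got the lock. */
	if (running && reset_in_progress) {
		printf("closing and reopening the device\n");
		reset_in_progress = false;
	}

	pthread_mutex_unlock(&cfg_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* "Interrupt handler": flag the reset and defer the heavy work. */
	reset_in_progress = true;
	pthread_create(&t, NULL, reset_work, NULL);

	pthread_join(t, NULL);
	return 0;
}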
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 39b1282..53ef485 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -140,14 +140,14 @@
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
err = IP6_ECN_decapsulate(oiph, skb);
if (unlikely(err)) {
if (log_ecn_error) {
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
@@ -213,11 +213,12 @@
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
- udp_conf.family = AF_INET6;
-#else
- udp_conf.family = AF_INET;
-#endif
+
+ if (ipv6_mod_enabled())
+ udp_conf.family = AF_INET6;
+ else
+ udp_conf.family = AF_INET;
+
udp_conf.local_udp_port = port;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -246,12 +247,6 @@
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
- /* As the setup_udp_tunnel_sock does not call udp_encap_enable if the
- * socket type is v6 an explicit call to udp_encap_enable is needed.
- */
- if (sock->sk->sk_family == AF_INET6)
- udp_encap_enable();
-
rcu_assign_pointer(bareudp->sock, sock);
return 0;
}
@@ -445,7 +440,7 @@
}
rcu_read_lock();
- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -475,7 +470,7 @@
use_cache = ip_tunnel_dst_cache_usable(skb, info);
- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
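Annotation: the bareudp hunks swap the compile-time IS_ENABLED(CONFIG_IPV6) checks for the runtime ipv6_mod_enabled() test, since IPv6 can be built in yet unusable at runtime. The userspace sketch below only illustrates the "decide at runtime, not at build time" idea with a socket() probe; it is not what ipv6_mod_enabled() does inside the kernel.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Runtime probe: is IPv6 actually usable on this system right now? */
static int ipv6_usable(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}

int main(void)
{
	int family;

	/* A build-time #if check would say "yes" even on a machine where
	 * IPv6 is administratively disabled; a runtime probe reflects the
	 * actual state, which is the spirit of the change above.
	 */
	family = ipv6_usable() ? AF_INET6 : AF_INET;

	printf("using address family %s\n",
	       family == AF_INET6 ? "AF_INET6" : "AF_INET");
	return 0;
}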
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c2cef7b..acb6ff0 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -85,8 +85,9 @@
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -1988,30 +1989,24 @@
*/
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- BOND_AD_INFO(bond).aggregator_identifier = 0;
+ /* initialize how many times this module is called in one
+ * second (should be about every 100ms)
+ */
+ ad_ticks_per_sec = tick_resolution;
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
-
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
@@ -2209,7 +2204,8 @@
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
- ad_clear_agg(temp_aggregator);
+ if (temp_aggregator->num_of_ports == 0)
+ ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 0436aef..152f76f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1279,12 +1279,12 @@
return res;
if (rlb_enabled) {
- bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
+ bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index cbeb69b..f38a6ce 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -827,12 +827,8 @@
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -852,7 +848,8 @@
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -863,10 +860,12 @@
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -2073,16 +2072,14 @@
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2310,7 +2307,8 @@
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -3368,9 +3366,11 @@
if (!rtnl_trylock())
return;
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
@@ -3770,6 +3770,9 @@
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -3781,6 +3784,7 @@
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -3788,6 +3792,19 @@
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 47a6d62..a701932 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -723,13 +723,21 @@
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
/* register Netdev */
- err = register_netdev(netdev);
+ err = register_netdevice(netdev);
if (err) {
+ rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
+ virtio_device_ready(vdev);
+
+ rtnl_unlock();
+
debugfs_init(cfv);
return 0;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7cbaac2..4299502 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -954,11 +954,6 @@
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -987,6 +982,11 @@
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 39802f1..3794a7b 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -241,13 +241,14 @@
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
struct grcan_priv {
struct can_priv can; /* must be the first member */
struct net_device *dev;
+ struct device *ofdev_dev;
struct napi_struct napi;
struct grcan_registers __iomem *regs; /* ioremap'ed registers */
@@ -924,7 +925,7 @@
struct grcan_priv *priv = netdev_priv(dev);
struct grcan_dma *dma = &priv->dma;
- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
dma->base_handle);
memset(dma, 0, sizeof(*dma));
}
@@ -949,7 +950,7 @@
/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
- dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
dma->base_size,
&dma->base_handle,
GFP_KERNEL);
@@ -1113,8 +1114,10 @@
priv->closing = true;
if (priv->need_txbug_workaround) {
+ spin_unlock_irqrestore(&priv->lock, flags);
del_timer_sync(&priv->hang_timer);
del_timer_sync(&priv->rr_timer);
+ spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
grcan_stop_hardware(dev);
@@ -1134,7 +1137,7 @@
return 0;
}
-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -1142,7 +1145,7 @@
spin_lock_irqsave(&priv->lock, flags);
- work_done = catch_up_echo_skb(dev, budget, true);
+ work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@@ -1156,8 +1159,6 @@
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@@ -1239,19 +1240,13 @@
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
- int tx_work_done, rx_work_done;
- int rx_budget = budget / 2;
- int tx_budget = budget - rx_budget;
+ int work_done;
- /* Half of the budget for receiving messages */
- rx_work_done = grcan_receive(dev, rx_budget);
+ work_done = grcan_receive(dev, budget);
- /* Half of the budget for transmitting messages as that can trigger echo
- * frames being received
- */
- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+ grcan_transmit_catch_up(dev);
- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@@ -1268,7 +1263,7 @@
spin_unlock_irqrestore(&priv->lock, flags);
}
- return rx_work_done + tx_work_done;
+ return work_done;
}
/* Work around the tx bug by waiting for the risky situation to clear. If that fails,
@@ -1600,6 +1595,7 @@
memcpy(&priv->config, &grcan_module_config,
sizeof(struct grcan_device_config));
priv->dev = dev;
+ priv->ofdev_dev = &ofdev->dev;
priv->regs = base;
priv->can.bittiming_const = &grcan_bittiming_const;
priv->can.do_set_bittiming = grcan_set_bittiming;
@@ -1652,6 +1648,7 @@
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device_node *sysid_parent;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1660,10 +1657,14 @@
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
- err = of_property_read_u32(np, "systemid", &sysid);
- if (!err && ((sysid & GRLIB_VERSION_MASK)
- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
- txbug = false;
+ sysid_parent = of_find_node_by_path("/ambapp0");
+ if (sysid_parent) {
+ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
+ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+ of_node_put(sysid_parent);
+ }
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {
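Annotation: the grcan poll hunk stops splitting the NAPI budget between rx and tx and only completes NAPI when less than the full budget was consumed, so rx work cannot be left pending with interrupts re-enabled. A generic budget-accounting sketch follows; the pending counter and frame handling are hypothetical and no kernel NAPI API is used.

#include <stdio.h>

/* Hypothetical receive queue: pretend there are 'pending' frames. */
static int pending = 37;

static int receive(int budget)
{
	int done = 0;

	while (done < budget && pending > 0) {
		pending--;	/* "handle one frame" */
		done++;
	}
	return done;
}

/* NAPI-style poll: only signal completion (and re-enable interrupts) when
 * less than the full budget was consumed, i.e. the queue ran dry.
 */
static int poll(int budget)
{
	int work_done = receive(budget);

	if (work_done < budget)
		printf("complete: re-enable interrupts\n");
	else
		printf("budget exhausted: poll again\n");

	return work_done;
}

int main(void)
{
	while (poll(16) == 16)
		;
	return 0;
}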
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 19a7e4a..19a19a7 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1491,8 +1491,6 @@
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
- can_put_echo_skb(skb, dev, 0);
-
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
@@ -1509,6 +1507,9 @@
m_can_write(cdev, M_CAN_CCCR, cccr);
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+
+ can_put_echo_skb(skb, dev, 0);
+
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e254e04..ef64976 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -325,14 +325,14 @@
&mscan_clksrc);
if (!priv->can.clock.freq) {
dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
- goto exit_free_mscan;
+ goto exit_put_clock;
}
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_free_mscan;
+ goto exit_put_clock;
}
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -340,7 +340,9 @@
return 0;
-exit_free_mscan:
+exit_put_clock:
+ if (data->put_clock)
+ data->put_clock(ofdev);
free_candev(dev);
exit_dispose_irq:
irq_dispose_mapping(irq);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 79d9abd..1272ec7 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -489,6 +489,7 @@
if (!skb)
return;
+ errc = ioread32(&priv->regs->errc);
if (status & PCH_BUS_OFF) {
pch_can_set_tx_all(priv, 0);
pch_can_set_rx_all(priv, 0);
@@ -496,9 +497,11 @@
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
+ } else {
+ cf->data[6] = errc & PCH_TEC;
+ cf->data[7] = (errc & PCH_REC) >> 8;
}
- errc = ioread32(&priv->regs->errc);
/* Warning interrupt. */
if (status & PCH_EWARN) {
state = CAN_STATE_ERROR_WARNING;
@@ -556,9 +559,6 @@
break;
}
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
-
priv->can.state = state;
netif_receive_skb(skb);
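Annotation: pch_can (and the rcar_can, sja1000, hi311x and sun4i_can hunks later in this series) now read the error counters up front but only report them in the error frame when the controller is not bus-off. A reduced sketch of that reporting rule follows; the err_frame struct is a simplified stand-in for struct can_frame and only the data[6]/data[7] convention is mirrored.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Simplified stand-in for struct can_frame's 8 data bytes. */
struct err_frame {
	uint32_t can_id;
	uint8_t data[8];
};

#define ERR_BUSOFF 0x00000040u	/* mirrors CAN_ERR_BUSOFF */

/* Copy the counters into data[6]/[7] only when not bus-off, since they are
 * meaningless (or no longer readable) in that state.
 */
static void report_state(struct err_frame *cf, bool bus_off,
			 uint8_t txerr, uint8_t rxerr)
{
	if (bus_off) {
		cf->can_id |= ERR_BUSOFF;
		return;
	}
	cf->data[6] = txerr;
	cf->data[7] = rxerr;
}

int main(void)
{
	struct err_frame cf = { 0 };

	report_state(&cf, false, 96, 12);
	printf("txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);
	return 0;
}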
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 3570a4d..134eda6 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -235,11 +235,8 @@
if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
txerr = readb(&priv->regs->tecr);
rxerr = readb(&priv->regs->recr);
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_CRTL;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
}
if (eifr & RCAR_CAN_EIFR_BEIF) {
int rx_errors = 0, tx_errors = 0;
@@ -339,6 +336,9 @@
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
+ } else if (skb) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
if (eifr & RCAR_CAN_EIFR_ORIF) {
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 67f0f14..c61534a 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1075,7 +1075,7 @@
struct rcar_canfd_global *gpriv = dev_id;
struct net_device *ndev;
struct rcar_canfd_channel *priv;
- u32 sts, gerfl;
+ u32 sts, cc, gerfl;
u32 ch, ridx;
/* Global error interrupts still indicate a condition specific
@@ -1093,7 +1093,9 @@
/* Handle Rx interrupts */
sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
- if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ cc = rcar_canfd_read(priv->base, RCANFD_RFCC(ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF &&
+ cc & RCANFD_RFCC_RFIE)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 25a4d7d..ee34bae 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -405,9 +405,6 @@
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
-
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -429,6 +426,10 @@
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 7d2315c..28273e8 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -670,8 +670,6 @@
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
@@ -684,6 +682,9 @@
hi3110_hw_sleep(spi);
break;
}
+ } else {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
}
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 89897a2..ffcb04a 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1074,9 +1074,6 @@
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1086,6 +1083,18 @@
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1096,6 +1105,9 @@
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
@@ -1407,11 +1419,14 @@
ret = mcp251x_gpio_setup(priv);
if (ret)
- goto error_probe;
+ goto out_unregister_candev;
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
+out_unregister_candev:
+ unregister_candev(net);
+
error_probe:
destroy_workqueue(priv->wq);
priv->wq = NULL;
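Annotation: the mcp251x interrupt hunk re-reads the interrupt-flag register after servicing RX buffer 0 and ORs the results, so a frame that landed in buffer 1 in the meantime is not lost. The sketch below shows only that re-read-and-merge pattern with a hypothetical two-buffer device model, not the real SPI register access.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical interrupt-flag register of a device with two RX buffers. */
#define RX0IF 0x01u
#define RX1IF 0x02u

static uint8_t hw_intf = RX0IF;	/* pretend only RX0 was pending at first */

static uint8_t read_intf(void)
{
	uint8_t v = hw_intf;

	/* Simulate a frame landing in buffer 1 while buffer 0 is serviced. */
	hw_intf |= RX1IF;
	return v;
}

int main(void)
{
	uint8_t intf = read_intf();

	if (intf & RX0IF) {
		printf("drain RX buffer 0\n");

		/* As in the hunk above: if RX1IF was not already known to be
		 * set, read the flags again and merge, so an event that
		 * arrived in the meantime is not missed.
		 */
		if (!(intf & RX1IF))
			intf |= read_intf();
	}

	if (intf & RX1IF)
		printf("drain RX buffer 1\n");

	return 0;
}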
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index abe00a0..189d226 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2578,7 +2578,7 @@
out_kfree_buf_rx:
kfree(buf_rx);
- return 0;
+ return err;
}
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index fa1246e..766dbd1 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -426,7 +426,7 @@
/* The tx_obj_raw version is used in spi async, i.e. without
* regmap. We have to take care of endianness ourselves.
*/
-struct mcp251xfd_hw_tx_obj_raw {
+struct __packed mcp251xfd_hw_tx_obj_raw {
__le32 id;
__le32 flags;
u8 data[sizeof_field(struct canfd_frame, data)];
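Annotation: the mcp251xfd.h hunk marks the raw TX object __packed because it is sent to the device byte for byte and must not contain compiler padding. The sketch below demonstrates the effect with __attribute__((packed)), which is what the kernel's __packed macro expands to; the tx_obj layout here is hypothetical, not the real mcp251xfd object.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical on-the-wire layout. Without packing the compiler may insert
 * padding after 'data' so that the following 32-bit member is aligned,
 * which would no longer match what the device expects byte for byte.
 */
struct tx_obj_plain {
	uint32_t id;
	uint32_t flags;
	uint8_t  data[7];	/* odd size on purpose, to force padding */
	uint32_t crc;
};

struct __attribute__((packed)) tx_obj_packed {
	uint32_t id;
	uint32_t flags;
	uint8_t  data[7];
	uint32_t crc;
};

int main(void)
{
	printf("plain : size=%zu, crc at offset %zu\n",
	       sizeof(struct tx_obj_plain),
	       offsetof(struct tx_obj_plain, crc));
	printf("packed: size=%zu, crc at offset %zu\n",
	       sizeof(struct tx_obj_packed),
	       offsetof(struct tx_obj_packed, crc));
	return 0;
}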
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b3f2f4f..39ddb3d 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -525,11 +525,6 @@
rxerr = (errc >> 16) & 0xFF;
txerr = errc & 0xFF;
- if (skb) {
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
if (isrc & SUN4I_INT_DATA_OR) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -560,6 +555,10 @@
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (skb && state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & SUN4I_INT_BUS_ERR) {
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 249d2fb..ff05b52 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -194,7 +194,7 @@
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
@@ -823,7 +823,6 @@
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
- dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index e023c40..a879200 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -184,6 +184,8 @@
struct usb_anchor tx_submitted;
atomic_t active_tx_urbs;
+ void *rxbuf[GS_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
};
/* usb interface struct */
@@ -592,6 +594,7 @@
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
+ dma_addr_t buf_dma;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -602,7 +605,7 @@
buf = usb_alloc_coherent(dev->udev,
sizeof(struct gs_host_frame),
GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -610,6 +613,8 @@
return -ENOMEM;
}
+ urb->transfer_dma = buf_dma;
+
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
@@ -633,10 +638,17 @@
rc);
usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ buf,
+ buf_dma);
usb_free_urb(urb);
break;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
/* Drop reference,
* USB core will take care of freeing it
*/
@@ -666,6 +678,7 @@
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
dm->mode = cpu_to_le32(GS_CAN_MODE_START);
dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
@@ -682,13 +695,12 @@
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
kfree(dm);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
kfree(dm);
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -701,13 +713,20 @@
int rc;
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
+ unsigned int i;
netif_stop_queue(netdev);
/* Stop polling */
parent->active_channels--;
- if (!parent->active_channels)
+ if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
+ for (i = 0; i < GS_MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ dev->rxbuf[i],
+ dev->rxbuf_dma[i]);
+ }
/* Stop sending URBs */
usb_kill_anchored_urbs(&dev->tx_submitted);
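Annotation: the gs_usb hunks record every coherent RX buffer (and its DMA handle) allocated at open time so the close path can hand them back, closing a leak. A reduced userspace sketch of the bookkeeping pattern follows; malloc/free stand in for usb_alloc_coherent()/usb_free_coherent() and the ctx struct is hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define NR_BUFS 4
#define BUF_SZ  64

/* Remember every buffer allocated at open time so close can free it. */
struct ctx {
	void *rxbuf[NR_BUFS];
};

static int open_bufs(struct ctx *c)
{
	int i;

	for (i = 0; i < NR_BUFS; i++) {
		c->rxbuf[i] = malloc(BUF_SZ);	/* stands in for usb_alloc_coherent() */
		if (!c->rxbuf[i])
			return -1;
	}
	return 0;
}

static void close_bufs(struct ctx *c)
{
	int i;

	for (i = 0; i < NR_BUFS; i++) {
		free(c->rxbuf[i]);		/* stands in for usb_free_coherent() */
		c->rxbuf[i] = NULL;
	}
}

int main(void)
{
	struct ctx c = { { NULL } };

	if (open_bufs(&c)) {
		close_bufs(&c);	/* free(NULL) is a no-op, partial cleanup is safe */
		return 1;
	}
	close_bufs(&c);
	return 0;
}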
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 390b6bd..62958f0 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -35,9 +35,10 @@
#define KVASER_USB_RX_BUFFER_SIZE 3072
#define KVASER_USB_MAX_NET_DEVICES 5
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -65,12 +66,7 @@
struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
- union {
- struct {
- enum kvaser_usb_leaf_family family;
- } leaf;
- struct kvaser_usb_dev_card_data_hydra hydra;
- };
+ struct kvaser_usb_dev_card_data_hydra hydra;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -84,7 +80,7 @@
struct usb_device *udev;
struct usb_interface *intf;
struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
- const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_driver_info *driver_info;
const struct kvaser_usb_dev_cfg *cfg;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
@@ -166,6 +162,12 @@
int *cmd_len, u16 transid);
};
+struct kvaser_usb_driver_info {
+ u32 quirks;
+ enum kvaser_usb_leaf_family family;
+ const struct kvaser_usb_dev_ops *ops;
+};
+
struct kvaser_usb_dev_cfg {
const struct can_clock clock;
const unsigned int timestamp_freq;
@@ -176,6 +178,8 @@
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
@@ -185,4 +189,7 @@
int len);
int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
#endif /* KVASER_USB_H */
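Annotation: the kvaser_usb rework replaces product-ID range checks and bit-flag driver_info with a pointer to a per-device kvaser_usb_driver_info descriptor carrying quirks plus an ops pointer. The sketch below shows the general "ID table entries point at const config structs" pattern; the vendor/product IDs, quirk bits and ops_name field are hypothetical illustrations, not the driver's real values.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical per-device descriptor: quirk bits plus an ops handle. */
struct dev_info {
	uint32_t quirks;
	const char *ops_name;	/* stands in for a const ops struct pointer */
};

#define QUIRK_TXRX_ERRORS 0x1u
#define QUIRK_SILENT_MODE 0x2u

static const struct dev_info info_leaf  = { QUIRK_TXRX_ERRORS, "leaf_ops" };
static const struct dev_info info_hydra = { 0, "hydra_ops" };

struct id_entry {
	uint16_t vendor, product;
	const struct dev_info *info;	/* like .driver_info in the table above */
};

static const struct id_entry id_table[] = {
	{ 0x0bfd, 0x000a, &info_leaf },
	{ 0x0bfd, 0x0102, &info_hydra },
	{ 0 }
};

static const struct dev_info *match(uint16_t vendor, uint16_t product)
{
	const struct id_entry *e;

	for (e = id_table; e->info; e++)
		if (e->vendor == vendor && e->product == product)
			return e->info;
	return NULL;
}

int main(void)
{
	const struct dev_info *info = match(0x0bfd, 0x000a);

	if (!info)
		return 1;
	printf("ops=%s, silent mode %ssupported\n", info->ops_name,
	       (info->quirks & QUIRK_SILENT_MODE) ? "" : "not ");
	return 0;
}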
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 0f1d3e8..7491f85 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -79,104 +79,134 @@
#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
- (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID);
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+ .quirks = 0,
+ .ops = &kvaser_usb_hydra_dev_ops,
+};
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE,
+ .family = KVASER_USBCAN,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+ .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ /* Leaf M32C USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+ /* Leaf i.MX28 USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
/* Minihydra USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -267,6 +297,7 @@
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
struct kvaser_usb *dev = urb->context;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
unsigned int i;
@@ -283,8 +314,8 @@
goto resubmit_urb;
}
- dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
- urb->actual_length);
+ ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev,
@@ -378,6 +409,7 @@
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
err = open_candev(netdev);
@@ -388,11 +420,11 @@
if (err)
goto error;
- err = dev->ops->dev_set_opt_mode(priv);
+ err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
- err = dev->ops->dev_start_chip(priv);
+ err = ops->dev_start_chip(priv);
if (err) {
netdev_warn(netdev, "Cannot start device, error %d\n", err);
goto error;
@@ -421,7 +453,7 @@
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -449,22 +481,23 @@
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
netif_stop_queue(netdev);
- err = dev->ops->dev_flush_queue(priv);
+ err = ops->dev_flush_queue(priv);
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n",
err);
}
- err = dev->ops->dev_stop_chip(priv);
+ err = ops->dev_stop_chip(priv);
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
@@ -503,6 +536,7 @@
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
struct net_device_stats *stats = &netdev->stats;
struct kvaser_usb_tx_urb_context *context = NULL;
struct urb *urb;
@@ -545,8 +579,8 @@
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
- context->echo_index);
+ buf = ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
+ context->echo_index);
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
@@ -630,15 +664,16 @@
}
}
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
- const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
{
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
+ const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+ const struct kvaser_usb_dev_ops *ops = driver_info->ops;
int err;
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, channel);
if (err)
return err;
}
@@ -655,6 +690,7 @@
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
@@ -667,20 +703,19 @@
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
- priv->can.do_set_mode = dev->ops->dev_set_mode;
- if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ priv->can.do_set_bittiming = ops->dev_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
- priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
- if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming =
- dev->ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
@@ -711,29 +746,22 @@
struct kvaser_usb *dev;
int err;
int i;
+ const struct kvaser_usb_driver_info *driver_info;
+ const struct kvaser_usb_dev_ops *ops;
+
+ driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+ if (!driver_info)
+ return -ENODEV;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- if (kvaser_is_leaf(id)) {
- dev->card_data.leaf.family = KVASER_LEAF;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_usbcan(id)) {
- dev->card_data.leaf.family = KVASER_USBCAN;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_hydra(id)) {
- dev->ops = &kvaser_usb_hydra_dev_ops;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) is not a supported Kvaser USB device\n",
- id->idProduct);
- return -ENODEV;
- }
-
dev->intf = intf;
+ dev->driver_info = driver_info;
+ ops = driver_info->ops;
- err = dev->ops->dev_setup_endpoints(dev);
+ err = ops->dev_setup_endpoints(dev);
if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
return err;
@@ -747,22 +775,22 @@
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
- err = dev->ops->dev_init_card(dev);
+ err = ops->dev_init_card(dev);
if (err) {
dev_err(&intf->dev,
"Failed to initialize card, error %d\n", err);
return err;
}
- err = dev->ops->dev_get_software_info(dev);
+ err = ops->dev_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_software_details) {
- err = dev->ops->dev_get_software_details(dev);
+ if (ops->dev_get_software_details) {
+ err = ops->dev_get_software_details(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software details, error %d\n", err);
@@ -780,14 +808,14 @@
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
- err = dev->ops->dev_get_card_info(dev);
+ err = ops->dev_get_card_info(dev);
if (err) {
dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_capabilities) {
- err = dev->ops->dev_get_capabilities(dev);
+ if (ops->dev_get_capabilities) {
+ err = ops->dev_get_capabilities(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get capabilities, error %d\n", err);
@@ -797,7 +825,7 @@
}
for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(dev, id, i);
+ err = kvaser_usb_init_one(dev, i);
if (err) {
kvaser_usb_remove_interfaces(dev);
return err;
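
The core-driver hunks above drop the probe-time product-ID switch and the stored dev->ops pointer in favour of a driver_info structure referenced from the USB ID table, with each function resolving ops locally. As a rough illustration of that table-driven pattern only (the structure and function names below are invented for the sketch, not the driver's), a minimal standalone C example:

#include <stdio.h>

enum family { FAMILY_LEAF, FAMILY_USBCAN, FAMILY_HYDRA };

struct dev_ops {
    int (*start_chip)(int channel);
};

static int leaf_start_chip(int channel)  { printf("leaf start ch%d\n", channel);  return 0; }
static int hydra_start_chip(int channel) { printf("hydra start ch%d\n", channel); return 0; }

static const struct dev_ops leaf_ops  = { .start_chip = leaf_start_chip };
static const struct dev_ops hydra_ops = { .start_chip = hydra_start_chip };

/* per-model description, analogous to driver_info referenced from the ID table */
struct driver_info {
    const struct dev_ops *ops;
    unsigned int quirks;
    enum family family;
};

static const struct driver_info info_leaf  = { &leaf_ops,  0x1, FAMILY_LEAF  };
static const struct driver_info info_hydra = { &hydra_ops, 0x0, FAMILY_HYDRA };

struct usb_id { unsigned short product; const struct driver_info *info; };

static const struct usb_id id_table[] = {
    { 0x000a, &info_leaf },
    { 0x0102, &info_hydra },
};

int main(void)
{
    /* probe: resolve ops once from the matched table entry, no product-ID switch */
    for (unsigned i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++) {
        const struct dev_ops *ops = id_table[i].info->ops;
        ops->start_chip(0);
    }
    return 0;
}
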
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 218fadc..45d2787 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -371,7 +371,7 @@
.brp_inc = 1,
};
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
.name = "kvaser_usb_flex",
.tseg1_min = 4,
.tseg1_max = 16,
@@ -890,8 +890,10 @@
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
stats = &netdev->stats;
stats->rx_packets++;
@@ -1045,8 +1047,10 @@
shhwtstamps->hwtstamp = hwtstamp;
cf->can_id |= CAN_ERR_BUSERROR;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
@@ -1841,7 +1845,7 @@
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
priv->channel);
@@ -1859,7 +1863,7 @@
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
* see comment in kvaser_usb_hydra_update_state()
@@ -1882,7 +1886,7 @@
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
@@ -2024,5 +2028,5 @@
.freq = 24000000,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
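
The two hydra hunks that wrap cf->data[6] and cf->data[7] stop reporting the bus error counter bytes once the controller has gone bus-off, where those bytes are no longer meaningful. A small self-contained sketch of that guard, with made-up state and frame types standing in for the CAN structures:

#include <stdio.h>
#include <string.h>

enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

struct err_frame { unsigned char data[8]; };

/* Fill the TX/RX error counter bytes only while the counters are meaningful;
 * once the controller is bus-off they are left at zero. */
static void fill_berr_counters(struct err_frame *cf, enum can_state state,
                               unsigned char txerr, unsigned char rxerr)
{
    if (state != BUS_OFF) {
        cf->data[6] = txerr;
        cf->data[7] = rxerr;
    }
}

int main(void)
{
    struct err_frame cf;

    memset(&cf, 0, sizeof(cf));
    fill_berr_counters(&cf, ERROR_PASSIVE, 130, 5);
    printf("passive: txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);

    memset(&cf, 0, sizeof(cf));
    fill_berr_counters(&cf, BUS_OFF, 255, 0);
    printf("bus-off: txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);
    return 0;
}
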
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 8b5d1ad..15380cc 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -100,16 +100,6 @@
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
@@ -319,6 +309,38 @@
} u;
} __packed;
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -342,50 +364,107 @@
};
};
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 16,
+ .brp_inc = 1,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+ .name = "kvaser_usb_leaf",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 2,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.clock = {
.freq = 8000000,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
.clock = {
.freq = 16000000,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
+ .clock = {
+ .freq = 16000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24000000,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32000000,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
@@ -405,7 +484,7 @@
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
@@ -493,6 +572,9 @@
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -525,16 +607,23 @@
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
- switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
- case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
- break;
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for 16MHz
+ * clock, regardless of the actual clock
+ */
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ } else {
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+ break;
+ }
}
}
@@ -551,7 +640,7 @@
if (err)
return err;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
@@ -559,7 +648,7 @@
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
- dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
break;
}
@@ -598,7 +687,7 @@
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
- (dev->card_data.leaf.family == KVASER_USBCAN &&
+ (dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
@@ -734,7 +823,7 @@
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
@@ -813,7 +902,7 @@
}
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -840,8 +929,10 @@
break;
}
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+ }
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
@@ -1005,7 +1096,7 @@
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->card_data.leaf.family == KVASER_LEAF &&
+ (dev->driver_info->family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
@@ -1021,7 +1112,7 @@
return;
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
break;
@@ -1036,7 +1127,7 @@
return;
}
- if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -1118,6 +1209,9 @@
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1133,14 +1227,14 @@
break;
case CMD_LEAF_LOG_MESSAGE:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- if (dev->card_data.leaf.family == KVASER_LEAF)
+ if (dev->driver_info->family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1152,12 +1246,12 @@
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->card_data.leaf.family != KVASER_USBCAN)
+ if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
@@ -1230,7 +1324,7 @@
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
@@ -1248,7 +1342,7 @@
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
@@ -1336,9 +1430,13 @@
switch (mode) {
case CAN_MODE_START:
+ kvaser_usb_unlink_tx_urbs(priv);
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
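
The new kvaser_usb_leaf_cmd_sizes_* tables and kvaser_usb_leaf_verify_size() reject commands shorter than the payload their ID implies, with CMD_SIZE_ANY marking events whose length is not checked. A stripped-down userspace sketch of the same lookup-and-check idea (the command IDs and sizes below are arbitrary, not the protocol's):

#include <stdio.h>
#include <stdint.h>

#define CMD_HEADER_LEN 2
#define SIZE_ANY       0xff

enum { CMD_START_REPLY = 1, CMD_RX_MSG = 2, CMD_IGNORED_EVENT = 3, CMD_MAX = 4 };

/* minimum payload size expected for each known command ID; 0 means unknown */
static const uint8_t min_payload[CMD_MAX] = {
    [CMD_START_REPLY]   = 4,
    [CMD_RX_MSG]        = 16,
    [CMD_IGNORED_EVENT] = SIZE_ANY, /* accepted regardless of length */
};

static int verify_cmd_size(uint8_t id, uint8_t len)
{
    uint8_t min = (id < CMD_MAX) ? min_payload[id] : 0;

    if (min == SIZE_ANY)
        return 0;
    if (min == 0) {
        fprintf(stderr, "unhandled command %u (len %u)\n", id, len);
        return -1;
    }
    if (len < min + CMD_HEADER_LEN) {
        fprintf(stderr, "command %u too short: %u < %u\n",
                id, len, min + CMD_HEADER_LEN);
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", verify_cmd_size(CMD_RX_MSG, 18));        /* ok */
    printf("%d\n", verify_cmd_size(CMD_RX_MSG, 10));        /* too short */
    printf("%d\n", verify_cmd_size(CMD_IGNORED_EVENT, 3));  /* any size */
    return 0;
}
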
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 912160f..2106333 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -33,10 +33,6 @@
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
-/* MCBA endpoint numbers */
-#define MCBA_USB_EP_IN 1
-#define MCBA_USB_EP_OUT 1
-
/* Microchip command id */
#define MBCA_CMD_RECEIVE_MESSAGE 0xE3
#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
@@ -84,6 +80,8 @@
atomic_t free_ctx_cnt;
void *rxbuf[MCBA_MAX_RX_URBS];
dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ int rx_pipe;
+ int tx_pipe;
};
/* CAN frame */
@@ -272,10 +270,8 @@
memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
- usb_fill_bulk_urb(urb, priv->udev,
- usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
- MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
- ctx);
+ usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE,
+ mcba_usb_write_bulk_callback, ctx);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
@@ -368,7 +364,6 @@
xmit_failed:
can_free_echo_skb(priv->netdev, ctx->ndx);
mcba_usb_free_ctx(ctx);
- dev_kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
@@ -611,7 +606,7 @@
resubmit_urb:
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT),
+ priv->rx_pipe,
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
@@ -656,7 +651,7 @@
urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+ priv->rx_pipe,
buf, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -810,6 +805,13 @@
struct mcba_priv *priv;
int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+
+ err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL);
+ if (err) {
+ dev_err(&intf->dev, "Can't find endpoints\n");
+ return err;
+ }
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
if (!netdev) {
@@ -855,6 +857,9 @@
goto cleanup_free_candev;
}
+ priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
+ priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
+
devm_can_led_init(netdev);
/* Start USB dev only if we have successfully registered CAN device */
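
Rather than hard-coding MCBA_USB_EP_IN/MCBA_USB_EP_OUT, the probe hunk asks usb_find_common_endpoints() for the interface's actual bulk endpoints and derives the pipes from the returned descriptors. As an illustration of why discovering endpoints beats assuming their numbers, a tiny sketch that scans a descriptor array (the two field names mirror the USB spec; everything else is invented):

#include <stdio.h>
#include <stdint.h>

#define EP_DIR_IN    0x80  /* bEndpointAddress direction bit */
#define EP_XFER_BULK 0x02  /* bmAttributes transfer type */

struct ep_desc { uint8_t bEndpointAddress; uint8_t bmAttributes; };

/* find the first bulk-in and bulk-out endpoints of an interface */
static int find_bulk_endpoints(const struct ep_desc *eps, int n,
                               const struct ep_desc **in, const struct ep_desc **out)
{
    *in = *out = NULL;
    for (int i = 0; i < n; i++) {
        if ((eps[i].bmAttributes & 0x03) != EP_XFER_BULK)
            continue;
        if ((eps[i].bEndpointAddress & EP_DIR_IN) && !*in)
            *in = &eps[i];
        else if (!(eps[i].bEndpointAddress & EP_DIR_IN) && !*out)
            *out = &eps[i];
    }
    return (*in && *out) ? 0 : -1;
}

int main(void)
{
    const struct ep_desc eps[] = {
        { 0x81, 0x03 },  /* interrupt-in, skipped */
        { 0x82, 0x02 },  /* bulk-in */
        { 0x03, 0x02 },  /* bulk-out */
    };
    const struct ep_desc *in, *out;

    if (!find_bulk_endpoints(eps, 3, &in, &out))
        printf("bulk-in 0x%02x, bulk-out 0x%02x\n",
               (unsigned)in->bEndpointAddress, (unsigned)out->bEndpointAddress);
    return 0;
}
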
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index ca7c55d..885c54c 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -442,9 +442,10 @@
if (rx_errors)
stats->rx_errors++;
-
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (priv->can.state != CAN_STATE_BUS_OFF) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
@@ -670,9 +671,20 @@
atomic_inc(&priv->active_tx_urbs);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err))
- goto failed;
- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
+ stats->tx_dropped++;
+ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
/* Slow down tx path */
netif_stop_queue(netdev);
@@ -691,19 +703,6 @@
return NETDEV_TX_BUSY;
-failed:
- can_free_echo_skb(netdev, context->echo_index);
-
- usb_unanchor_urb(urb);
- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
-
- atomic_dec(&priv->active_tx_urbs);
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
-
nomembuf:
usb_free_urb(urb);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 7000c6c..282c53e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -148,7 +148,7 @@
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
- dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
dev->needs_free_netdev = true;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3759982..1a3fba3 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -239,7 +239,7 @@
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -259,20 +259,20 @@
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 08a675a..c6563d2 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -710,6 +710,9 @@
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
+ if (priv->wol_ports_mask & BIT(port))
+ return;
+
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
if (priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -771,6 +774,11 @@
if (duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
core_writel(priv, reg, offset);
}
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d82cee5..cbf44fc 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -567,14 +567,14 @@
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
int port, u32 location)
{
- struct cfp_rule *rule = NULL;
+ struct cfp_rule *rule;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
if (rule->port == port && rule->fs.location == location)
- break;
+ return rule;
}
- return rule;
+ return NULL;
}
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
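
bcm_sf2_cfp_rule_find() now returns from inside the loop and reports NULL on a miss, because after list_for_each_entry() runs to completion the cursor no longer points at a valid entry. The same idea on a plain singly linked list, as a hedged sketch:

#include <stdio.h>
#include <stddef.h>

struct rule {
    int port;
    unsigned int location;
    struct rule *next;
};

/* Return the matching rule or NULL; never rely on the value of the loop
 * cursor after the list has been walked to the end. */
static struct rule *rule_find(struct rule *head, int port, unsigned int location)
{
    for (struct rule *r = head; r; r = r->next)
        if (r->port == port && r->location == location)
            return r;
    return NULL;
}

int main(void)
{
    struct rule c = { 2, 30, NULL };
    struct rule b = { 1, 20, &c };
    struct rule a = { 0, 10, &b };

    printf("hit:  %p\n", (void *)rule_find(&a, 1, 20));
    printf("miss: %p\n", (void *)rule_find(&a, 9, 99)); /* NULL, not a bogus pointer */
    return 0;
}
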
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e38906a..fbeb99a 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -376,6 +376,17 @@
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+static void dsa_loop_phydevs_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (!IS_ERR(phydevs[i])) {
+ fixed_phy_unregister(phydevs[i]);
+ phy_device_free(phydevs[i]);
+ }
+}
+
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
@@ -383,23 +394,23 @@
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
- unsigned int i;
+ unsigned int i, ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
- return mdio_driver_register(&dsa_loop_drv);
+ ret = mdio_driver_register(&dsa_loop_drv);
+ if (ret)
+ dsa_loop_phydevs_unregister();
+
+ return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
- unsigned int i;
-
mdio_driver_unregister(&dsa_loop_drv);
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i]))
- fixed_phy_unregister(phydevs[i]);
+ dsa_loop_phydevs_unregister();
}
module_exit(dsa_loop_exit);
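
The dsa_loop hunks add an unwind path so that a failing mdio_driver_register() releases the fixed PHYs that dsa_loop_init() had already registered. A generic sketch of that register-then-unwind-on-error shape, with stand-in stubs instead of the real PHY API:

#include <stdio.h>

#define NUM_PHYS 3

static int phys[NUM_PHYS];

static void unregister_phys(void)
{
    for (int i = 0; i < NUM_PHYS; i++)
        if (phys[i])
            phys[i] = 0;    /* stand-in for fixed_phy_unregister() */
}

static int register_driver(int fail)
{
    return fail ? -1 : 0;
}

/* init allocates the PHY stubs first, so a later registration failure
 * must release them before returning the error */
static int module_init_sketch(int inject_failure)
{
    int ret;

    for (int i = 0; i < NUM_PHYS; i++)
        phys[i] = 1;        /* stand-in for fixed_phy_register() */

    ret = register_driver(inject_failure);
    if (ret)
        unregister_phys();
    return ret;
}

int main(void)
{
    int ret;

    printf("ok path:   %d\n", module_init_sketch(0));
    ret = module_init_sketch(1);
    printf("fail path: %d (phys released: %s)\n", ret, phys[0] == 0 ? "yes" : "no");
    return 0;
}
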
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 80ef7ea..70895e4 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1629,9 +1629,6 @@
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
- /* Configure the RMII clock as output: */
- miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1984,8 +1981,10 @@
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
- if (err)
+ if (err) {
+ of_node_put(gphy_fw_np);
goto remove_gphy;
+ }
i++;
}
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index 8b00f8e..5639c5c 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -86,12 +86,23 @@
};
MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+static const struct spi_device_id ksz8795_spi_ids[] = {
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8873" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+
static struct spi_driver ksz8795_spi_driver = {
.driver = {
.name = "ksz8795-switch",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz8795_dt_ids),
},
+ .id_table = ksz8795_spi_ids,
.probe = ksz8795_spi_probe,
.remove = ksz8795_spi_remove,
.shutdown = ksz8795_spi_shutdown,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index b3aa99e..ece4c05 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -762,6 +762,9 @@
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 1142768..9bda83d 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -88,12 +88,24 @@
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static const struct spi_device_id ksz9477_spi_ids[] = {
+ { "ksz9477" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
+
static struct spi_driver ksz9477_spi_driver = {
.driver = {
.name = "ksz9477-switch",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
+ .id_table = ksz9477_spi_ids,
.probe = ksz9477_spi_probe,
.remove = ksz9477_spi_remove,
.shutdown = ksz9477_spi_shutdown,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5ee8809..70155e9 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -502,14 +502,19 @@
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -588,8 +593,6 @@
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -1663,6 +1666,7 @@
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
+ of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);
@@ -1730,6 +1734,8 @@
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -1951,13 +1957,7 @@
/* Port5 supports either RGMII or SGMII.
* Port6 supports SGMII only.
*/
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
+ if (port == 6) {
phylink_set(supported, 2500baseX_Full);
phylink_set(supported, 2500baseT_Full);
}
@@ -2286,8 +2286,6 @@
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
@@ -2314,8 +2312,6 @@
mt7530_mac_port_validate(struct dsa_switch *ds, int port,
unsigned long *supported)
{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
}
static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
@@ -2352,8 +2348,10 @@
}
/* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
+ if (state->interface != PHY_INTERFACE_MODE_MII) {
phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
priv->info->mac_port_validate(ds, port, mask);
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 24b8219..dafddf8 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 1992be7..371b345 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -666,44 +666,48 @@
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port *p;
- int err;
+ int err = 0;
p = &chip->ports[port];
- /* FIXME: is this the correct test? If we're in fixed mode on an
- * internal port, why should we process this any different from
- * PHY mode? On the other hand, the port may be automedia between
- * an internal PHY and the serdes...
- */
- if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
- return;
-
mv88e6xxx_reg_lock(chip);
- /* In inband mode, the link may come up at any time while the link
- * is not forced down. Force the link down while we reconfigure the
- * interface mode.
- */
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
- chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
- err = mv88e6xxx_port_config_interface(chip, port, state->interface);
- if (err && err != -EOPNOTSUPP)
- goto err_unlock;
+ if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+ /* In inband mode, the link may come up at any time while the
+ * link is not forced down. Force the link down while we
+ * reconfigure the interface mode.
+ */
+ if (mode == MLO_AN_INBAND &&
+ p->interface != state->interface &&
+ chip->info->ops->port_set_link)
+ chip->info->ops->port_set_link(chip, port,
+ LINK_FORCED_DOWN);
- err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
- state->advertising);
- /* FIXME: we should restart negotiation if something changed - which
- * is something we get if we convert to using phylinks PCS operations.
- */
- if (err > 0)
- err = 0;
+ err = mv88e6xxx_port_config_interface(chip, port,
+ state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+
+ err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+ state->interface,
+ state->advertising);
+ /* FIXME: we should restart negotiation if something changed -
+ * which is something we get if we convert to using phylinks
+ * PCS operations.
+ */
+ if (err > 0)
+ err = 0;
+ }
/* Undo the forced down state above after completing configuration
- * irrespective of its state on entry, which allows the link to come up.
+ * irrespective of its state on entry, which allows the link to come
+ * up in the in-band case where there is no separate SERDES. Also
+ * ensure that the link can come up if the PPU is in use and we are
+ * in PHY mode (we treat the PPU as an effective in-band mechanism.)
*/
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
+ if (chip->info->ops->port_set_link &&
+ ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+ (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
p->interface = state->interface;
@@ -3148,6 +3152,7 @@
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
if (err)
return err;
@@ -3297,6 +3302,7 @@
.port_set_link = mv88e6xxx_port_set_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index cd8d9b0..161a5ea 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -578,7 +578,8 @@
{ .offset = 0x87, .name = "tx_frames_below_65_octets", },
{ .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
{ .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
+ { .offset = 0x8A, .name = "tx_frames_256_511_octets", },
+ { .offset = 0x8B, .name = "tx_frames_512_1023_octets", },
{ .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
{ .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
{ .offset = 0x8E, .name = "tx_yellow_prio_0", },
@@ -1466,7 +1467,7 @@
err = dsa_register_switch(ds);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 4a2ec39..ec2ac91 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
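
The one-character change from "while (i-- >= 0)" to "while (--i >= 0)" matters because the post-decrement form tests the old value and therefore runs one extra time with the index already at -1. A few lines that can be compiled to see the safe variant (array size and indices are arbitrary):

#include <stdio.h>

#define N 4

int main(void)
{
    int created[N] = { 1, 1, 1, 0 }; /* creation failed at index 3 */
    int i = 3;                       /* index that failed */

    /* "while (i-- >= 0)" would still execute once with i == -1 and touch
     * created[-1]; predecrementing stops after index 0. */
    while (--i >= 0) {
        printf("destroying region %d\n", i);
        created[i] = 0;
    }
    return 0;
}
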
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 4ad8031..065fdbe 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -406,12 +406,12 @@
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct resource *mem, *irq;
+ struct resource *mem;
resource_size_t msize;
- int ret;
+ int ret, irq;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
dev_err(&pdev->dev, "no IRQ specified?\n");
return -ENXIO;
}
@@ -434,7 +434,7 @@
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- dev->irq = irq->start;
+ dev->irq = irq;
dev->base_addr = mem->start;
ret = mcf8390_init(dev);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a7d8d45..b779f3a 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -163,7 +163,8 @@
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +181,7 @@
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +191,8 @@
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 52414ac..1722d40 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4488,13 +4488,19 @@
static int __init ena_init(void)
{
+ int ret;
+
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
if (!ena_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
- return pci_register_driver(&ena_pci_driver);
+ ret = pci_register_driver(&ena_pci_driver);
+ if (ret)
+ destroy_workqueue(ena_wq);
+
+ return ret;
}
static void __exit ena_cleanup(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 2137690..a7166cd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -239,6 +239,7 @@
#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
+#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@@ -284,6 +285,8 @@
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
struct xgbe_sfp_ascii {
union {
char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -834,7 +837,11 @@
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
- max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+ XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
+ max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
+ else
+ max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
@@ -1151,7 +1158,10 @@
}
/* Determine the type of SFP */
- if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1167,9 +1177,6 @@
phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
- else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
- xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
- phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 78c7cbc..71151f6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1004,8 +1004,10 @@
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
- if (ret)
+ if (ret) {
+ xgene_enet_napi_disable(pdata);
return ret;
+ }
if (ndev->phydev) {
phy_start(ndev->phydev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 4a6dfac..ee823a1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -585,6 +585,7 @@
ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ memzero_explicit(&key_rec, sizeof(key_rec));
return ret;
}
@@ -932,6 +933,7 @@
ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+ memzero_explicit(&sa_key_record, sizeof(sa_key_record));
return ret;
}
@@ -1451,26 +1453,57 @@
egress_sa_threshold_expired);
}
+#define AQ_LOCKED_MDO_DEF(mdo) \
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \
+{ \
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev); \
+ int ret; \
+ mutex_lock(&nic->macsec_mutex); \
+ ret = aq_mdo_##mdo(ctx); \
+ mutex_unlock(&nic->macsec_mutex); \
+ return ret; \
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
+
const struct macsec_ops aq_macsec_ops = {
- .mdo_dev_open = aq_mdo_dev_open,
- .mdo_dev_stop = aq_mdo_dev_stop,
- .mdo_add_secy = aq_mdo_add_secy,
- .mdo_upd_secy = aq_mdo_upd_secy,
- .mdo_del_secy = aq_mdo_del_secy,
- .mdo_add_rxsc = aq_mdo_add_rxsc,
- .mdo_upd_rxsc = aq_mdo_upd_rxsc,
- .mdo_del_rxsc = aq_mdo_del_rxsc,
- .mdo_add_rxsa = aq_mdo_add_rxsa,
- .mdo_upd_rxsa = aq_mdo_upd_rxsa,
- .mdo_del_rxsa = aq_mdo_del_rxsa,
- .mdo_add_txsa = aq_mdo_add_txsa,
- .mdo_upd_txsa = aq_mdo_upd_txsa,
- .mdo_del_txsa = aq_mdo_del_txsa,
- .mdo_get_dev_stats = aq_mdo_get_dev_stats,
- .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
- .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
- .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
- .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+ .mdo_dev_open = aq_locked_mdo_dev_open,
+ .mdo_dev_stop = aq_locked_mdo_dev_stop,
+ .mdo_add_secy = aq_locked_mdo_add_secy,
+ .mdo_upd_secy = aq_locked_mdo_upd_secy,
+ .mdo_del_secy = aq_locked_mdo_del_secy,
+ .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+ .mdo_add_txsa = aq_locked_mdo_add_txsa,
+ .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+ .mdo_del_txsa = aq_locked_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
};
int aq_macsec_init(struct aq_nic_s *nic)
@@ -1492,6 +1525,7 @@
nic->ndev->features |= NETIF_F_HW_MACSEC;
nic->ndev->macsec_ops = &aq_macsec_ops;
+ mutex_init(&nic->macsec_mutex);
return 0;
}
@@ -1515,7 +1549,7 @@
if (!nic->macsec_cfg)
return 0;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
if (nic->aq_fw_ops->send_macsec_req) {
struct macsec_cfg_request cfg = { 0 };
@@ -1564,7 +1598,7 @@
ret = aq_apply_macsec_cfg(nic);
unlock:
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
return ret;
}
@@ -1576,9 +1610,9 @@
if (!netif_carrier_ok(nic->ndev))
return;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
aq_check_txsa_expiration(nic);
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
}
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
@@ -1589,21 +1623,30 @@
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->rxsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
{
+ int cnt;
+
if (!nic->macsec_cfg)
return 0;
- return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_lock(&nic->macsec_mutex);
+ cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_unlock(&nic->macsec_mutex);
+
+ return cnt;
}
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
@@ -1614,12 +1657,15 @@
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->txsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
@@ -1691,6 +1737,8 @@
if (!cfg)
return data;
+ mutex_lock(&nic->macsec_mutex);
+
aq_macsec_update_stats(nic);
common_stats = &cfg->stats;
@@ -1773,5 +1821,7 @@
data += i;
+ mutex_unlock(&nic->macsec_mutex);
+
return data;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 4af0cd9..ff245f7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -89,11 +89,8 @@
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0cf8ae8..2d491ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -480,8 +478,8 @@
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -511,8 +509,8 @@
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
@@ -872,7 +870,6 @@
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -922,11 +919,11 @@
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1240,7 +1237,6 @@
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1258,9 +1254,8 @@
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 926cca9..6da3efa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -152,6 +152,8 @@
struct mutex fwreq_mutex;
#if IS_ENABLED(CONFIG_MACSEC)
struct aq_macsec_cfg *macsec_cfg;
+ /* mutex to protect data in macsec_cfg */
+ struct mutex macsec_mutex;
#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1826253..a0ce213 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -385,7 +385,7 @@
}
}
-static int aq_suspend_common(struct device *dev, bool deep)
+static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -398,17 +398,15 @@
if (netif_running(nic->ndev))
aq_nic_stop(nic);
- if (deep) {
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- aq_nic_set_power(nic);
- }
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
rtnl_unlock();
return 0;
}
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -421,11 +419,6 @@
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (deep) {
- /* Reinitialize Nic/Vecs objects */
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- }
-
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -450,22 +443,22 @@
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 72f8751..e9c6f1f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -345,7 +345,6 @@
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -363,12 +362,17 @@
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -376,18 +380,17 @@
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@@ -445,7 +448,7 @@
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
AQ_CFG_RX_FRAME_MAX);
@@ -454,7 +457,6 @@
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f4774cf..6ab1f32 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -43,8 +43,8 @@
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +224,8 @@
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +248,8 @@
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +268,8 @@
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,8 +297,8 @@
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
if (i < self->rx_rings)
aq_ring_free(&ring[AQ_VEC_RX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 9f1b150..45c17c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -889,6 +889,13 @@
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 36c7cf0..4319249 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -757,6 +757,7 @@
u16 table_index)
{
u16 packed_record[18];
+ int ret;
if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
return -EINVAL;
@@ -789,9 +790,12 @@
packed_record[16] = rec->key_len & 0x3;
- return set_raw_ingress_record(hw, packed_record, 18, 2,
- ROWOFFSET_INGRESSSAKEYRECORD +
- table_index);
+ ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@
ret = set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index);
if (unlikely(ret))
- return ret;
+ goto clear_key;
ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index -
32);
- if (unlikely(ret))
- return ret;
- return 0;
+clear_key:
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
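
The macsec_api.c hunks call memzero_explicit() so the packed key material does not linger on the stack after the record has been written, on error paths included. A userspace approximation of the same idea using a volatile store loop (buffer layout and sizes are invented for the sketch):

#include <stdio.h>
#include <string.h>

/* Best-effort secure wipe: the volatile qualifier keeps the compiler from
 * dropping the stores as "dead" writes, the same intent as memzero_explicit(). */
static void wipe(void *p, size_t n)
{
    volatile unsigned char *v = p;

    while (n--)
        *v++ = 0;
}

static int program_key(const unsigned char *key, size_t len)
{
    unsigned char packed[32];

    /* pack the key into the (imagined) hardware record layout */
    memcpy(packed, key, len < sizeof(packed) ? len : sizeof(packed));

    /* ... the record would be written to the device here ... */

    /* clear the stack copy before returning, on success and error alike */
    wipe(packed, sizeof(packed));
    return 0;
}

int main(void)
{
    unsigned char key[16] = "0123456789abcde";

    return program_key(key, sizeof(key));
}
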
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index c26c9b0..fe3ca3a 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1468,7 +1468,7 @@
if (ret) {
netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
ret);
- goto err;
+ return ret;
}
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
@@ -1489,6 +1489,7 @@
err:
ag71xx_rings_cleanup(ag);
+ phylink_disconnect_phy(ag->phylink);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 7046ad6..ac50da4 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -16,3 +16,8 @@
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_tg3.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 1a703b9..82d369d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2592,8 +2592,10 @@
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index a5fd161..2674619 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -323,7 +323,6 @@
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
- kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 98ec1b8..9960127 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
@@ -1568,7 +1568,6 @@
phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 198e041..4f669e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -788,6 +788,7 @@
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6333471..afb6d3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14213,10 +14213,6 @@
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
@@ -14321,6 +14317,11 @@
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 08437ea..ac32783 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -795,16 +795,20 @@
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
- struct pci_dev *dev;
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ struct pci_dev *dev;
+ bool pending;
if (!vf)
return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
- if (dev)
- return bnx2x_is_pcie_pending(dev);
- return false;
+ if (!dev)
+ return false;
+ pending = bnx2x_is_pcie_pending(dev);
+ pci_dev_put(dev);
+
+ return pending;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index cb0c270..92f54e3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2575,6 +2575,10 @@
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
struct bnxt_cp_ring_info *cpr2;
+ /* No more budget for RX work */
+ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ break;
+
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
@@ -10453,7 +10457,7 @@
if (bp->flags & BNXT_FLAG_CHIP_P5)
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -12004,8 +12008,8 @@
rcu_read_lock();
hlist_for_each_entry_rcu(fltr, head, hash) {
if (bnxt_fltr_match(fltr, new_fltr)) {
+ rc = fltr->sw_id;
rcu_read_unlock();
- rc = 0;
goto err_free;
}
}
@@ -12481,10 +12485,9 @@
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- bp->dev->features |= NETIF_F_NTUPLE;
- }
+
+ bnxt_set_dflt_rfs(bp);
+
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
@@ -13108,8 +13111,16 @@
static int __init bnxt_init(void)
{
+ int err;
+
bnxt_debug_init();
- return pci_register_driver(&bnxt_pci_driver);
+ err = pci_register_driver(&bnxt_pci_driver);
+ if (err) {
+ bnxt_debug_exit();
+ return err;
+ }
+
+ return 0;
}
static void __exit bnxt_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 92f9f7f..34affd1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -569,7 +569,8 @@
#define BNXT_MAX_MTU 9500
#define BNXT_MAX_PAGE_MODE_MTU \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
- XDP_PACKET_HEADROOM)
+ XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
#define BNXT_MIN_PKT_SIZE 52
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 98087b2..81b63d1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -125,7 +125,7 @@
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
@@ -2041,9 +2041,7 @@
}
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 23b80aa..819f9df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -599,7 +599,7 @@
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a206214..e0a6a2e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1987,6 +1987,11 @@
return skb;
}
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+ __skb_pull(skb, sizeof(struct status_64));
+}
+
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2093,6 +2098,8 @@
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+ bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -3939,6 +3946,10 @@
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5143cdd..be96116 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18146,16 +18146,20 @@
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f29ec76..792c814 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1092,7 +1092,6 @@
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1131,6 +1130,7 @@
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1531,6 +1531,7 @@
unsigned int head = queue->tx_head;
unsigned int tail = queue->tx_tail;
struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1538,6 +1539,13 @@
if (head == tail)
return;
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+ if (tbqp == head_idx)
+ return;
+
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e0d18e9..eefb25b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1798,13 +1798,10 @@
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- if (OCTEON_CN23XX_PF(oct)) {
- if (!oct->msix_on)
- if (setup_tx_poll_fn(netdev))
- return -1;
- } else {
- if (setup_tx_poll_fn(netdev))
- return -1;
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+ ret = setup_tx_poll_fn(netdev);
+ if (ret)
+ goto err_poll;
}
netif_tx_start_all_queues(netdev);
@@ -1817,7 +1814,7 @@
/* tell Octeon to start forwarding packets to host */
ret = send_rx_ctrl_cmd(lio, 1);
if (ret)
- return ret;
+ goto err_rx_ctrl;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1828,6 +1825,27 @@
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
+ return 0;
+
+err_rx_ctrl:
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+err_poll:
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
return ret;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8ff28ed..f0e48b9 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1438,8 +1438,10 @@
return AE_OK;
}
- if (strncmp(string.pointer, bgx_sel, 4))
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
return AE_OK;
+ }
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
bgx_acpi_register_phy, NULL, bgx, NULL);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 84ad726..8a167ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1302,6 +1302,7 @@
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
+ quiesce_rx(adap);
free_irq_resources(adap);
err = ret;
goto out;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index c5b0e72..2169351 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3476,7 +3477,7 @@
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3493,7 +3494,7 @@
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3505,7 +3506,7 @@
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3518,29 +3519,34 @@
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+ if (!padap->tc_mqprio)
+ goto out;
+
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3578,6 +3584,10 @@
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2820a0b..5e1e464 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -858,7 +858,7 @@
*/
err = t4vf_update_port_info(pi);
if (err < 0)
- return err;
+ goto err_unwind;
/*
* Note that this interface is up and start everything up ...
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index d6b6ebb..cd6e016 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1150,7 +1150,7 @@
fl6.daddr = ip6h->saddr;
fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
- security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+ security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
if (IS_ERR(dst))
goto free_sk;
@@ -1235,8 +1235,8 @@
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1383,7 +1383,7 @@
#endif
}
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index d11fcfd..85ea073 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1920,7 +1920,7 @@
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1932,11 +1932,11 @@
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1946,15 +1946,15 @@
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2032,18 +2032,18 @@
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2051,13 +2051,13 @@
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2066,7 +2066,7 @@
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index e7b0d7d..c22d945 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1396,8 +1396,10 @@
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
- if (!dev)
+ if (!dev) {
+ pci_disable_device(pdev);
return -ENOMEM;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1774,6 +1776,7 @@
err_out_free_netdev:
free_netdev (dev);
+ pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 649c5c4..1288b5e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2287,7 +2287,7 @@
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data)
+ u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@@ -2321,10 +2321,10 @@
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
- if (!status) {
+ if (!status && len > 0) {
struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ memcpy(data, resp->page_data + off, len);
}
err:
mutex_unlock(&adapter->mcc_lock);
@@ -2415,7 +2415,7 @@
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@@ -2440,7 +2440,7 @@
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
strlcpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index c30d6d6..9e17d6a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2427,7 +2427,7 @@
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data);
+ u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 99cc1c4..d90bf45 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1338,7 +1338,7 @@
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -1356,25 +1356,32 @@
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
+ u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- data);
- if (status)
- goto err;
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
- status = be_cmd_read_port_transceiver_data(adapter,
- TR_PAGE_A2,
- data +
- PAGE_DATA_LEN);
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
+
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
if (status)
goto err;
}
- if (eeprom->offset)
- memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 5bc11d1..969af4d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1747,6 +1747,19 @@
return rc;
}
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+ struct device_node *child_np = of_get_child_by_name(np, name);
+ bool ret = false;
+
+ if (child_np) {
+ ret = true;
+ of_node_put(child_np);
+ }
+
+ return ret;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1860,7 +1873,7 @@
/* Display what we found */
phy_attached_info(phy);
- } else if (np && !of_get_child_by_name(np, "mdio")) {
+ } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that decribe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available
@@ -1893,6 +1906,11 @@
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 1268996..2f90754 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -489,11 +489,15 @@
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d89ddc1..3540120 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1349,8 +1349,8 @@
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa..208a345 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -168,7 +168,7 @@
base = of_iomap(node, 0);
if (!base) {
err = -ENOMEM;
- goto err_close;
+ goto err_put;
}
err = fsl_mc_allocate_irqs(mc_dev);
@@ -212,6 +212,8 @@
fsl_mc_free_irqs(mc_dev);
err_unmap:
iounmap(base);
+err_put:
+ of_node_put(node);
err_close:
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
err_free_mcp:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 15aa3b3..975762c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1212,7 +1212,7 @@
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN;
+ tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@@ -1241,7 +1241,12 @@
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ /* Also prepare the consumer index in case page allocation never
+ * succeeds. In that case, hardware will never advance producer index
+ * to match consumer index, and will drop all frames.
+ */
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
@@ -1267,13 +1272,14 @@
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@@ -1306,13 +1312,14 @@
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_clear_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_clear_rxbdr(hw, priv->rx_ring[i]);
udelay(1);
}
@@ -1320,13 +1327,13 @@
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
@@ -1414,13 +1421,14 @@
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
@@ -1560,6 +1568,7 @@
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
u8 num_tc;
int i;
@@ -1574,7 +1583,8 @@
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
return 0;
@@ -1593,7 +1603,8 @@
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ tx_ring->prio = i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
/* Reset the number of netdev queues based on the TC count */
@@ -1671,52 +1682,29 @@
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -1728,11 +1716,6 @@
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 15d19cb..725c3d1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -58,6 +58,7 @@
void __iomem *rcir;
};
u16 index;
+ u16 prio;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
@@ -301,8 +302,7 @@
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
@@ -335,22 +335,24 @@
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -410,4 +412,9 @@
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 9c1690f..cf98a00 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -651,7 +651,10 @@
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 716b396..515db7e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -671,6 +671,13 @@
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -684,7 +691,9 @@
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -739,9 +748,6 @@
ndev->priv_flags |= IFF_UNICAST_FLT;
- if (si->hw_features & ENETC_SI_F_QBV)
- priv->active_offloads |= ENETC_F_QBV;
-
if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
priv->active_offloads |= ENETC_F_QCI;
ndev->features |= NETIF_F_HW_TC;
@@ -987,7 +993,8 @@
struct enetc_ndev_priv *priv;
priv = netdev_priv(pf->si->ndev);
- if (priv->active_offloads & ENETC_F_QBV)
+
+ if (pf->si->hw_features & ENETC_SI_F_QBV)
enetc_sched_speed_set(priv, speed);
if (!phylink_autoneg_inband(mode) &&
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 9e6988f..5841721 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -17,8 +17,9 @@
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,16 +40,15 @@
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
static int enetc_setup_taprio(struct net_device *ndev,
struct tc_taprio_qopt_offload *admin_conf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
@@ -60,15 +60,16 @@
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
return -EINVAL;
gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ tge = enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET);
if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE);
+
+ priv->active_offloads &= ~ENETC_F_QBV;
+
return 0;
}
@@ -123,18 +124,18 @@
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge | ENETC_QBV_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE);
dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
kfree(gcl_data);
+ if (!err)
+ priv->active_offloads |= ENETC_F_QBV;
+
return err;
}
@@ -142,6 +143,8 @@
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_bdr *tx_ring;
int err;
int i;
@@ -150,18 +153,20 @@
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
- taprio->enable ? i : 0);
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? i : 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
err = enetc_setup_taprio(ndev, taprio);
-
- if (err)
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
- taprio->enable ? 0 : i);
+ if (err) {
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = taprio->enable ? 0 : i;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+ }
return err;
}
@@ -182,7 +187,7 @@
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -203,15 +208,15 @@
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -228,13 +233,13 @@
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -243,7 +248,7 @@
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interfrence_size is maxSizedFrame.
*
@@ -263,8 +268,8 @@
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -284,10 +289,10 @@
hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -297,6 +302,7 @@
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -312,12 +318,11 @@
return -EBUSY;
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -1525,6 +1530,29 @@
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 33c1257..5ce3e25 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,9 @@
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
}
/* Probing/ Init */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 166bc3f..686bb87 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -623,7 +623,7 @@
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
bdp->cbd_datlen = cpu_to_fec16(size);
@@ -685,7 +685,7 @@
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
@@ -2251,6 +2251,31 @@
IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
+/* for i.MX6ul */
+static u32 fec_enet_register_offset_6ul[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
+ FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
+ FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
#else
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
@@ -2275,7 +2300,24 @@
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+ u32 *reg_list;
+ u32 reg_cnt;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ reg_list = fec_enet_register_offset;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+ } else {
+ reg_list = fec_enet_register_offset_6ul;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
+ }
+#else
+ /* coldfire */
+ static u32 *reg_list = fec_enet_register_offset;
+ static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+#endif
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2284,8 +2326,8 @@
memset(buf, 0, regs->len);
- for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i];
+ for (i = 0; i < reg_cnt; i++) {
+ off = reg_list[i];
if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
!(fep->quirks & FEC_QUIRK_HAS_FRREG))
@@ -3529,7 +3571,7 @@
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
- return ret;
+ goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index d71eac7..c5ae673 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -136,11 +136,7 @@
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 6eeccc1..3312dc4 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -884,12 +884,21 @@
return err;
}
+static int mac_remove(struct platform_device *pdev)
+{
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ return 0;
+}
+
static struct platform_driver mac_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mac_match,
},
.probe = mac_probe,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c2..61f4b6e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 7b44769..c53a043 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -172,14 +172,14 @@
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -193,10 +193,10 @@
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -254,13 +254,13 @@
data[i++] = rx->cnt;
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
/* rx dropped packets */
@@ -313,9 +313,9 @@
data[i++] = tx->done;
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6cb75bb..f0c1e6c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -40,10 +40,10 @@
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -53,10 +53,10 @@
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -1041,9 +1041,9 @@
if (priv->tx) {
for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
do {
- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 00fafc0..430ecce 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -419,8 +419,10 @@
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
- if (ret)
+ if (ret) {
+ put_device(&hdev->cls_dev);
return ret;
+ }
__module_get(THIS_MODULE);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 7b94764..2070e26 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7616,12 +7616,11 @@
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret) {
+ if (!ret || ret == -ENOENT) {
mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
mutex_unlock(&hdev->vport_lock);
- } else if (ret == -ENOENT) {
- ret = 0;
+ return 0;
}
return ret;
@@ -9198,11 +9197,11 @@
}
if (!ret) {
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
+ if (!is_kill)
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
+ else if (is_kill && vlan_id != 0)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
} else if (is_kill) {
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 5d39967..51b7b46 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -91,6 +91,13 @@
enum hclge_cmd_status status;
struct hclge_desc desc;
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -173,7 +180,7 @@
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
- return -ENOMEM;
+ return -EINVAL;
for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
@@ -577,9 +584,9 @@
return hclge_set_vport_mtu(vport, mtu);
}
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
@@ -589,17 +596,18 @@
if (queue_id >= handle->kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
queue_id, mbx_req->mbx_src_vfid);
- return;
+ return -EINVAL;
}
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
+ return 0;
}
-static void hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
@@ -615,13 +623,14 @@
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
- return;
+ return -EINVAL;
}
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -800,10 +809,10 @@
"VF fail(%d) to set mtu\n", ret);
break;
case HCLGE_MBX_GET_QID_IN_PF:
- hclge_get_queue_id_in_pf(vport, req, &resp_msg);
+ ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_RSS_KEY:
- hclge_get_rss_key(vport, req, &resp_msg);
+ ret = hclge_get_rss_key(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_MODE:
hclge_get_link_mode(vport, req);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
index 19eb839..061952c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -85,6 +85,7 @@
struct tag_sml_funcfg_tbl *funcfg_table_elem;
struct hinic_cmd_lt_rd *read_data;
u16 out_size = sizeof(*read_data);
+ int ret = ~0;
int err;
read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
@@ -111,20 +112,25 @@
switch (idx) {
case VALID:
- return funcfg_table_elem->dw0.bs.valid;
+ ret = funcfg_table_elem->dw0.bs.valid;
+ break;
case RX_MODE:
- return funcfg_table_elem->dw0.bs.nic_rx_mode;
+ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+ break;
case MTU:
- return funcfg_table_elem->dw1.bs.mtu;
+ ret = funcfg_table_elem->dw1.bs.mtu;
+ break;
case RQ_DEPTH:
- return funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ break;
case QUEUE_NUM:
- return funcfg_table_elem->dw13.bs.cfg_q_num;
+ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+ break;
}
kfree(read_data);
- return ~0;
+ return ret;
}
static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index fb3e891..a4fbf44 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -95,9 +95,6 @@
u16 sq_depth;
u16 rq_depth;
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-
u8 rss_tmpl_idx;
u8 rss_hash_engine;
u16 num_rss;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 4e4029d..9553d28 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -818,7 +818,6 @@
{
struct hinic_hwif *hwif = attr->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
chain->hwif = hwif;
chain->chain_type = attr->chain_type;
@@ -830,8 +829,8 @@
sema_init(&chain->sem, 1);
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
if (!chain->cell_ctxt)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 5a6bbee..dff979f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -796,11 +796,10 @@
struct hinic_cmdq_ctxt *cmdq_ctxts;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
int err;
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
if (!cmdq_ctxts)
return -ENOMEM;
@@ -884,7 +883,6 @@
struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
struct pci_dev *pdev = hwif->pdev;
struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
u16 max_wqe_size;
int err;
@@ -895,8 +893,8 @@
if (!cmdqs->cmdq_buf_pool)
return -ENOMEM;
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
if (!cmdqs->saved_wqs) {
err = -ENOMEM;
goto err_saved_wqs;
@@ -931,7 +929,7 @@
err_set_cmdq_depth:
hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
+ free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 0c74f66..bcf2476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -162,7 +162,6 @@
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
int i, err;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
@@ -171,8 +170,8 @@
if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
GFP_KERNEL);
if (!hwdev->msix_entries)
return -ENOMEM;
@@ -893,7 +892,7 @@
if (err)
return -EINVAL;
- interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt;
+ interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 19942fe..7396158 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -631,16 +631,15 @@
struct hinic_hwif *hwif = eq->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 init_val, addr, val;
- size_t addr_size;
int err, pg;
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
if (!eq->dma_addr)
return -ENOMEM;
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
if (!eq->virt_addr) {
err = -ENOMEM;
goto err_virt_addr_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 819fa13..027dcc4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -647,6 +647,7 @@
err = alloc_msg_buf(pf_to_mgmt);
if (err) {
dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
@@ -654,6 +655,7 @@
err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
if (err) {
dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
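The hinic_hw_mgmt.c hunk above adds destroy_workqueue() to two error paths so the management workqueue created earlier in the function is not leaked when buffer allocation or command-chain setup fails afterwards. A sketch of that unwind ordering with hypothetical demo_mgmt_init()/demo_mgmt_exit() helpers:

#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *demo_wq;
static void *demo_buf;

static int demo_mgmt_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_mgmt");
	if (!demo_wq)
		return -ENOMEM;

	demo_buf = kzalloc(128, GFP_KERNEL);
	if (!demo_buf) {
		/* undo the earlier step before returning the error */
		destroy_workqueue(demo_wq);
		return -ENOMEM;
	}

	return 0;
}

static void demo_mgmt_exit(void)
{
	kfree(demo_buf);
	destroy_workqueue(demo_wq);
}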
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 5dc3743..f930cd6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -192,20 +192,20 @@
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
@@ -378,15 +378,14 @@
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(*wq->shadow_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
@@ -771,7 +770,7 @@
/* If we only have one page, still need to get shadown wqe when
* wqe rolling-over page
*/
- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
@@ -841,7 +840,10 @@
*cons_idx = curr_cons_idx;
- if (curr_pg != end_pg) {
+ /* If we only have one page, still need to get shadown wqe when
+ * wqe rolling-over page
+ */
+ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 350225b..6ec042d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,8 +62,6 @@
#define HINIC_LRO_RX_TIMER_DEFAULT 16
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
-
#define work_to_rx_mode_work(work) \
container_of(work, struct hinic_rx_mode_work, work)
@@ -82,56 +80,44 @@
netdev_features_t pre_features,
netdev_features_t features, bool force_change);
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
struct hinic_rxq_stats rx_stats;
- u64_stats_init(&rx_stats.syncp);
-
hinic_rxq_get_stats(rxq, &rx_stats);
- u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
nic_rx_stats->errors += rx_stats.errors;
nic_rx_stats->csum_errors += rx_stats.csum_errors;
nic_rx_stats->other_errors += rx_stats.other_errors;
- u64_stats_update_end(&nic_rx_stats->syncp);
-
- hinic_rxq_clean_stats(rxq);
}
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
struct hinic_txq_stats tx_stats;
- u64_stats_init(&tx_stats.syncp);
-
hinic_txq_get_stats(txq, &tx_stats);
- u64_stats_update_begin(&nic_tx_stats->syncp);
nic_tx_stats->bytes += tx_stats.bytes;
nic_tx_stats->pkts += tx_stats.pkts;
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
- u64_stats_update_end(&nic_tx_stats->syncp);
-
- hinic_txq_clean_stats(txq);
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
{
int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
}
/**
@@ -144,13 +130,12 @@
{
int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
if (nic_dev->txqs)
return -EINVAL;
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -242,13 +227,12 @@
{
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
if (nic_dev->rxqs)
return -EINVAL;
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
if (!nic_dev->rxqs)
return -ENOMEM;
@@ -569,8 +553,6 @@
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- update_nic_stats(nic_dev);
-
up(&nic_dev->mgmt_lock);
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
@@ -864,26 +846,19 @@
struct rtnl_link_stats64 *stats)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
-
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
-
- down(&nic_dev->mgmt_lock);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
- up(&nic_dev->mgmt_lock);
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
- stats->rx_errors = nic_rx_stats->errors;
-
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
}
static int hinic_set_features(struct net_device *netdev,
@@ -1182,8 +1157,6 @@
static int nic_dev_init(struct pci_dev *pdev)
{
struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -1244,15 +1217,8 @@
sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
-
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
-
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
goto err_vlan_bitmap;
@@ -1536,8 +1502,15 @@
static int __init hinic_module_init(void)
{
+ int ret;
+
hinic_dbg_register_debugfs(HINIC_DRV_NAME);
- return pci_register_driver(&hinic_driver);
+
+ ret = pci_register_driver(&hinic_driver);
+ if (ret)
+ hinic_dbg_unregister_debugfs();
+
+ return ret;
}
static void __exit hinic_module_exit(void)
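hinic_module_init() above now tears down the debugfs root when pci_register_driver() fails, so a failed module load no longer leaves a stale debugfs directory behind. The same init/unwind shape with plain debugfs and PCI calls, using demo_* placeholders instead of the hinic helpers:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/pci.h>

static struct dentry *demo_dbg_root;

static struct pci_driver demo_driver = {
	.name = "demo",		/* id_table/probe omitted in this sketch */
};

static int __init demo_module_init(void)
{
	int ret;

	demo_dbg_root = debugfs_create_dir("demo", NULL);

	ret = pci_register_driver(&demo_driver);
	if (ret)
		/* unwind the debugfs registration on failure */
		debugfs_remove_recursive(demo_dbg_root);

	return ret;
}

static void __exit demo_module_exit(void)
{
	pci_unregister_driver(&demo_driver);
	debugfs_remove_recursive(demo_dbg_root);
}

module_init(demo_module_init);
module_exit(demo_module_exit);
MODULE_LICENSE("GPL");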
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 070a7cc..30ab052 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -73,17 +73,15 @@
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
+ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
/**
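The stats readers in hinic_rx.c (and hinic_tx.c further down) drop the writer-side u64_stats_update_begin/end on the destination copy and switch to the _irq variants of the seqcount read loop, the form used when the writers can run in interrupt or bottom-half context. The reader idiom in isolation, sketched with a hypothetical struct demo_stats:

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_snapshot(const struct demo_stats *src, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	/* retry until a consistent pair is read while a writer may be
	 * updating the counters concurrently
	 */
	do {
		start = u64_stats_fetch_begin_irq(&src->syncp);
		*pkts = src->pkts;
		*bytes = src->bytes;
	} while (u64_stats_fetch_retry_irq(&src->syncp, start));
}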
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index f8a2645..4d82ebf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -1178,7 +1178,6 @@
dev_err(&hwdev->hwif->pdev->dev,
"Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
err, register_info.status, out_size);
- hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
return -EIO;
}
} else {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 8da7d46..c12d814 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -97,17 +97,15 @@
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
+ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}
/**
@@ -861,7 +859,6 @@
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
int err, irqname_len;
- size_t sges_size;
txq->netdev = netdev;
txq->sq = sq;
@@ -870,13 +867,13 @@
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index f630667..28a5f8d 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2897,6 +2897,7 @@
ret = of_device_register(&port->ofdev);
if (ret) {
pr_err("failed to register device. ret=%d\n", ret);
+ put_device(&port->ofdev.dev);
goto out;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 95bee3d..7fe2e47 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -117,7 +117,7 @@
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
offsetof(struct ibmvnic_statistics, stat))
-#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
static const struct ibmvnic_stat ibmvnic_stats[] = {
{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
@@ -2063,14 +2063,14 @@
rc = reset_tx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
rc = reset_rx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
}
@@ -2331,7 +2331,8 @@
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- ret = adapter->init_done_rc = EAGAIN;
+ adapter->init_done_rc = EAGAIN;
+ ret = EAGAIN;
goto err;
}
@@ -2665,13 +2666,8 @@
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
- } else {
- ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- }
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2684,23 +2680,21 @@
struct ethtool_ringparam *ring)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
+ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+ netdev_err(netdev, "Invalid request.\n");
+ netdev_err(netdev, "Max tx buffers = %llu\n",
+ adapter->max_rx_add_entries_per_subcrq);
+ netdev_err(netdev, "Max rx buffers = %llu\n",
+ adapter->max_tx_entries_per_subcrq);
+ return -EINVAL;
+ }
+
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
- adapter->req_tx_entries_per_subcrq != ring->tx_pending))
- netdev_info(netdev,
- "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- ring->rx_pending, ring->tx_pending,
- adapter->req_rx_add_entries_per_subcrq,
- adapter->req_tx_entries_per_subcrq);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2708,14 +2702,8 @@
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
- } else {
- channels->max_rx = IBMVNIC_MAX_QUEUES;
- channels->max_tx = IBMVNIC_MAX_QUEUES;
- }
-
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -2728,23 +2716,11 @@
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_queues != channels->rx_count ||
- adapter->req_tx_queues != channels->tx_count))
- netdev_info(netdev,
- "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- channels->rx_count, channels->tx_count,
- adapter->req_rx_queues, adapter->req_tx_queues);
- return ret;
-
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2752,43 +2728,32 @@
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
- i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
-
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN,
- "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
- }
- break;
-
- case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- ibmvnic_priv_flags[i]);
- break;
- default:
+ if (stringset != ETH_SS_STATS)
return;
+
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
+
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
+
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
+
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
+
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
}
}
@@ -2801,8 +2766,6 @@
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
- case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -2833,8 +2796,8 @@
return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
- data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
- ibmvnic_stats[i].offset));
+ data[i] = be64_to_cpu(IBMVNIC_GET_STAT
+ (adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
data[i] = adapter->tx_stats_buffers[j].packets;
@@ -2855,25 +2818,6 @@
}
}
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
- return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
- if (which_maxes)
- adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
- else
- adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
- return 0;
-}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -2887,8 +2831,6 @@
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
- .get_priv_flags = ibmvnic_get_priv_flags,
- .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
@@ -3119,7 +3061,7 @@
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode which is expected, but not an error.
*/
- if (rc && (rc != H_FUNCTION))
+ if (rc && rc != H_FUNCTION)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}
@@ -5286,6 +5228,15 @@
release_sub_crqs(adapter, 0);
rc = init_sub_crqs(adapter);
} else {
+ /* no need to reinitialize completely, but we do
+ * need to clean up transmits that were in flight
+ * when we processed the reset. Failure to do so
+ * will confound the upper layer, usually TCP, by
+ * creating the illusion of transmits that are
+ * awaiting completion.
+ */
+ clean_tx_pools(adapter);
+
rc = reset_sub_crq_queues(adapter);
}
} else {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index b272110..2eb9a4d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -41,11 +41,6 @@
#define IBMVNIC_RESET_DELAY 100
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
- "use-server-maxes"
-};
-
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -974,7 +969,6 @@
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
- u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 15b1503..1f51252 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1006,8 +1006,8 @@
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
- u16 lat_enc_d = 0; /* latency decoded */
+ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index effdc33..dd630b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -37,6 +37,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
+#include <linux/bitfield.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include <linux/net/intel/i40e_client.h>
@@ -991,6 +992,21 @@
(u32)(val & 0xFFFFFFFFULL));
}
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the function number of the highest PCI physical
+ * function plus 1 as it is loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+ return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+ rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
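i40e_get_pf_count() above uses FIELD_GET() from <linux/bitfield.h> (also added to the include list) to pull the PF-count field out of I40E_GLGEN_PCIFCNCNT; FIELD_GET() masks the register value and shifts the field down to bit 0 based solely on the mask. A standalone sketch with a made-up register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* hypothetical 32-bit register: bits 4..0 hold a function count */
#define DEMO_FCNT_MASK	GENMASK(4, 0)

static u32 demo_func_count(u32 reg_val)
{
	/* extracts bits 4..0 of reg_val and right-aligns them */
	return FIELD_GET(DEMO_FCNT_MASK, reg_val);
}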
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 32f3fac..b3cb5d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -178,6 +178,10 @@
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -374,7 +378,6 @@
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a2bdb29..144c482 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2083,9 +2083,6 @@
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
- err = i40e_alloc_rx_bi(&rx_rings[i]);
- if (err)
- goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
@@ -2582,15 +2579,16 @@
set_bit(__I40E_TESTING, pf->state);
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ dev_warn(&pf->pdev->dev,
+ "Cannot start offline testing when PF is in reset state.\n");
+ goto skip_ol_tests;
+ }
+
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
- data[I40E_ETH_TEST_REG] = 1;
- data[I40E_ETH_TEST_EEPROM] = 1;
- data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LINK] = 1;
- eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -2637,9 +2635,17 @@
data[I40E_ETH_TEST_INTR] = 0;
}
-skip_ol_tests:
-
netif_info(pf, drv, netdev, "testing finished\n");
+ return;
+
+skip_ol_tests:
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, pf->state);
+ netif_info(pf, drv, netdev, "testing failed\n");
}
static void i40e_get_wol(struct net_device *netdev,
@@ -3077,10 +3083,17 @@
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
- if (i_set & I40E_L3_SRC_MASK)
- cmd->data |= RXH_IP_SRC;
- if (i_set & I40E_L3_DST_MASK)
- cmd->data |= RXH_IP_DST;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i_set & I40E_X722_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_X722_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
if (i_set & I40E_L3_V6_SRC_MASK)
@@ -3387,12 +3400,15 @@
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
* @nfc: pointer to user request
* @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ struct ethtool_rxnfc *nfc,
+ u64 i_setc)
{
u64 i_set = i_setc;
u64 src_l3 = 0, dst_l3 = 0;
@@ -3411,8 +3427,13 @@
dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
- src_l3 = I40E_L3_SRC_MASK;
- dst_l3 = I40E_L3_DST_MASK;
+ if (hw->mac.type == I40E_MAC_X722) {
+ src_l3 = I40E_X722_L3_SRC_MASK;
+ dst_l3 = I40E_X722_L3_DST_MASK;
+ } else {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ }
} else {
/* Any other flow type are not supported here */
return i_set;
@@ -3430,6 +3451,7 @@
return i_set;
}
+#define FLOW_PCTYPES_SIZE 64
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
@@ -3442,9 +3464,11 @@
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- u8 flow_pctype = 0;
+ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
u64 i_set, i_setc;
+ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
@@ -3460,36 +3484,35 @@
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
@@ -3522,17 +3545,20 @@
return -EINVAL;
}
- if (flow_pctype) {
- i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
- flow_pctype)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
- flow_pctype)) << 32);
- i_set = i40e_get_rss_hash_bits(nfc, i_setc);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
- (u32)i_set);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
- (u32)(i_set >> 32));
- hena |= BIT_ULL(flow_pctype);
+ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+ u8 flow_id;
+
+ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_id);
+ }
}
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
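The RSS hash hunk above replaces the single flow_pctype variable with a bitmap so that one ethtool request can enable several packet-classifier types (for example TCPv4 plus its SYN-no-ACK companion) and program the hash input set for each of them. The bitmap idiom on its own, with placeholder type IDs:

#include <linux/bitmap.h>
#include <linux/bits.h>

#define DEMO_MAX_TYPES	64

static u64 demo_collect_types(void)
{
	DECLARE_BITMAP(types, DEMO_MAX_TYPES);
	unsigned int id;
	u64 ena = 0;

	bitmap_zero(types, DEMO_MAX_TYPES);
	set_bit(3, types);	/* e.g. the base classifier type */
	set_bit(7, types);	/* e.g. a companion type enabled with it */

	/* visit every recorded type and build the enable mask */
	for_each_set_bit(id, types, DEMO_MAX_TYPES)
		ena |= BIT_ULL(id);

	return ena;
}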
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bd18a78..ea6a984 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -382,7 +382,9 @@
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -549,6 +551,47 @@
}
/**
+ * i40e_compute_pci_to_hw_id - compute index form PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+ int pf_count = i40e_get_pf_count(hw);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+ return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ new_data = rd64(hw, loreg);
+
+ if (!offset_loaded || new_data < *offset)
+ *offset = new_data;
+ *stat = new_data - *offset;
+}
+
+/**
* i40e_stat_update48 - read and update a 48 bit stat from the chip
* @hw: ptr to the hardware info
* @hireg: the high 32 bit reg to read
@@ -620,6 +663,34 @@
}
/**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+ int stat_idx, bool offset_loaded,
+ struct i40e_eth_stats *stat_offset,
+ struct i40e_eth_stats *stat)
+{
+ u64 rx_rdpc, rx_rxerr;
+
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+ &stat_offset->rx_discards, &rx_rdpc);
+ i40e_stat_update64(hw,
+ I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+ I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+ offset_loaded, &stat_offset->rx_discards_other,
+ &rx_rxerr);
+
+ stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
* i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
* @vsi: the VSI to be updated
**/
@@ -678,6 +749,10 @@
I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+
+ i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+ vsi->stat_offsets_loaded, oes, es);
+
vsi->stat_offsets_loaded = true;
}
@@ -1834,11 +1909,15 @@
* non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes
+ * We need at least one queue pair for the interface
+ * to be usable as we see in else statement.
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
+ else
+ vsi->num_queue_pairs = 1;
}
/* Number of queues per enabled TC */
@@ -3330,12 +3409,8 @@
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- kfree(ring->rx_bi);
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ret = i40e_alloc_rx_bi_zc(ring);
- if (ret)
- return ret;
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
@@ -3353,9 +3428,6 @@
ring->queue_index);
} else {
- ret = i40e_alloc_rx_bi(ring);
- if (ret)
- return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -5655,6 +5727,26 @@
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5676,10 +5768,10 @@
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -7175,42 +7267,43 @@
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
- struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list))
- return -EINVAL;
-
/* Go through the list and find an available channel */
- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
- if (!i40e_is_channel_macvlan(ch)) {
- ch->fwd = fwd;
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
- ch->num_queue_pairs,
- ch->base_queue);
- for (i = 0; i < ch->num_queue_pairs; i++) {
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
- pf_q = ch->base_queue + i;
+ pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
- tx_ring->ch = ch;
+ tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
- rx_ring->ch = ch;
+ rx_ring->ch = iter;
}
+ ch = iter;
break;
}
}
+ if (!ch)
+ return -EINVAL;
+
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/
@@ -7639,9 +7732,9 @@
if (pf->flags & I40E_FLAG_TC_MQPRIO) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -8162,6 +8255,11 @@
return -EOPNOTSUPP;
}
+ if (!tc) {
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
@@ -10108,7 +10206,7 @@
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
- int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
@@ -10116,13 +10214,11 @@
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- i40e_check_recovery_mode(pf)) {
+ is_recovery_mode_reported)
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
- }
if (test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !old_recovery_mode_bit)
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -10149,13 +10245,12 @@
* accordingly with regard to resources initialization
* and deinitialization
*/
- if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
- old_recovery_mode_bit) {
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
if (i40e_get_capabilities(pf,
i40e_aqc_opc_list_func_capabilities))
goto end_unlock;
- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ if (is_recovery_mode_reported) {
/* we're staying in recovery mode so we'll reinitialize
* misc vector here
*/
@@ -10284,10 +10379,10 @@
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
@@ -12582,6 +12677,14 @@
i40e_reset_and_rebuild(pf, true, true);
}
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -12814,6 +12917,7 @@
i40e_queue_pair_disable_irq(vsi, queue_pair);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
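i40e_stat_update64() added above follows the driver's existing offset scheme: the hardware counters are not cleared by a PF reset, so the first value read is remembered as an offset and subtracted from later reads, and the offset is re-latched if the raw value ever drops below it. The arithmetic in isolation, with a hypothetical struct demo_counter:

#include <linux/types.h>

struct demo_counter {
	u64 offset;
	bool offset_loaded;
};

/* raw is the value read from hardware; the result counts from zero */
static u64 demo_stat_update(struct demo_counter *c, u64 raw)
{
	if (!c->offset_loaded || raw < c->offset)
		c->offset = raw;
	c->offset_loaded = true;
	return raw - c->offset;
}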
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8335f15..117fd96 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -77,6 +77,11 @@
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
@@ -461,6 +466,14 @@
#define I40E_VFQF_HKEY1_MAX_INDEX 12
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 5ad2812..43be33d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1305,14 +1305,6 @@
return -ENOMEM;
}
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1443,6 +1435,11 @@
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 93ac201..af843e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -465,7 +465,6 @@
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index add67f7..0872448 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1172,6 +1172,7 @@
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
+ u64 rx_discards_other; /* rxerr1 */
};
/* Statistics collected per VEB per TC */
@@ -1403,6 +1404,10 @@
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT 49
+#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT 41
+#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 9181e00..381b28a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1483,10 +1483,12 @@
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
- */
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1523,7 +1525,7 @@
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
@@ -1556,8 +1558,12 @@
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1573,9 +1579,11 @@
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
- break;
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1603,6 +1611,10 @@
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1614,6 +1626,10 @@
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1623,8 +1639,13 @@
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);
@@ -1986,6 +2007,25 @@
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2085,6 +2125,7 @@
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
@@ -2228,7 +2269,7 @@
}
if (vf->adq_enabled) {
- for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
aq_ret = I40E_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index a554d0a..358bbdb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -39,6 +39,7 @@
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
+ I40E_VF_STATE_RESETTING
};
/* VF capabilities */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 86c79f7..7f12261 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -9,14 +9,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi_zc, 0,
@@ -29,6 +21,58 @@
}
/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try allocating memory and return ENOMEM, if failed to allocate.
+ * If allocation was successful, substitute buffer with allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
* i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
* certain ring/qid
* @vsi: Current VSI
@@ -68,6 +112,10 @@
if (err)
return err;
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -112,6 +160,9 @@
xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -247,21 +298,25 @@
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
xsk_buff_free(xdp);
return skb;
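i40e_realloc_rx_xdp_bi() above allocates the replacement software ring before freeing the one it supersedes, so an allocation failure leaves the current ring intact and the caller can simply return -ENOMEM. The same allocate-then-swap pattern reduced to a sketch around kcalloc()/kfree():

#include <linux/slab.h>

/* Replace *slot with a freshly zeroed array of n elements of elem_size.
 * On allocation failure the old array stays in place.
 */
static int demo_realloc(void **slot, size_t n, size_t elem_size)
{
	void *new_buf = kcalloc(n, elem_size, GFP_KERNEL);

	if (!new_buf)
		return -ENOMEM;

	kfree(*slot);
	*slot = new_buf;
	return 0;
}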
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 7adfd85..36f5b6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -17,7 +17,7 @@
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index ce1e2fb..a994a29 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -86,6 +86,7 @@
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA 50
#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
(IAVF_MAX_VF_VSI * \
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9fa3fa9..897b349 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index bd1fb37..a9cea7c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2578,6 +2578,7 @@
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
+ u32 tx_rate_rem = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
@@ -2592,12 +2593,32 @@
return -EINVAL;
if (mqprio_qopt->min_rate[i]) {
dev_err(&adapter->pdev->dev,
- "Invalid min tx rate (greater than 0) specified\n");
+ "Invalid min tx rate (greater than 0) specified for TC%d\n",
+ i);
return -EINVAL;
}
- /*convert to Mbps */
+
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
IAVF_MBPS_DIVISOR);
+
+ if (mqprio_qopt->max_rate[i] &&
+ tx_rate < IAVF_MBPS_QUANTA) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, minimum %dMbps\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
+ (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+ if (tx_rate_rem != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, not divisible by %d\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
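
Aside, not from the patch: a minimal userspace sketch of the rate check the iavf hunk above adds, using made-up input values. The offload rate arrives in bytes per second, is converted to Mbps with the 125000 divisor, and is rejected unless it is either zero (unlimited) or at least 50 Mbps and a multiple of the 50 Mbps quanta.

#include <inttypes.h>
#include <stdio.h>

#define MBPS_DIVISOR 125000ULL	/* bytes/s -> Mbps */
#define MBPS_QUANTA  50ULL

/* Returns 0 if the requested max rate is acceptable, -1 otherwise. */
static int validate_max_rate(uint64_t max_rate_bytes_per_sec)
{
	uint64_t tx_rate = max_rate_bytes_per_sec / MBPS_DIVISOR;

	if (max_rate_bytes_per_sec && tx_rate < MBPS_QUANTA)
		return -1;		/* below the 50 Mbps minimum */
	if (tx_rate % MBPS_QUANTA)
		return -1;		/* not a multiple of 50 Mbps */
	return 0;
}

int main(void)
{
	/* 100 Mbps = 12,500,000 bytes/s -> accepted; 60 Mbps -> rejected. */
	printf("100 Mbps: %d\n", validate_max_rate(12500000ULL));
	printf(" 60 Mbps: %d\n", validate_max_rate(7500000ULL));
	return 0;
}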
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 256fa07..d481a92 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or always return
+ * 0 for the head/tail registers so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1263,11 +1266,10 @@
{
struct iavf_rx_buffer *rx_buffer;
- if (!size)
- return NULL;
-
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
+ if (!size)
+ return rx_buffer;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1369,7 +1371,7 @@
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1527,7 +1529,7 @@
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index ff479bf..5deee75 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -241,11 +241,14 @@
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 6a57b41..7794703 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -498,7 +498,7 @@
static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
static inline void ice_set_ring_xdp(struct ice_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 421fc70..7f1bf71 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -652,7 +652,8 @@
rx_desc = ICE_RX_DESC(rx_ring, i);
if (!(rx_desc->wb.status_error0 &
- cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+ cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
rx_buf = &rx_ring->rx_buf[i];
@@ -2175,6 +2176,42 @@
}
/**
+ * ice_set_phy_type_from_speed - set phy_types based on speeds
+ * and advertised modes
+ * @ks: ethtool link ksettings struct
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @adv_link_speed: targeted link speeds bitmap
+ */
+static void
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
+ u64 *phy_type_low, u64 *phy_type_high,
+ u16 adv_link_speed)
+{
+ /* Handle 1000M speed in a special way because ice_update_phy_type
+ * enables all link modes, but having mixed copper and optical
+ * standards is not supported.
+ */
+ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+
+ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
+}
+
+/**
* ice_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
* @ks: ethtool ksettings
@@ -2310,7 +2347,8 @@
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
+ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
+ adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 52ac6cc..ea8d868 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1265,6 +1265,7 @@
ring->vsi = vsi;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
+ ring->txq_teid = ICE_INVAL_TEID;
WRITE_ONCE(vsi->tx_rings[i], ring);
}
@@ -2667,6 +2668,8 @@
}
}
+ if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+ ice_clear_dflt_vsi(pf->first_sw);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 20c9d55..f193709 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2475,8 +2475,10 @@
for (i = 0; i < vsi->num_xdp_txq; i++)
if (vsi->xdp_rings[i]) {
- if (vsi->xdp_rings[i]->desc)
+ if (vsi->xdp_rings[i]->desc) {
+ synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
@@ -3402,7 +3404,7 @@
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
@@ -5201,10 +5203,12 @@
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_vsi_vlan_setup(vsi);
+ if (vsi->type != ICE_VSI_LB) {
+ err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -5269,9 +5273,10 @@
netif_carrier_on(vsi->netdev);
}
- /* clear this now, and the first stats read will be used as baseline */
- vsi->stat_offsets_loaded = false;
-
+ /* Perform an initial read of the statistics registers now to
+ * set the baseline so counters are ready when interface is up
+ */
+ ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 5ce8590..0155c45 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2590,7 +2590,7 @@
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 5134342..a980d33 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -2723,9 +2723,9 @@
goto error_param;
}
- /* Skip queue if not enabled */
if (!test_bit(vf_q_id, vf->txq_ena))
- continue;
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ vf_q_id, vsi->vsi_num);
ice_fill_txq_meta(vsi, ring, &txq_meta);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 9f36f8d..59963b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -36,8 +36,10 @@
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ synchronize_rcu();
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -369,6 +371,19 @@
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
+ if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
+ !is_power_of_2(vsi->tx_rings[qid]->count)) {
+ netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
@@ -391,6 +406,7 @@
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
+failure:
if (pool_failure) {
netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
pool_present ? "en" : "dis", pool_failure);
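
For illustration only (not from the patch): the two guards the ice_xsk hunk introduces, restated with a hypothetical helper and made-up ring parameters. The queue id must fall inside the combined queue range and both ring sizes must be powers of two before an AF_XDP pool may be attached.

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Hypothetical parameters standing in for vsi->num_rxq/num_txq and the
 * rx/tx descriptor counts of the queue pair being configured. */
static int xsk_pool_setup_check(unsigned int qid,
				unsigned int num_rxq, unsigned int num_txq,
				unsigned int rx_count, unsigned int tx_count)
{
	if (qid >= num_rxq || qid >= num_txq)
		return -1;	/* queue id outside the combined queues range */
	if (!is_power_of_2(rx_count) || !is_power_of_2(tx_count))
		return -1;	/* ring sizes must be powers of two */
	return 0;
}

int main(void)
{
	printf("%d\n", xsk_pool_setup_check(3, 8, 8, 512, 512)); /* ok:  0  */
	printf("%d\n", xsk_pool_setup_check(3, 8, 8, 512, 768)); /* bad: -1 */
	printf("%d\n", xsk_pool_setup_check(9, 8, 8, 512, 512)); /* bad: -1 */
	return 0;
}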
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7bda8c5..e6d2800 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f854d41..327196d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3638,6 +3638,7 @@
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3650,12 +3651,13 @@
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3815,7 +3817,9 @@
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3975,6 +3979,9 @@
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -4813,8 +4820,11 @@
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ /* Free all the Tx ring sk_buffs or xdp frames */
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ dev_kfree_skb_any(tx_buffer->skb);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -5499,7 +5509,8 @@
break;
}
- if (adapter->link_speed != SPEED_1000)
+ if (adapter->link_speed != SPEED_1000 ||
+ !hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */
@@ -7848,8 +7859,10 @@
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7863,6 +7876,7 @@
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
@@ -9825,11 +9839,10 @@
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 reg;
if (hw->mac.type > e1000_82580) {
if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
/* force threshold to 0. */
wr32(E1000_DMCTXTH, 0);
@@ -9862,7 +9875,6 @@
/* Disable BMC-to-OS Watchdog Enable */
if (hw->mac.type != e1000_i354)
reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
-
wr32(E1000_DMACR, reg);
/* no lower threshold to disable
@@ -9879,12 +9891,12 @@
*/
wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+ }
- /* make low power state decision controlled
- * by DMA coal
- */
+ if (hw->mac.type >= e1000_i210 ||
+ (adapter->flags & IGB_FLAG_DMAC)) {
reg = rd32(E1000_PCIEMISC);
- reg &= ~E1000_PCIEMISC_LX_DECISION;
+ reg |= E1000_PCIEMISC_LX_DECISION;
wr32(E1000_PCIEMISC, reg);
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
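
Sketch only, not part of the patch: the locking scheme the igb vfs_lock hunks introduce, restated with pthreads in place of spin_lock_irqsave(). Teardown frees the VF table and zeroes the count under the same lock the message-task loop holds, so the loop sees either the full table or an empty one, never a freed one. All names here are illustrative stand-ins.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for adapter->vf_data / vfs_allocated_count. */
static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
static int *vf_data;
static unsigned int vfs_allocated_count;

/* igb_msg_task() analogue: walk the VF table only under the lock. */
static void *msg_task(void *arg)
{
	unsigned int vf;

	(void)arg;
	pthread_mutex_lock(&vfs_lock);
	for (vf = 0; vf < vfs_allocated_count; vf++)
		vf_data[vf]++;	/* safe: teardown cannot run concurrently */
	pthread_mutex_unlock(&vfs_lock);
	return NULL;
}

/* igb_disable_sriov() analogue: free the table under the same lock. */
static void disable_sriov(void)
{
	pthread_mutex_lock(&vfs_lock);
	free(vf_data);
	vf_data = NULL;
	vfs_allocated_count = 0;
	pthread_mutex_unlock(&vfs_lock);
}

int main(void)
{
	pthread_t thr;

	vfs_allocated_count = 4;
	vf_data = calloc(vfs_allocated_count, sizeof(*vf_data));

	pthread_create(&thr, NULL, msg_task, NULL);
	disable_sriov();	/* runs before or after msg_task, never during */
	pthread_join(thr, NULL);

	printf("done, vf_data=%p count=%u\n", (void *)vf_data, vfs_allocated_count);
	return 0;
}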
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index fd37d2c..7f3523f 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -187,15 +187,7 @@
igc_check_for_copper_link(hw);
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I225_I_PHY_ID:
- phy->type = igc_phy_i225;
- break;
- default:
- ret_val = -IGC_ERR_PHY;
- goto out;
- }
+ phy->type = igc_phy_i225;
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 55dae7c..7e29f41 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -22,6 +22,7 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 553d6bc..624236a 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 61cebb7..e7ffe63 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -9,6 +9,7 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
+#include <linux/pci.h>
#include <net/pkt_sched.h>
#include <net/ipv6.h>
@@ -4177,20 +4178,12 @@
* false until the igc_check_for_link establishes link
* for copper adapters ONLY
*/
- switch (hw->phy.media_type) {
- case igc_media_type_copper:
- if (!hw->mac.get_link_status)
- return true;
- hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- break;
- default:
- case igc_media_type_unknown:
- break;
- }
+ if (!hw->mac.get_link_status)
+ return true;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
- if (hw->mac.type == igc_i225 &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (hw->mac.type == igc_i225) {
if (!netif_carrier_ok(adapter->netdev)) {
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
@@ -4940,6 +4933,9 @@
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
@@ -5049,6 +5045,10 @@
pci_enable_pcie_error_reporting(pdev);
+ err = pci_enable_ptm(pdev, NULL);
+ if (err < 0)
+ dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
+
pci_set_master(pdev);
err = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index e380b7a..3a10340 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -249,8 +249,7 @@
return ret_val;
}
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID) {
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -390,8 +389,7 @@
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
- if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
- hw->phy.id == I225_I_PHY_ID)
+ if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
@@ -583,7 +581,7 @@
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -640,7 +638,7 @@
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index b52dd9d..a273e1c 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -252,7 +252,8 @@
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- writel((val), &hw_addr[(reg)]); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@@ -264,4 +265,6 @@
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h))
+
#endif
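
A side note, not part of the patch: the guarded-write idea behind the wr32/IGC_REMOVED change in miniature, using a plain array in place of a mapped BAR. The point is only that both the read and write paths bail out once the base pointer has been torn down (surprise removal) instead of dereferencing NULL; all names below are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the adapter's mapped register space. */
struct fake_hw {
	volatile uint32_t *hw_addr;
};

#define REMOVED(addr)	(!(addr))

#define WR32(hw, reg, val)						\
	do {								\
		volatile uint32_t *__addr = (hw)->hw_addr;		\
		if (!REMOVED(__addr))					\
			__addr[(reg)] = (val);				\
	} while (0)

static uint32_t rd32(struct fake_hw *hw, unsigned int reg)
{
	volatile uint32_t *addr = hw->hw_addr;

	if (REMOVED(addr))
		return ~0U;	/* all-ones, like a surprise-removed device */
	return addr[reg];
}

int main(void)
{
	uint32_t regs[16] = { 0 };
	struct fake_hw hw = { .hw_addr = regs };

	WR32(&hw, 2, 0xabcd);
	printf("reg2 = 0x%x\n", rd32(&hw, 2));

	hw.hw_addr = NULL;		/* device yanked */
	WR32(&hw, 2, 0x1234);		/* silently skipped */
	printf("reg2 after removal = 0x%x\n", rd32(&hw, 2));
	return 0;
}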
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index de0fc6e..27c6f91 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -769,6 +769,7 @@
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
+ spinlock_t vfs_lock;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 54d4726..3196208 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -903,7 +903,8 @@
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
- if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+ sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
+ if (sam->flags != XFRM_OFFLOAD_INBOUND) {
err = -EOPNOTSUPP;
goto err_out;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a3a02e2..b5b8be4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6403,6 +6403,9 @@
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
+
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 22a874e..8b7f300 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1211,7 +1211,6 @@
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1246,18 +1245,6 @@
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1290,6 +1277,50 @@
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ };
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1315,6 +1346,8 @@
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 214a38d..0078ae5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -204,10 +204,13 @@
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ unsigned long flags;
int rss;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -1157,9 +1160,9 @@
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
- enable = 0;
+ enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1181,9 +1184,9 @@
return -EPERM;
}
- disable = 0;
+ disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
@@ -1305,8 +1308,10 @@
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
@@ -1320,6 +1325,7 @@
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
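
Aside (not from the patch): the xcast-mode hunk above only changes which bits land in the disable/enable pair. The clear-then-set update below is the assumed way such a pair is folded into the VMOLR value, which is why keeping the broadcast bit in "enable" for the NONE mode preserves broadcast acceptance; the bit positions here are placeholders, not the real register layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit definitions standing in for the IXGBE_VMOLR_* flags. */
#define VMOLR_BAM	(1u << 27)	/* accept broadcast */
#define VMOLR_ROMPE	(1u << 25)
#define VMOLR_MPE	(1u << 28)
#define VMOLR_UPE	(1u << 29)
#define VMOLR_VPE	(1u << 23)

/* Assumed application of a disable/enable mask pair: clear, then set. */
static uint32_t apply_xcast_mode(uint32_t vmolr, uint32_t disable, uint32_t enable)
{
	vmolr &= ~disable;
	vmolr |= enable;
	return vmolr;
}

int main(void)
{
	uint32_t vmolr = VMOLR_BAM | VMOLR_MPE | VMOLR_UPE;	/* made-up start */

	/* XCAST_MODE_NONE after the fix: broadcast stays enabled. */
	vmolr = apply_xcast_mode(vmolr,
				 VMOLR_ROMPE | VMOLR_MPE | VMOLR_UPE | VMOLR_VPE,
				 VMOLR_BAM);

	printf("BAM still set: %s\n", (vmolr & VMOLR_BAM) ? "yes" : "no");
	return 0;
}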
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 2d0c52f..5ea626b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -466,7 +466,6 @@
len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
- dev_kfree_skb_any(skb);
netdev_err(dev, "tx ring full\n");
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 90e6111..735b76e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2471,6 +2471,7 @@
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
out:
+ napi_disable(&mp->napi);
free_irq(dev->irq, dev);
return err;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index d825eb0..e999ac2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1434,6 +1434,7 @@
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
#ifdef CONFIG_MVPP2_PTP
int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7..75e83ea 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 542cd6f..68c5ed8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -7155,7 +7155,18 @@
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 2a13c31..59a3ea0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -771,6 +771,7 @@
int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
struct prestera_rxtx *rxtx;
+ int err;
rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
if (!rxtx)
@@ -778,7 +779,11 @@
sw->rxtx = rxtx;
- return prestera_sdma_switch_init(sw);
+ err = prestera_sdma_switch_init(sw);
+ if (err)
+ kfree(rxtx);
+
+ return err;
}
void prestera_rxtx_switch_fini(struct prestera_switch *sw)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7d7dc07..217dc67 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -806,6 +806,17 @@
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1303,7 +1314,10 @@
goto release_desc;
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1700,7 +1714,10 @@
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ else
+ ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
@@ -1966,6 +1983,9 @@
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
@@ -2279,7 +2299,10 @@
int err = mtk_start_dma(eth);
- if (err)
+ if (err) {
+ phylink_disconnect_phy(mac->phylink);
return err;
+ }
mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
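
Purely illustrative (not in the patch): how the order passed to __get_free_pages() in mtk_max_lro_buf_alloc() relates to the requested buffer size. get_order() rounds the size up to the next power-of-two number of pages; the reimplementation below assumes 4 KiB pages and made-up sizes.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Userspace rendition of the kernel's get_order() for size > 0:
 * smallest 'order' such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* e.g. a large LRO frame plus ring/skb overhead (values made up) */
	unsigned long sizes[] = { 2048, PAGE_SIZE, PAGE_SIZE + 1, 48 * 1024 + 512 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %lu -> order %u (%lu bytes)\n",
		       sizes[i], get_order(sizes[i]),
		       PAGE_SIZE << get_order(sizes[i]));
	return 0;
}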
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 32d8342..5897940 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -26,6 +26,7 @@
break;
ss->regmap[i] = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 01275c3..9628510 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2099,7 +2099,7 @@
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
- return 0;
+ return ret;
}
i += ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 427e7a3..d7f2890 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -697,7 +697,8 @@
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
- mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+ if (!err)
+ mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 94426d2..c838d86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -959,6 +959,7 @@
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
u8 status = 0;
@@ -972,7 +973,6 @@
return;
}
- cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -1586,8 +1586,8 @@
cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */
- pci_channel_offline(dev->pdev) || /* FW is inaccessible */
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
+ !opcode_allowed(cmd, ent->op))
cmd_ent_put(ent);
ent->ts2 = ktime_get_ns();
@@ -1687,12 +1687,17 @@
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- while (down_trylock(&cmd->sem))
+ for (i = 0; i < cmd->max_reg_cmds; i++) {
+ while (down_trylock(&cmd->sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
- while (down_trylock(&cmd->pages_sem))
+ while (down_trylock(&cmd->pages_sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
/* Unlock cmdif */
up(&cmd->pages_sem);
@@ -1853,7 +1858,7 @@
ctx->dev = dev;
/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
atomic_set(&ctx->num_inflight, 1);
- init_waitqueue_head(&ctx->wait);
+ init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
@@ -1867,8 +1872,8 @@
*/
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
- atomic_dec(&ctx->num_inflight);
- wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+ if (!atomic_dec_and_test(&ctx->num_inflight))
+ wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
@@ -1879,7 +1884,7 @@
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
}
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
@@ -1895,7 +1900,7 @@
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
if (ret && atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
return ret;
}
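
A hedged aside, not part of the patch: the shape of the "last reference completes" pattern the mlx5 async-context hunks switch to, re-expressed with C11 atomics and a POSIX semaphore standing in for the kernel's atomic_t and struct completion. The cleanup path only blocks when its own decrement was not the final one; build with -pthread.

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct async_ctx {
	atomic_int num_inflight;
	sem_t inflight_done;	/* plays the role of struct completion */
};

static struct async_ctx ctx;

static void *callback(void *arg)
{
	(void)arg;
	usleep(10000);		/* pretend the firmware answered late */
	/* Callback handler analogue: the last decrement signals completion. */
	if (atomic_fetch_sub(&ctx.num_inflight, 1) == 1)
		sem_post(&ctx.inflight_done);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	/* init: start at 1 so cleanup never waits when nothing was issued */
	atomic_init(&ctx.num_inflight, 1);
	sem_init(&ctx.inflight_done, 0, 0);

	/* issue one async command: take a reference, fire the callback */
	atomic_fetch_add(&ctx.num_inflight, 1);
	pthread_create(&thr, NULL, callback, NULL);

	/* cleanup: drop our own reference; wait only if not the last */
	if (atomic_fetch_sub(&ctx.num_inflight, 1) != 1)
		sem_wait(&ctx.inflight_done);

	pthread_join(thr, NULL);
	sem_destroy(&ctx.inflight_done);
	printf("all callbacks drained\n");
	return 0;
}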
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 857be86..f800e1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -638,7 +638,7 @@
trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
else
- trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
@@ -675,6 +675,9 @@
if (!tracer->owner)
return;
+ if (unlikely(!tracer->str_db.loaded))
+ goto arm;
+
block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
@@ -732,6 +735,7 @@
&tmp_trace_block[TRACES_PER_BLOCK - 1]);
}
+arm:
mlx5_fw_tracer_arm(dev);
}
@@ -1138,8 +1142,7 @@
queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
- if (likely(tracer->str_db.loaded))
- queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index ed4fb79..75b6060 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -31,6 +31,7 @@
struct mlx5_rsc_dump {
u32 pdn;
struct mlx5_core_mkey mkey;
+ u32 number_of_menu_items;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@@ -50,21 +51,37 @@
return -EINVAL;
}
-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
+
+static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
+ int read_size, int start_idx)
{
void *data = page_address(page);
enum mlx5_sgmt_type sgmt_idx;
int num_of_items;
char *sgmt_name;
void *member;
+ int size = 0;
void *menu;
int i;
- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+ if (!start_idx) {
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
+ num_of_records);
+ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
+ data += size;
+ }
+ num_of_items = rsc_dump->number_of_menu_items;
- for (i = 0; i < num_of_items; i++) {
- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ for (i = 0; start_idx + i < num_of_items; i++) {
+ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
+ if (size >= read_size)
+ return start_idx + i;
+
+ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
if (sgmt_idx == -EINVAL)
@@ -72,6 +89,7 @@
rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
member, segment_type);
}
+ return 0;
}
static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
@@ -168,6 +186,7 @@
struct mlx5_rsc_dump_cmd *cmd = NULL;
struct mlx5_rsc_key key = {};
struct page *page;
+ int start_idx = 0;
int size;
int err;
@@ -189,7 +208,7 @@
if (err < 0)
goto destroy_cmd;
- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
} while (err > 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 73060b3..b0229ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -101,7 +101,7 @@
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 673f1c8..c9d5d8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -309,8 +309,8 @@
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
- xoff, &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 0469f53..511c431 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1629,6 +1629,8 @@
static void
mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
{
+ struct mlx5e_priv *priv;
+
if (!refcount_dec_and_test(&ft->refcount))
return;
@@ -1638,6 +1640,8 @@
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
+ priv = netdev_priv(ct_priv->netdev);
+ flush_workqueue(priv->wq);
mlx5_tc_ct_free_pre_ct_tables(ft);
mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
kfree(ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 26f7fab..d08bd22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -113,7 +113,6 @@
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom = 0;
u8 overlap;
- u32 *esn;
if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
sa_entry->esn_state.trigger = 0;
@@ -128,11 +127,9 @@
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
htonl(seq_bottom));
- esn = &sa_entry->esn_state.esn;
sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
- ++(*esn);
sa_entry->esn_state.overlap = 0;
return true;
} else if (unlikely(!overlap &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 1b39269..f824d78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -15,7 +15,7 @@
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+ if (!mlx5e_ktls_type_check(mdev, crypto_info))
return -EOPNOTSUPP;
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index d06532d..634777f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -231,8 +231,7 @@
struct mlx5e_ktls_offload_context_rx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
*ctx = priv_rx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index b140e13..679747d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -63,8 +63,7 @@
struct mlx5e_ktls_offload_context_tx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
*ctx = priv_tx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f23c675..7c0ae7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1210,6 +1210,16 @@
if (err)
return err;
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
+ /*
+ * Align the driver state with the register state.
+ * Temporary state change is required to enable the app list reset.
+ */
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
+ mlx5e_dcbnl_delete_app(priv);
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+ }
+
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 16e98ac..cfc3bfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4009,6 +4009,13 @@
}
}
+ if (params->xdp_prog) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+ }
+
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
@@ -4569,6 +4576,11 @@
unlock:
mutex_unlock(&priv->state_lock);
+
+ /* Need to fix some features. */
+ if (!err)
+ netdev_update_features(netdev);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 304435e..b991f03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -706,6 +706,8 @@
params->num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 78f6a6f..ff4f10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -536,7 +536,7 @@
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
return;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1ad1692..f1bf7f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2396,6 +2396,17 @@
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
+ match.mask->vlan_eth_type &&
+ MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
+ ft_field_support.outer_second_vid,
+ fs_type)) {
+ MLX5_SET(fte_match_set_misc, misc_c,
+ outer_second_cvlan_tag, 1);
+ spec->match_criteria_enable |=
+ MLX5_MATCH_MISC_PARAMETERS;
+ }
}
} else if (*match_level != MLX5_MATCH_NONE) {
/* cvlan_tag enabled in match criteria and
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 1cbb330..6c865cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -30,9 +30,9 @@
sizeof(dest->vport.num), hash);
hash = jhash((const void *)&dest->vport.vhca_id,
sizeof(dest->vport.num), hash);
- if (dest->vport.pkt_reformat)
- hash = jhash(dest->vport.pkt_reformat,
- sizeof(*dest->vport.pkt_reformat),
+ if (flow_act->pkt_reformat)
+ hash = jhash(flow_act->pkt_reformat,
+ sizeof(*flow_act->pkt_reformat),
hash);
return hash;
}
@@ -53,9 +53,11 @@
if (ret)
return ret;
- return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
- memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
- sizeof(*dest1->vport.pkt_reformat)) : 0;
+ if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+ return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+ sizeof(*flow_act1->pkt_reformat));
+
+ return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 55772f0..4bdccef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1520,9 +1520,22 @@
return NULL;
}
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+ const struct mlx5_fs_vlan *vlan1)
{
- u32 xored_actions = action1 ^ action2;
+ return vlan0->ethtype != vlan1->ethtype ||
+ vlan0->vid != vlan1->vid ||
+ vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+ const struct mlx5_flow_act *act2)
+{
+ u32 action1 = act1->action;
+ u32 action2 = act2->action;
+ u32 xored_actions;
+
+ xored_actions = action1 ^ action2;
/* if one rule only wants to count, it's ok */
if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1539,6 +1552,22 @@
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ act1->pkt_reformat != act2->pkt_reformat)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ act1->modify_hdr != act2->modify_hdr)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+ return true;
+
return false;
}
@@ -1546,7 +1575,7 @@
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+ if (check_conflicting_actions(flow_act, &fte->action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
@@ -2024,16 +2053,16 @@
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->dests_size) {
- if (fte->modify_mask)
- modify_fte(fte);
- up_write_ref_node(&fte->node, false);
- } else if (list_empty(&fte->node.children)) {
+ if (list_empty(&fte->node.children)) {
del_hw_fte(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
} else {
up_write_ref_node(&fte->node, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 9b472e7..e29db4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -134,14 +134,19 @@
del_timer_sync(&fw_reset->timer);
}
-static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+ mlx5_core_warn(dev, "Reset request was already cleared\n");
+ return -EALREADY;
+ }
+
mlx5_stop_sync_reset_poll(dev);
- clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
if (poll_health)
mlx5_start_health_poll(dev);
+ return 0;
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -185,13 +190,17 @@
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
}
-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
+ mlx5_core_warn(dev, "Reset request was already set\n");
+ return -EALREADY;
+ }
mlx5_stop_health_poll(dev, true);
- set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
mlx5_start_sync_reset_poll(dev);
+ return 0;
}
static void mlx5_fw_live_patch_event(struct work_struct *work)
@@ -225,7 +234,9 @@
err ? "Failed" : "Sent");
return;
}
- mlx5_sync_reset_set_reset_requested(dev);
+ if (mlx5_sync_reset_set_reset_requested(dev))
+ return;
+
err = mlx5_fw_reset_set_reset_sync_ack(dev);
if (err)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
@@ -325,7 +336,8 @@
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- mlx5_sync_reset_clear_reset_requested(dev, false);
+ if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ return;
mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
@@ -354,10 +366,8 @@
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
return;
-
- mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 11cc3ea..9fb3e5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -274,7 +274,7 @@
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- struct lag_tracker tracker;
+ struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 839a01d..8ff1631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -122,7 +122,7 @@
{
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!mpfs)
return;
WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +137,7 @@
int err = 0;
u32 index;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!mpfs)
return 0;
mutex_lock(&mpfs->lock);
@@ -185,7 +185,7 @@
int err = 0;
u32 index;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!mpfs)
return 0;
mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 96c39a1..b227fa9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -43,11 +43,10 @@
err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
if (err && action) {
err = mlx5dr_action_destroy(action);
- if (err) {
- action = NULL;
- mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
- err);
- }
+ if (err)
+ mlx5_core_err(ns->dev,
+ "Failed to destroy action (%d)\n", err);
+ action = NULL;
}
ft->fs_dr_table.miss_action = action;
if (old_miss_action) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 7ec1d0e..ecd1856 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -133,7 +133,7 @@
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- sizeof(cdev->type)))
+ strlen(cdev->type)))
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 939b692..ce843ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -650,6 +650,7 @@
return 0;
errout:
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index a68d931..15c8d4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -8,8 +8,8 @@
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
- MLXSW_SP_COUNTER_SUB_POOL_FLOW,
MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 5f92b16..aff6d4f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -168,8 +168,6 @@
static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
struct dcb_app *app)
{
- int prio;
-
if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
netdev_err(dev, "APP entry with priority value %u is invalid\n",
app->priority);
@@ -183,17 +181,6 @@
app->protocol);
return -EINVAL;
}
-
- /* Warn about any DSCP APP entries with the same PID. */
- prio = fls(dcb_ieee_getapp_mask(dev, app));
- if (prio--) {
- if (prio < app->priority)
- netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
- app->priority, app->protocol, prio);
- else if (prio > app->priority)
- netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
- app->priority, app->protocol, prio);
- }
break;
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 5312838..d2887ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4003,7 +4003,7 @@
{
const struct fib_nh *nh = fib_info_nh(fi, 0);
- return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ return nh->fib_nh_gw_family ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
@@ -8038,13 +8038,14 @@
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
- bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
+ bool usp;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 433f14a..02ba6aa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -709,7 +709,7 @@
.trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
.listeners_arr = {
MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
- false, SP_LLDP, DISCARD),
+ true, SP_LLDP, DISCARD),
},
},
{
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 9ed264e..1fa1606 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6923,7 +6923,7 @@
char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
- result = pci_enable_device(pdev);
+ result = pcim_enable_device(pdev);
if (result)
return result;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a0e1ccc..73aac97 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -74,11 +74,6 @@
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +142,11 @@
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -193,6 +188,7 @@
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +200,11 @@
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +241,7 @@
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +295,7 @@
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +359,9 @@
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +380,7 @@
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -494,7 +495,7 @@
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -502,7 +503,7 @@
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a06466e..a55861e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1593,8 +1593,12 @@
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_MC);
- ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
- ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MCIPV6);
/* Allow manual injection via DEVCPU_QS registers, and byte swap these
* registers endianness.
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index c4c4649..b221b83 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -206,9 +206,10 @@
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_TRAP:
- if (filter->block_id != VCAP_IS2) {
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Trap action can only be offloaded to VCAP IS2");
+ "Trap action can only be offloaded to VCAP IS2 lookup 0");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index d8c778e..1185725 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -373,7 +373,6 @@
OCELOT_VCAP_BIT_0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@ -1143,6 +1142,8 @@
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i - 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
@@ -1181,7 +1182,11 @@
struct ocelot_vcap_filter del_filter;
int i, index;
+ /* Need to inherit the block_id so that vcap_entry_set()
+ * does not get confused and knows where to install it.
+ */
memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1196,6 +1201,8 @@
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index fc99ad8..1664e91 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2895,11 +2895,9 @@
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
- if (segs != NULL) {
- curr = segs;
- segs = next;
+ skb_list_walk_safe(next, curr, next) {
curr->next = NULL;
- dev_kfree_skb_any(segs);
+ dev_kfree_skb_any(curr);
}
goto drop;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3cae844..8a30be6 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7114,9 +7114,8 @@
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_fill_buff;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
ring->rx_bufs_left);
@@ -7154,18 +7153,16 @@
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
/* Add interrupt service routine */
if (s2io_add_isr(sp) != 0) {
if (sp->config.intr_type == MSI_X)
s2io_rem_isr(sp);
- s2io_reset(sp);
- free_rx_buffers(sp);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_out;
}
timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
@@ -7185,6 +7182,20 @@
}
return 0;
+
+err_out:
+ if (config->napi) {
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_disable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_disable(&sp->napi);
+ }
+ }
+err_fill_buff:
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 2643ea5..154399c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -196,7 +196,7 @@
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
@@ -277,7 +277,7 @@
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 7a81874..24578c4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -363,7 +363,7 @@
return ret;
attrs.split = eth_port.is_split;
- attrs.splittable = !attrs.split;
+ attrs.splittable = eth_port.port_lanes > 1 && !attrs.split;
attrs.lanes = eth_port.port_lanes;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = eth_port.label_port;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index dfc1f32..5ab230a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3373,21 +3373,21 @@
unsigned int start;
do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index cd0c962..311873f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -286,8 +286,6 @@
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
@@ -295,6 +293,8 @@
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
if (eth_port) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
nfp_net_set_fec_link_mode(eth_port, cmd);
@@ -494,7 +494,7 @@
unsigned int start;
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -502,10 +502,10 @@
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -515,7 +515,7 @@
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -1225,6 +1225,11 @@
u8 data;
port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return -EOPNOTSUPP;
+
+ /* update port state to get latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index a6861df..9c48fd8 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -899,6 +899,7 @@
err_rx_irq:
free_irq(priv->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&priv->napi);
phy_stop(phy);
phy_disconnect(phy);
tasklet_kill(&priv->dma_err_tasklet);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 2942102..bde32f0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1166,6 +1166,7 @@
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
tx_ring->next_to_use = ring_num;
+ dev_kfree_skb_any(skb);
return;
}
buffer_info->mapped = true;
@@ -2481,6 +2482,7 @@
unregister_netdev(netdev);
pch_gbe_phy_hw_reset(&adapter->hw);
+ pci_dev_put(adapter->ptp_pdev);
free_netdev(netdev);
}
@@ -2562,7 +2564,7 @@
/* setup the private structure */
ret = pch_gbe_sw_init(adapter);
if (ret)
- goto err_free_netdev;
+ goto err_put_dev;
/* Initialize PHY */
ret = pch_gbe_init_phy(adapter);
@@ -2620,6 +2622,8 @@
err_free_adapter:
pch_gbe_phy_hw_reset(&adapter->hw);
+err_put_dev:
+ pci_dev_put(adapter->ptp_pdev);
err_free_netdev:
free_netdev(netdev);
return ret;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index b0d8499..31fbe89 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -251,7 +251,7 @@
err = ionic_map_bars(ionic);
if (err)
- goto err_out_pci_disable_device;
+ goto err_out_pci_release_regions;
/* Configure the device */
err = ionic_setup(ionic);
@@ -353,6 +353,7 @@
err_out_unmap_bars:
ionic_unmap_bars(ionic);
+err_out_pci_release_regions:
pci_release_regions(pdev);
err_out_pci_disable_device:
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index e95c09d..cb12d01 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1286,7 +1286,7 @@
if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
- if ((vlan_flags & features) &&
+ if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
@@ -2383,11 +2383,15 @@
* than the full array, but leave the qcq shells in place
*/
for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
- lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->txqcqs[i]);
+ if (lif->txqcqs && lif->txqcqs[i]) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ }
- lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->rxqcqs[i]);
+ if (lif->rxqcqs && lif->rxqcqs[i]) {
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
}
return err;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index d355676..00b6985 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -311,10 +311,10 @@
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ struct ionic_dev *idev = &ionic->idev;
- iowrite32(0, &regs->doorbell);
- memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+ iowrite32(0, &idev->dev_cmd_regs->doorbell);
+ memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
@@ -378,8 +378,8 @@
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
@@ -392,6 +392,8 @@
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
@@ -567,8 +569,14 @@
static int __init ionic_init_module(void)
{
+ int ret;
+
ionic_debugfs_create();
- return ionic_bus_register_driver();
+ ret = ionic_bus_register_driver();
+ if (ret)
+ ionic_debugfs_destroy();
+
+ return ret;
}
static void __exit ionic_cleanup_module(void)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ef0ad4c..3541bc9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2982,12 +2982,16 @@
u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
struct qed_filter_accept_flags *flags = &params->accept_flags;
struct qed_public_vf_info *vf_info;
+ u16 tlv_mask;
+
+ tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
+ BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
/* Untrusted VFs can't even be trusted to know that fact.
* Simply indicate everything is configured fine, and trace
* configuration 'behind their back'.
*/
- if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+ if (!(*tlvs & tlv_mask))
return 0;
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
@@ -3004,6 +3008,13 @@
flags->tx_accept_filter &= ~mask;
}
+ if (params->update_accept_any_vlan_flg) {
+ vf_info->accept_any_vlan = params->accept_any_vlan;
+
+ if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
+ params->accept_any_vlan = false;
+ }
+
return 0;
}
@@ -4691,6 +4702,7 @@
tx_rate = vf_info->tx_rate;
ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+ ivi->trusted = vf_info->is_trusted_request;
return 0;
}
@@ -5120,6 +5132,12 @@
params.update_ctl_frame_check = 1;
params.mac_chk_en = !vf_info->is_trusted_configured;
+ params.update_accept_any_vlan_flg = 0;
+
+ if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = vf_info->accept_any_vlan;
+ }
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
@@ -5135,13 +5153,20 @@
if (!vf_info->is_trusted_configured) {
flags->rx_accept_filter &= ~mask;
flags->tx_accept_filter &= ~mask;
+ params.accept_any_vlan = false;
}
if (flags->update_rx_mode_config ||
flags->update_tx_mode_config ||
- params.update_ctl_frame_check)
+ params.update_ctl_frame_check ||
+ params.update_accept_any_vlan_flg) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
+ vf_info->is_trusted_configured ? "trusted" : "untrusted",
+ vf->abs_vf_id, vf->relative_vf_id);
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
+ }
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index eacd645..7ff23ef 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -62,6 +62,7 @@
bool is_trusted_request;
u8 rx_accept_mode;
u8 tx_accept_mode;
+ bool accept_any_vlan;
};
struct qed_iov_vf_init_params {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 21c9062..d210632 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -752,6 +752,9 @@
buf = page_address(bd->data) + bd->page_offset;
skb = build_skb(buf, rxq->rx_buf_seg_size);
+ if (unlikely(!skb))
+ return NULL;
+
skb_reserve(skb, pad);
skb_put(skb, len);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c9f32fc..99fd35a 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2475,6 +2475,7 @@
skb_shinfo(skb)->nr_frags);
if (tx_cb->seg_count == -1) {
netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3628,7 +3629,8 @@
qdev->mem_map_registers;
unsigned long hw_flags;
- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 5d79ee4..7519773 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -51,7 +51,7 @@
if (dcb && dcb->ops->get_hw_capability)
return dcb->ops->get_hw_capability(dcb);
- return 0;
+ return -EOPNOTSUPP;
}
static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
@@ -65,7 +65,7 @@
if (dcb && dcb->ops->attach)
return dcb->ops->attach(dcb);
- return 0;
+ return -EOPNOTSUPP;
}
static inline int
@@ -74,7 +74,7 @@
if (dcb && dcb->ops->query_hw_capability)
return dcb->ops->query_hw_capability(dcb, buf);
- return 0;
+ return -EOPNOTSUPP;
}
static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
@@ -89,7 +89,7 @@
if (dcb && dcb->ops->query_cee_param)
return dcb->ops->query_cee_param(dcb, buf, type);
- return 0;
+ return -EOPNOTSUPP;
}
static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
@@ -97,7 +97,7 @@
if (dcb && dcb->ops->get_cee_cfg)
return dcb->ops->get_cee_cfg(dcb);
- return 0;
+ return -EOPNOTSUPP;
}
static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5eac3f4..c025dad 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4183,7 +4183,6 @@
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
- u32 transport_offset = (u32)skb_transport_offset(skb);
struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 mss = shinfo->gso_size;
@@ -4200,7 +4199,7 @@
WARN_ON_ONCE(1);
}
- opts[0] |= transport_offset << GTTCPHO_SHIFT;
+ opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
opts[1] |= mss << TD1_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
@@ -4228,7 +4227,7 @@
else
WARN_ON_ONCE(1);
- opts[1] |= transport_offset << TCPHO_SHIFT;
+ opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
} else {
unsigned int padto = rtl_quirk_packet_padto(tp, skb);
@@ -4401,14 +4400,13 @@
struct net_device *dev,
netdev_features_t features)
{
- int transport_offset = skb_transport_offset(skb);
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
features = rtl8168evl_fix_tso(skb, features);
- if (transport_offset > GTTCPHO_MAX &&
+ if (skb_transport_offset(skb) > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -4419,7 +4417,7 @@
if (rtl_quirk_packet_padto(tp, skb))
features &= ~NETIF_F_CSUM_MASK;
- if (transport_offset > TCPHO_MAX &&
+ if (skb_transport_offset(skb) > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 8157666..e4d919d 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4fa72b5..eb1be73 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1916,7 +1916,10 @@
efx_update_sw_stats(efx, stats);
out:
+ /* releasing a DMA coherent buffer with BH disabled can panic */
+ spin_unlock_bh(&efx->stats_lock);
efx_nic_free_buffer(efx, &stats_buf);
+ spin_lock_bh(&efx->stats_lock);
return rc;
}
@@ -2240,7 +2243,7 @@
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
@@ -3252,6 +3255,30 @@
bool was_enabled = efx->port_enabled;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ /* If this function is a VF and we have access to the parent PF,
+ * then use the PF control path to attempt to change the VF MAC address.
+ */
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac[ETH_ALEN];
+
+ /* net_dev->dev_addr can be zeroed by efx_net_stop in
+ * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+ */
+ ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+ if (!rc)
+ return 0;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Updating VF mac via PF failed (%d), setting directly\n",
+ rc);
+ }
+#endif
+
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
@@ -3274,40 +3301,6 @@
efx_net_open(efx->net_dev);
efx_device_attach_if_not_resetting(efx);
-#ifdef CONFIG_SFC_SRIOV
- if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
- if (rc == -EPERM) {
- struct efx_nic *efx_pf;
-
- /* Switch to PF and change MAC address on vport */
- efx_pf = pci_get_drvdata(pci_dev_pf);
-
- rc = efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr);
- } else if (!rc) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
- struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
- unsigned int i;
-
- /* MAC address successfully changed by VF (with MAC
- * spoofing) so update the parent PF if possible.
- */
- for (i = 0; i < efx_pf->vf_count; ++i) {
- struct ef10_vf *vf = nic_data->vf + i;
-
- if (vf->efx == efx) {
- ether_addr_copy(vf->mac,
- efx->net_dev->dev_addr);
- return 0;
- }
- }
- }
- } else
-#endif
if (rc == -EPERM) {
netif_err(efx, drv, efx->net_dev,
"Cannot change MAC address; use sfboot to enable"
@@ -3563,6 +3556,11 @@
n_parts++;
}
+ if (!n_parts) {
+ kfree(parts);
+ return 0;
+ }
+
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 67fe44d..63a44ee 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -200,6 +200,7 @@
skb->len, skb->data_len, channel->channel);
if (!efx->n_channels || !efx->n_tx_channels || !channel) {
netif_stop_queue(net_dev);
+ dev_kfree_skb_any(skb);
goto err;
}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 84041cd..b44acb6 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -411,8 +411,9 @@
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int vfs_assigned = pci_vfs_assigned(dev);
- int rc = 0;
+ int i, rc = 0;
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -420,10 +421,13 @@
return -EBUSY;
}
- if (!vfs_assigned)
+ if (!vfs_assigned) {
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].pci_dev = NULL;
pci_disable_sriov(dev);
- else
+ } else {
rc = -EBUSY;
+ }
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 0a8799a..c49168b 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -287,6 +287,7 @@
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
@@ -307,6 +308,7 @@
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -744,7 +746,9 @@
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
- struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+ *ptp_channel = efx_ptp_channel(efx);
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
unsigned int i, next_buffer_table = 0;
u32 old_rxq_entries, old_txq_entries;
int rc, rc2;
@@ -797,11 +801,8 @@
old_txq_entries = efx->txq_entries;
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
/* Restart buffer table allocation */
efx->next_buffer_table = next_buffer_table;
@@ -817,6 +818,7 @@
}
out:
+ efx->ptp_data = NULL;
/* Destroy unused channel structures */
for (i = 0; i < efx->n_channels; i++) {
channel = other_channel[i];
@@ -827,6 +829,7 @@
}
}
+ efx->ptp_data = ptp_data;
rc2 = efx_soft_enable_interrupts(efx);
if (rc2) {
rc = rc ? rc : rc2;
@@ -843,11 +846,9 @@
/* Swap back */
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
+ efx_ptp_update_channel(efx, ptp_channel);
goto out;
}
@@ -859,10 +860,6 @@
int xdp_queue_number;
int rc;
- efx->tx_channel_offset =
- efx_separate_tx_channels ?
- efx->n_channels - efx->n_tx_channels : 0;
-
if (efx->xdp_tx_queue_count) {
EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 40b2af8..2ac3c8f 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -157,7 +157,8 @@
u32 flags:6;
u32 dmaq_id:12;
u32 rss_context;
- __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ u32 vport_id;
+ __be16 outer_vid;
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9f7dfdf..8aecb4b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1522,7 +1522,7 @@
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return true;
+ return channel && channel->channel >= channel->efx->tx_channel_offset;
}
static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 797e518..a2b4e3b 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -45,6 +45,7 @@
#include "farch_regs.h"
#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
+#include "efx_channels.h"
/* Maximum number of events expected to make up a PTP event */
#define MAX_EVENT_FRAGS 3
@@ -541,6 +542,12 @@
return efx->ptp_data ? efx->ptp_data->channel : NULL;
}
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
+{
+ if (efx->ptp_data)
+ efx->ptp_data->channel = channel;
+}
+
static u32 last_sync_timestamp_major(struct efx_nic *efx)
{
struct efx_channel *channel = efx_ptp_channel(efx);
@@ -1093,7 +1100,29 @@
tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
+ /* This code invokes normal driver TX code which is always
+ * protected from softirqs when called from generic TX code,
+ * which in turn disables preemption. Look at __dev_queue_xmit
+ * which uses rcu_read_lock_bh disabling preemption for RCU
+ * plus disabling softirqs. We do not need RCU reader
+ * protection here.
+ *
+ * Although it is theoretically safe for current PTP TX/RX code
+ * running without disabling softirqs, there are three good
+ * reasons for doing so:
+ *
+ * 1) The code invoked is mainly implemented for non-PTP
+ * packets and it is always executed with softirqs
+ * disabled.
+ * 2) This being a single PTP packet, better to not
+ * interrupt its processing by softirqs which can lead
+ * to high latencies.
+ * 3) netdev_xmit_more checks preemption is disabled and
+ * triggers a BUG_ON if not.
+ */
+ local_bh_disable();
efx_enqueue_skb(tx_queue, skb);
+ local_bh_enable();
} else {
WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
dev_kfree_skb_any(skb);
@@ -1443,6 +1472,11 @@
int rc = 0;
unsigned int pos;
+ if (efx->ptp_data) {
+ efx->ptp_data->channel = channel;
+ return 0;
+ }
+
ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
efx->ptp_data = ptp;
if (!efx->ptp_data)
@@ -2179,7 +2213,7 @@
.pre_probe = efx_ptp_probe_channel,
.post_remove = efx_ptp_remove_channel,
.get_name = efx_ptp_get_channel_name,
- /* no copy operation; there is no need to reallocate this channel */
+ .copy = efx_copy_channel,
.receive_skb = efx_ptp_rx,
.want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 9855e8c..7b1ef70 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -16,6 +16,7 @@
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index e423b17..36b46dd 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -166,6 +166,9 @@
struct efx_nic *efx = rx_queue->efx;
int i;
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
/* Unmap and release the pages in the recycle ring. Remove the ring. */
for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
struct page *page = rx_queue->page_ring[i];
@@ -673,17 +676,17 @@
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;
- return memcmp(&left->outer_vid, &right->outer_vid,
+ return memcmp(&left->vport_id, &right->vport_id,
sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
+ offsetof(struct efx_filter_spec, vport_id)) == 0;
}
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+ return jhash2((const u32 *)&spec->vport_id,
(sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ offsetof(struct efx_filter_spec, vport_id)) / 4,
0);
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 1665529..fcc7de8 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -545,7 +545,7 @@
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 51cd7dc..3a7d2ba 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1513,14 +1513,14 @@
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
+ unregister_netdev(dev);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
ep->tx_ring_dma);
dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
ep->rx_ring_dma);
- unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
- pci_release_regions(pdev);
free_netdev(dev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
/* pci_power_off(pdev, -1); */
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 823d9a7..6c4479c 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2422,7 +2422,7 @@
if (irq == -EPROBE_DEFER) {
retval = -EPROBE_DEFER;
goto out_0;
- } else if (irq <= 0) {
+ } else if (irq < 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index ef3634d..b9acee2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1958,11 +1958,13 @@
ret = PTR_ERR(priv->phydev);
dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
priv->phydev = NULL;
+ mdiobus_unregister(bus);
return -ENODEV;
}
ret = phy_device_register(priv->phydev);
if (ret) {
+ phy_device_free(priv->phydev);
mdiobus_unregister(bus);
dev_err(priv->dev,
"phy_device_register err(%d)\n", ret);
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index cd478d2..00f6d34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-#define SGMII_ADAPTER_ENABLE 0x0000
-
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
- void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
- writew(SGMII_ADAPTER_ENABLE,
- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
index 442812c..694ac25 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_ENABLE 0x0000
+#define SGMII_ADAPTER_DISABLE 0x0001
+
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 2342d49..fd1b0cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -363,6 +363,7 @@
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
+ data->sph_disable = 1;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a9087da..5406f5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -243,6 +243,7 @@
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
plat->tso_en = 1;
+ plat->sph_disable = 1;
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@@ -668,6 +669,7 @@
pci_free_irq_vectors(pdev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
pcim_iounmap_regions(pdev, BIT(0));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 752658e..50ef684 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -261,11 +261,9 @@
if (ret)
return ret;
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
-
- return 0;
+ return devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
}
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f37b6d5..142bf91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,14 +59,13 @@
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
- void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if ((tse_pcs_base) && (sgmii_adapter_base))
+ if (sgmii_adapter_base)
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
@@ -93,8 +89,11 @@
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (tse_pcs_base && sgmii_adapter_base)
+ if (phy_dev && sgmii_adapter_base) {
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+ }
}
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index cad6588..958bbcf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -895,6 +895,7 @@
ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
&gmac->mux_handle, priv, priv->mii);
+ of_node_put(mdio_mux);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 16c538c..2e71e51 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -215,6 +215,9 @@
if (queue == 0 || queue == 4) {
value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else if (queue > 4) {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
} else {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 07b1b83..53efcc9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -68,9 +68,9 @@
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
- return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
!(value & PTP_TCR_TSINIT),
- 10000, 100000);
+ 10, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a46c322..41e71a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -738,19 +738,10 @@
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0) {
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -2755,6 +2746,14 @@
stmmac_mmc_setup(priv);
+ if (ptp_register) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ }
+
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -2908,6 +2907,15 @@
goto init_error;
}
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
+ }
+
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3023,6 +3031,10 @@
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+ /* Powerdown Serdes if there is */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
@@ -5046,7 +5058,7 @@
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen) {
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph = true;
dev_info(priv->device, "SPH feature enabled\n");
@@ -5179,14 +5191,6 @@
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@@ -5198,8 +5202,6 @@
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_phy_setup:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 272cb47..a7a1227 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -175,7 +175,7 @@
return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -227,8 +227,6 @@
pcim_iounmap_regions(pdev, BIT(i));
break;
}
-
- pci_disable_device(pdev);
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3183d88..f70d8d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -432,8 +432,7 @@
plat->phylink_node = np;
/* Get max speed of operation from device tree */
- if (of_property_read_u32(np, "max-speed", &plat->max_speed))
- plat->max_speed = -1;
+ of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
@@ -815,7 +814,13 @@
if (ret)
return ret;
- stmmac_init_tstamp_counter(priv, priv->systime_flags);
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 0462dcc..dd5c4ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1084,8 +1084,9 @@
unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
struct tc_cls_u32_offload cls_u32 = { };
struct stmmac_packet_attrs attr = { };
- struct tc_action **actions, *act;
+ struct tc_action **actions;
struct tc_u32_sel *sel;
+ struct tcf_gact *gact;
struct tcf_exts *exts;
int ret, i, nk = 1;
@@ -1104,14 +1105,14 @@
goto cleanup_sel;
}
- actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
if (!actions) {
ret = -ENOMEM;
goto cleanup_exts;
}
- act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
- if (!act) {
+ gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL);
+ if (!gact) {
ret = -ENOMEM;
goto cleanup_actions;
}
@@ -1126,9 +1127,7 @@
exts->nr_actions = nk;
exts->actions = actions;
for (i = 0; i < nk; i++) {
- struct tcf_gact *gact = to_gact(&act[i]);
-
- actions[i] = &act[i];
+ actions[i] = (struct tc_action *)&gact[i];
gact->tcf_action = TC_ACT_SHOT;
}
@@ -1152,7 +1151,7 @@
stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
cleanup_act:
- kfree(act);
+ kfree(gact);
cleanup_actions:
kfree(actions);
cleanup_exts:
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 54b53db..940db4e 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2063,9 +2063,9 @@
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
@@ -3163,7 +3163,7 @@
if (err) {
printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
"aborting.\n");
- goto err_out_iounmap;
+ goto err_out_free_coherent;
}
pci_set_drvdata(pdev, hp);
@@ -3196,6 +3196,10 @@
return 0;
+err_out_free_coherent:
+ dma_free_coherent(hp->dma_dev, PAGE_SIZE,
+ hp->happy_block, hp->hblock_dvma);
+
err_out_iounmap:
iounmap(hp->gregs);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 0805ede..059d68d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1716,6 +1716,7 @@
if (IS_ERR(cpts)) {
int ret = PTR_ERR(cpts);
+ of_node_put(node);
if (ret == -EOPNOTSUPP) {
dev_info(dev, "cpts disabled\n");
return 0;
@@ -2064,9 +2065,9 @@
if (!node)
return -ENOENT;
common->port_num = of_get_child_count(node);
+ of_node_put(node);
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
- of_node_put(node);
if (common->port_num != 1)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b0f00b4..5af0f9f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -864,6 +864,8 @@
err_cleanup:
if (!cpsw->usage_count) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
cpdma_ctlr_stop(cpsw->dma);
cpsw_destroy_xdp_rxqs(cpsw);
}
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 31cefd6..a1ee205 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1255,8 +1255,10 @@
data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
sizeof(struct cpsw_slave_data),
GFP_KERNEL);
- if (!data->slave_data)
+ if (!data->slave_data) {
+ of_node_put(tmp_node);
return -ENOMEM;
+ }
/* Populate all the child nodes here...
*/
@@ -1353,6 +1355,7 @@
err_node_put:
of_node_put(port_np);
+ of_node_put(tmp_node);
return ret;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c62f474..fcebd24 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1302,12 +1302,15 @@
data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
&data->rxdma, GFP_KERNEL);
- if (!data->rxring)
+ if (!data->rxring) {
+ free_irq(data->irq_num, dev);
return -ENOMEM;
+ }
data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
&data->txdma, GFP_KERNEL);
if (!data->txring) {
+ free_irq(data->irq_num, dev);
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
return -ENOMEM;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0baf851..3d91baf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -857,46 +857,53 @@
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
dma_addr_t phys;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-
/* Ensure we see complete descriptor update */
dma_rmb();
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
- DMA_FROM_DEVICE);
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
+
+ netif_rx(skb);
+
+ size += length;
+ packets++;
}
- netif_rx(skb);
-
- size += length;
- packets++;
-
new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
@@ -905,7 +912,7 @@
if (net_ratelimit())
netdev_err(ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
- return;
+ break;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -913,6 +920,11 @@
cur_p->status = 0;
cur_p->skb = new_skb;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
@@ -2048,15 +2060,14 @@
if (ret)
goto cleanup_clk;
- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (lp->phy_node) {
- ret = axienet_mdio_setup(lp);
- if (ret)
- dev_warn(&pdev->dev,
- "error registering MDIO bus: %d\n", ret);
- }
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+
if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!lp->phy_node) {
dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
ret = -EINVAL;
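
Reviewer note: the axienet rework above changes the RX loop so that a descriptor whose replacement buffer could not be allocated keeps a NULL skb and the tail pointer is only advanced past slots that were successfully refilled; a later pass then skips the already-delivered slot instead of handing the same data to the stack twice. A compressed userspace sketch of that invariant follows, with a hypothetical ring of plain buffers standing in for the DMA descriptors.

#include <stdio.h>
#include <stdlib.h>

#define RING 4

struct slot {
	int complete;		/* "hardware" marked the slot done */
	char *buf;		/* NULL means "consumed but not yet refilled" */
};

static char *try_alloc(int allow)
{
	return allow ? malloc(64) : NULL;	/* simulate allocation failure */
}

/* Process completed slots; only slots that get a fresh buffer are handed
 * back to the "hardware" (i.e. the consumer index advances past them). */
static int rx_poll(struct slot *ring, int *ci, int allow_alloc)
{
	int handed_back = 0;

	while (ring[*ci].complete) {
		if (ring[*ci].buf) {		/* skip slots consumed on an earlier pass */
			printf("deliver packet from slot %d\n", *ci);
			free(ring[*ci].buf);
			ring[*ci].buf = NULL;
		}

		ring[*ci].buf = try_alloc(allow_alloc);
		if (!ring[*ci].buf)
			break;			/* keep ownership; retry on the next poll */

		ring[*ci].complete = 0;		/* slot is usable by hardware again */
		handed_back++;
		*ci = (*ci + 1) % RING;
	}
	return handed_back;
}

int main(void)
{
	struct slot ring[RING];
	int ci = 0, i;

	for (i = 0; i < RING; i++) {
		ring[i].complete = 1;
		ring[i].buf = malloc(64);
	}

	printf("first pass (allocations failing): refilled %d\n",
	       rx_poll(ring, &ci, 0));
	printf("second pass (allocations OK): refilled %d\n",
	       rx_poll(ring, &ci, 1));

	for (i = 0; i < RING; i++)
		free(ring[i].buf);
	return 0;
}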
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 4bd44fb..f6ea4a0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -820,10 +820,10 @@
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
struct mii_bus *bus;
- int rc;
struct resource res;
struct device_node *np = of_get_parent(lp->phy_node);
struct device_node *npp;
+ int rc, ret;
/* Don't register the MDIO bus if the phy_node or its parent node
* can't be found.
@@ -833,8 +833,14 @@
return -ENODEV;
}
npp = of_get_parent(np);
-
- of_address_to_resource(npp, 0, &res);
+ ret = of_address_to_resource(npp, 0, &res);
+ of_node_put(npp);
+ if (ret) {
+ dev_err(dev, "%s resource error!\n",
+ dev->of_node->full_name);
+ of_node_put(np);
+ return ret;
+ }
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
phydev = of_phy_find_device(lp->phy_node);
@@ -843,6 +849,7 @@
"MDIO of the phy is not registered yet\n");
else
put_device(&phydev->mdio.dev);
+ of_node_put(np);
return 0;
}
@@ -855,6 +862,7 @@
bus = mdiobus_alloc();
if (!bus) {
dev_err(dev, "Failed to allocate mdiobus\n");
+ of_node_put(np);
return -ENOMEM;
}
@@ -867,6 +875,7 @@
bus->parent = dev;
rc = of_mdiobus_register(bus, np);
+ of_node_put(np);
if (rc) {
dev_err(dev, "Failed to register mdio bus.\n");
goto err_register;
@@ -923,8 +932,6 @@
xemaclite_disable_interrupts(lp);
if (lp->phy_node) {
- u32 bmcr;
-
lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
xemaclite_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
@@ -935,19 +942,6 @@
/* EmacLite doesn't support giga-bit speeds */
phy_set_max_speed(lp->phy_dev, SPEED_100);
-
- /* Don't advertise 1000BASE-T Full/Half duplex speeds */
- phy_write(lp->phy_dev, MII_CTRL1000, 0);
-
- /* Advertise only 10 and 100mbps full/half duplex speeds */
- phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
- ADVERTISE_CSMA);
-
- /* Restart auto negotiation */
- bmcr = phy_read(lp->phy_dev, MII_BMCR);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- phy_write(lp->phy_dev, MII_BMCR, bmcr);
-
phy_start(lp->phy_dev);
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5ddb2db..081939c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -772,7 +772,8 @@
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ __u8 *full_tos)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -797,6 +798,8 @@
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
+ if (full_tos)
+ *full_tos = tos;
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
@@ -850,8 +853,7 @@
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
@@ -885,6 +887,7 @@
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
+ __u8 full_tos;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
@@ -895,7 +898,7 @@
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, &full_tos);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -939,7 +942,7 @@
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -1121,7 +1124,7 @@
1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
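
Reviewer note: the new full_tos parameter exists because fl4->flowi4_tos is set with RT_TOS(), which masks the value down to IPTOS_TOS_MASK (0x1e) and discards the upper DSCP/precedence bits; feeding fl4.flowi4_tos back into ip_tunnel_ecn_encap() would therefore build the outer header from a truncated TOS, so the unmasked value is carried separately in full_tos. A small sketch of the masking involved; the sample TOS value is illustrative.

#include <stdio.h>
#include <stdint.h>

#define IPTOS_TOS_MASK 0x1E		/* what RT_TOS() keeps */

int main(void)
{
	uint8_t inner_tos = 0xb8;	/* DSCP EF (46) << 2, ECN bits clear */
	uint8_t route_tos = inner_tos & IPTOS_TOS_MASK;	/* used for the route lookup */

	printf("inner tos  : 0x%02x (dscp %u)\n", inner_tos, inner_tos >> 2);
	printf("RT_TOS()'d : 0x%02x (dscp %u)  <- upper bits lost\n",
	       route_tos, route_tos >> 2);
	printf("full_tos   : 0x%02x (dscp %u)  <- what the outer header should inherit\n",
	       inner_tos, inner_tos >> 2);
	return 0;
}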
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index bd0beb1..83dc1c2 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -311,7 +311,6 @@
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
- dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
@@ -674,14 +673,16 @@
*/
netif_stop_queue(sp->dev);
+ unregister_netdev(sp->dev);
+
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
- /* Free all 6pack frame buffers. */
+ /* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
- unregister_netdev(sp->dev);
+ free_netdev(sp->dev);
}
/* Perform I/O control on an active 6pack channel. */
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 1ad6085..5c17c92 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -533,7 +533,7 @@
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 2201038..b9646b3 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1353,7 +1353,9 @@
rrpriv->fw_running = 0;
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
del_timer_sync(&rrpriv->timer);
+ spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a0f338c..3678784 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -977,7 +977,8 @@
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
-
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
/* Is the current data path through the VF NIC? */
bool data_path_is_vf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6a7ab93..d15da82 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1327,6 +1327,10 @@
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index e367638..f2020be 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2290,6 +2290,7 @@
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
+ struct net_device *ndev;
struct pci_dev *pdev;
u32 serial;
@@ -2316,6 +2317,18 @@
return hv_get_drvdata(ndev_ctx->device_ctx);
}
+ /* Fallback path to check synthetic vf with
+ * the help of the MAC address
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+ netdev_notice(vf_netdev,
+ "falling back to mac addr based matching\n");
+ return ndev;
+ }
+ }
+
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
return NULL;
@@ -2406,6 +2419,11 @@
return NOTIFY_OK;
net_device_ctx->data_path_is_vf = vf_is_up;
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
@@ -2429,6 +2447,7 @@
netvsc_vf_setxdp(vf_netdev, NULL);
+ reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
@@ -2466,6 +2485,7 @@
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+ init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
@@ -2629,7 +2649,10 @@
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
-
+ if (!ndev_ctx->saved_netvsc_dev_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
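
Reviewer note: the hyperv changes add a struct completion (vf_add) that the channel callback completes once the host's VF-association message arrives, so the VF-change handler can block until that association is known before calling netvsc_switch_datapath(). The same signal-then-wait shape is sketched below as a standalone pthread program; the kernel primitives init_completion()/complete()/wait_for_completion() are replaced here by a condition variable, and the names and delay are illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-in for struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion vf_add;

static void *host_message(void *arg)
{
	(void)arg;
	sleep(1);			/* the VF-association message arrives later */
	puts("host: VF association received");
	complete(&vf_add);
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&vf_add);
	pthread_create(&t, NULL, host_message, NULL);

	puts("guest: waiting for the VF association from host");
	wait_for_completion(&vf_add);	/* mirrors the wait added before the datapath switch */
	puts("guest: switching data path to the VF");

	pthread_join(t, NULL);
	return 0;
}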
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 7db9cbd..07adbee 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1310,10 +1310,11 @@
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 89c046b..4517517 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -504,6 +504,7 @@
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 2a65efd..64b12e4 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1209,9 +1209,10 @@
struct gsi_event *event_done;
struct gsi_event *event;
struct gsi_trans *trans;
+ u32 trans_count = 0;
u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
trans_info = &channel->trans_info;
@@ -1232,6 +1233,7 @@
do {
trans->len = __le16_to_cpu(event->len);
byte_count += trans->len;
+ trans_count++;
/* Move on to the next event and transaction */
if (--event_avail)
@@ -1243,26 +1245,24 @@
/* We record RX bytes when they are received */
channel->byte_count += byte_count;
- channel->trans_count++;
+ channel->trans_count += trans_count;
}
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
- size_t size = count * GSI_RING_ELEMENT_SIZE;
+ u32 size = count * GSI_RING_ELEMENT_SIZE;
struct device *dev = gsi->dev;
dma_addr_t addr;
- /* Hardware requires a 2^n ring size, with alignment equal to size */
+ /* Hardware requires a 2^n ring size, with alignment equal to size.
+ * The DMA address returned by dma_alloc_coherent() is guaranteed to
+ * be a power-of-2 number of pages, which satisfies the requirement.
+ */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
- if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, addr);
- dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
- size);
- return -EINVAL; /* Not a good error value, but distinct */
- } else if (!ring->virt) {
+ if (!ring->virt)
return -ENOMEM;
- }
+
ring->addr = addr;
ring->count = count;
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index 1785c9d..d58dce4 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -14,7 +14,7 @@
struct gsi_ring;
struct gsi_channel;
-#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
+#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
/* Return the entry that follows one provided in a transaction pool */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 6c3ed5b..70c2b58 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -153,11 +153,10 @@
size = __roundup_pow_of_two(size);
total_size = (count + max_alloc - 1) * size;
- /* The allocator will give us a power-of-2 number of pages. But we
- * can't guarantee that, so request it. That way we won't waste any
- * memory that would be available beyond the required space.
- *
- * Note that gsi_trans_pool_exit_dma() assumes the total allocated
+ /* The allocator will give us a power-of-2 number of pages
+ * sufficient to satisfy our request. Round up our requested
+ * size to avoid any unused space in the allocation. This way
+ * gsi_trans_pool_exit_dma() can assume the total allocated
* size is exactly (count * size).
*/
total_size = get_order(total_size) << PAGE_SHIFT;
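
Reviewer note: the reworked gsi_trans.c comment leans on the fact that the coherent allocator hands back a power-of-2 number of whole pages, so the pool code rounds its request up to that boundary and the later free can work from the same figure. A userspace approximation of that rounding is sketched below; get_order() is reimplemented here purely for illustration and a 4 KiB page size is assumed.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages for the example */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order n such that (1 << n) pages hold at least size bytes. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* pages needed, rounded up */
	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long requested = 3 * 3000;		/* e.g. count * element size */
	unsigned long granted = PAGE_SIZE << get_order(requested);

	printf("requested %lu bytes -> allocator grants %lu bytes (order %u)\n",
	       requested, granted, get_order(requested));

	/* Rounding the pool size up to 'granted' leaves no unused space at the
	 * end of the allocation, so the exit path can assume the total is
	 * exactly what was asked for. */
	return 0;
}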
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index a47378b..dc94ce0 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -154,7 +154,7 @@
* of entries, as the IPv4 and IPv6 route tables have the same number
* of entries.
*/
-#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
+#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7fc1058..ba05e26 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -72,8 +72,8 @@
* that can be included in a single transaction.
*/
struct gsi_channel_data {
- u16 tre_count;
- u16 event_count;
+ u16 tre_count; /* must be a power of 2 */
+ u16 event_count; /* must be a power of 2 */
u8 tlv_count;
};
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 621648c..7e12084 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -610,12 +610,14 @@
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
+ u32 buffer_size;
u32 limit;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
+ limit = ipa_aggr_size_kb(buffer_size);
val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
@@ -882,7 +884,7 @@
err_trans_free:
gsi_trans_free(trans);
err_free_pages:
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
return -ENOMEM;
}
@@ -1177,7 +1179,7 @@
struct page *page = trans->data;
if (page)
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
}
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index a78d660..25a8d02 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -414,7 +414,7 @@
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 1a87a49..880ec35 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -308,12 +308,12 @@
mem = &ipa->mem[IPA_MEM_V4_ROUTE];
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = &ipa->mem[IPA_MEM_V6_ROUTE];
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = &ipa->mem[IPA_MEM_V4_FILTER];
req.v4_filter_tbl_start_valid = 1;
@@ -352,8 +352,7 @@
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count =
- mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
@@ -361,8 +360,7 @@
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count =
- mem->size / IPA_TABLE_ENTRY_SIZE;
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 7341337..ecf9f86 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -271,7 +271,7 @@
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -292,7 +292,7 @@
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -456,7 +456,7 @@
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -477,7 +477,7 @@
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index cfac456..58de425 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -82,9 +82,11 @@
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes, however it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -125,18 +127,19 @@
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -177,18 +180,20 @@
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 0747866..02c1928 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -27,28 +27,38 @@
/**
* DOC: IPA Filter and Route Tables
*
- * The IPA has tables defined in its local shared memory that define filter
- * and routing rules. Each entry in these tables contains a 64-bit DMA
- * address that refers to DRAM (system memory) containing a rule definition.
+ * The IPA has tables defined in its local (IPA-resident) memory that define
+ * filter and routing rules. An entry in either of these tables is a little
+ * endian 64-bit "slot" that holds the address of a rule definition. (The
+ * size of these slots is 64 bits regardless of the host DMA address size.)
+ *
+ * Separate tables (both filter and route) are used for IPv4 and IPv6. There
+ * are normally another set of "hashed" filter and route tables, which are
+ * used with a hash of message metadata. Hashed operation is not supported
+ * by all IPA hardware (IPA v4.2 doesn't support hashed tables).
+ *
+ * Rules can be in local memory or in DRAM (system memory). The offset of
+ * an object (such as a route or filter table) in IPA-resident memory must
+ * 128-byte aligned. An object in system memory (such as a route or filter
+ * rule) must be at an 8-byte aligned address. We currently only place
+ * route or filter rules in system memory.
+ *
* A rule consists of a contiguous block of 32-bit values terminated with
* 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
* represents "no filtering" or "no routing," and is the reset value for
- * filter or route table rules. Separate tables (both filter and route)
- * used for IPv4 and IPv6. Additionally, there can be hashed filter or
- * route tables, which are used when a hash of message metadata matches.
- * Hashed operation is not supported by all IPA hardware.
+ * filter or route table rules.
*
* Each filter rule is associated with an AP or modem TX endpoint, though
- * not all TX endpoints support filtering. The first 64-bit entry in a
+ * not all TX endpoints support filtering. The first 64-bit slot in a
* filter table is a bitmap indicating which endpoints have entries in
* the table. The low-order bit (bit 0) in this bitmap represents a
* special global filter, which applies to all traffic. This is not
* used in the current code. Bit 1, if set, indicates that there is an
- * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
- * table. Bit 2, if set, indicates there is an entry for endpoint 1,
- * and so on. Space is set aside in IPA local memory to hold as many
- * filter table entries as might be required, but typically they are not
- * all used.
+ * entry (i.e. slot containing a system address referring to a rule) for
+ * endpoint 0 in the table. Bit 3, if set, indicates there is an entry
+ * for endpoint 2, and so on. Space is set aside in IPA local memory to
+ * hold as many filter table entries as might be required, but typically
+ * they are not all used.
*
* The AP initializes all entries in a filter table to refer to a "zero"
* entry. Once initialized the modem and AP update the entries for
@@ -96,13 +106,8 @@
* ----------------------
*/
-/* IPA hardware constrains filter and route tables alignment */
-#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
-
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
@@ -118,21 +123,14 @@
/* Check things that can be validated at build time. */
static void ipa_table_validate_build(void)
{
- /* IPA hardware accesses memory 128 bytes at a time. Addresses
- * referred to by entries in filter and route tables must be
- * aligned on 128-byte byte boundaries. The only rule address
- * ever use is the "zero rule", and it's aligned at the base
- * of a coherent DMA allocation.
+ /* Filter and route tables contain DMA addresses that refer
+ * to filter or route rules. But the size of a table entry
+ * is 64 bits regardless of what the size of an AP DMA address
+ * is. A fixed constant defines the size of an entry, and
+ * code in ipa_table_init() uses a pointer to __le64 to
+ * initialize tables.
*/
- BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
-
- /* Filter and route tables contain DMA addresses that refer to
- * filter or route rules. We use a fixed constant to represent
- * the size of either type of table entry. Code in ipa_table_init()
- * uses a pointer to __le64 to initialize table entriews.
- */
- BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
- BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
+ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
/* A "zero rule" is used to represent no filtering or no routing.
* It is a 64-bit block of zeroed memory. Code in ipa_table_init()
@@ -163,7 +161,7 @@
else
mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
: &ipa->mem[IPA_MEM_V4_ROUTE];
- size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
+ size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
} else {
if (ipv6)
mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
@@ -171,7 +169,7 @@
else
mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
: &ipa->mem[IPA_MEM_V4_FILTER];
- size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
+ size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
}
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
@@ -270,8 +268,8 @@
if (filter)
first++; /* skip over bitmap */
- offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
- size = count * IPA_TABLE_ENTRY_SIZE;
+ offset = mem->offset + first * sizeof(__le64);
+ size = count * sizeof(__le64);
addr = ipa_table_addr(ipa, false, count);
ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
@@ -455,11 +453,11 @@
count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
- count = mem->size / IPA_TABLE_ENTRY_SIZE;
- hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
+ count = mem->size / sizeof(__le64);
+ hash_count = hash_mem->size / sizeof(__le64);
}
- size = count * IPA_TABLE_ENTRY_SIZE;
- hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
+ size = count * sizeof(__le64);
+ hash_size = hash_count * sizeof(__le64);
addr = ipa_table_addr(ipa, filter, count);
hash_addr = ipa_table_addr(ipa, filter, hash_count);
@@ -662,7 +660,13 @@
ipa_table_validate_build();
- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ /* The IPA hardware requires route and filter table rules to be
+ * aligned on a 128-byte boundary. We put the "zero rule" at the
+ * base of the table area allocated here. The DMA address returned
+ * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
+ * of pages, which satisfies the rule alignment requirement.
+ */
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (!virt)
return -ENOMEM;
@@ -694,7 +698,7 @@
struct device *dev = &ipa->pdev->dev;
size_t size;
- size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
ipa->table_addr = 0;
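
Reviewer note: the rewritten DOC comment describes the filter-table layout — slot 0 holds an endpoint bitmap (bit 0 a currently unused global filter, bit n+1 for endpoint n) and one 64-bit rule slot follows for each bit that is set, which is why the code in this file sizes filter tables as 1 + hweight32(filter_map) entries. A small sketch of that bookkeeping follows; the mapping of set bits to consecutive slots is my reading of the comment and the example bitmap is hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical endpoint bitmap: endpoints 1, 4 and 7 have filter rules
	 * (bit n + 1 represents endpoint n; bit 0 is the unused global filter). */
	uint32_t filter_map = (1u << 2) | (1u << 5) | (1u << 8);
	unsigned int slots = 1 + __builtin_popcount(filter_map);	/* bitmap + one per rule */
	unsigned int slot = 1;						/* slot 0 holds the bitmap */
	unsigned int bit;

	printf("table needs %u 64-bit slots (%zu bytes)\n",
	       slots, slots * sizeof(uint64_t));

	for (bit = 1; bit < 32; bit++)
		if (filter_map & (1u << bit))
			printf("slot %u -> rule for endpoint %u\n", slot++, bit - 1);

	return 0;
}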
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 78038d1..35e519c 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -10,12 +10,12 @@
struct ipa;
-/* The size of a filter or route table entry */
-#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
-
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8801d09..a33149e 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -496,7 +496,6 @@
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -506,6 +505,8 @@
if (skb_mac_header_was_set(skb)) {
/* In this mode we don't care about
* multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -590,7 +591,7 @@
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -620,6 +621,7 @@
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1cedb63..f01078b 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 789a124..3e56415 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -240,6 +240,7 @@
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
static bool send_sci(const struct macsec_secy *secy)
{
@@ -1389,7 +1390,8 @@
return NULL;
}
-static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
+static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
+ bool active)
{
struct macsec_rx_sc *rx_sc;
struct macsec_dev *macsec;
@@ -1413,7 +1415,7 @@
}
rx_sc->sci = sci;
- rx_sc->active = true;
+ rx_sc->active = active;
refcount_set(&rx_sc->refcnt, 1);
secy = &macsec_priv(dev)->secy;
@@ -1694,7 +1696,7 @@
return false;
if (attrs[MACSEC_SA_ATTR_PN] &&
- *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1750,7 +1752,8 @@
}
pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
- if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ if (tb_sa[MACSEC_SA_ATTR_PN] &&
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
rtnl_unlock();
@@ -1766,7 +1769,7 @@
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -1821,6 +1824,7 @@
secy->key_len);
err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ memzero_explicit(ctx.sa.key, secy->key_len);
if (err)
goto cleanup;
}
@@ -1839,7 +1843,7 @@
return 0;
cleanup:
- kfree(rx_sa);
+ macsec_rxsa_put(rx_sa);
rtnl_unlock();
return err;
}
@@ -1865,7 +1869,7 @@
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct macsec_secy *secy;
- bool was_active;
+ bool active = true;
int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1887,16 +1891,15 @@
secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
- rx_sc = create_rx_sc(dev, sci);
+ if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+ active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+ rx_sc = create_rx_sc(dev, sci, active);
if (IS_ERR(rx_sc)) {
rtnl_unlock();
return PTR_ERR(rx_sc);
}
- was_active = rx_sc->active;
- if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
- rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
-
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
struct macsec_context ctx;
@@ -1920,7 +1923,8 @@
return 0;
cleanup:
- rx_sc->active = was_active;
+ del_rx_sc(secy, sci);
+ free_rx_sc(rx_sc);
rtnl_unlock();
return ret;
}
@@ -1936,7 +1940,7 @@
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2008,7 +2012,7 @@
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -2063,6 +2067,7 @@
secy->key_len);
err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ memzero_explicit(ctx.sa.key, secy->key_len);
if (err)
goto cleanup;
}
@@ -2082,7 +2087,7 @@
cleanup:
secy->operational = was_operational;
- kfree(tx_sa);
+ macsec_txsa_put(tx_sa);
rtnl_unlock();
return err;
}
@@ -2290,7 +2295,7 @@
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2559,7 +2564,7 @@
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
int i;
- if (secy->n_rx_sc > 0)
+ if (secy->rx_sc)
return true;
for (i = 0; i < MACSEC_NUM_AN; i++)
@@ -2643,11 +2648,6 @@
if (ret)
goto rollback;
- /* Force features update, since they are different for SW MACSec and
- * HW offloading cases.
- */
- netdev_update_features(dev);
-
rtnl_unlock();
return 0;
@@ -3415,16 +3415,9 @@
return ret;
}
-#define SW_MACSEC_FEATURES \
+#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-/* If h/w offloading is enabled, use real device features save for
- * VLAN_FEATURES - they require additional ops
- * HW_MACSEC - no reason to report it
- */
-#define REAL_DEV_FEATURES(dev) \
- ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
-
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3441,12 +3434,8 @@
return err;
}
- if (macsec_is_offloaded(macsec)) {
- dev->features = REAL_DEV_FEATURES(real_dev);
- } else {
- dev->features = real_dev->features & SW_MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
- }
+ dev->features = real_dev->features & MACSEC_FEATURES;
+ dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
@@ -3475,10 +3464,7 @@
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
- if (macsec_is_offloaded(macsec))
- return REAL_DEV_FEATURES(real_dev);
-
- features &= (real_dev->features & SW_MACSEC_FEATURES) |
+ features &= (real_dev->features & MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
@@ -3737,9 +3723,6 @@
secy->operational = tx_sa && tx_sa->active;
}
- if (data[IFLA_MACSEC_WINDOW])
- secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
if (data[IFLA_MACSEC_ENCRYPT])
tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
@@ -3785,6 +3768,16 @@
}
}
+ if (data[IFLA_MACSEC_WINDOW]) {
+ secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+ /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+ * for XPN cipher suites */
+ if (secy->xpn &&
+ secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3814,13 +3807,12 @@
ret = macsec_changelink_common(dev, data);
if (ret)
- return ret;
+ goto cleanup;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
- int ret;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (!ops) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index c8d803d..5869bc2 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -139,7 +139,7 @@
u32 idx = macvlan_eth_hash(addr);
struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
- hlist_for_each_entry_rcu(entry, h, hlist) {
+ hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(entry->addr, addr) &&
entry->vlan == vlan)
return entry;
@@ -1176,7 +1176,7 @@
{
ether_setup(dev);
- dev->min_mtu = 0;
+ /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
@@ -1509,8 +1509,10 @@
/* the macvlan port may be freed by macvlan_uninit when fail to register.
* so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(lowerdev))
+ if (create && macvlan_port_get_rtnl(lowerdev)) {
+ macvlan_flush_sources(port, vlan);
macvlan_port_destroy(port->dev);
+ }
return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
@@ -1612,7 +1614,7 @@
struct hlist_head *h = &vlan->port->vlan_source_hash[i];
struct macvlan_source_entry *entry;
- hlist_for_each_entry_rcu(entry, h, hlist) {
+ hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (entry->vlan != vlan)
continue;
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 694e2f5..39801c3 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -133,11 +133,17 @@
dev->tx_queue_len = TUN_READQ_SIZE;
}
+static struct net *macvtap_link_net(const struct net_device *dev)
+{
+ return dev_net(macvlan_dev_real_dev(dev));
+}
+
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
.kind = "macvtap",
.setup = macvtap_setup,
.newlink = macvtap_newlink,
.dellink = macvtap_dellink,
+ .get_link_net = macvtap_link_net,
.priv_size = sizeof(struct macvtap_dev),
};
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index fbd3689..5d171e7 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -5,20 +5,18 @@
* Copyright (C) 2014-2017 Broadcom
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/platform_data/mdio-bcm-unimac.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_mdio.h>
-
-#include <linux/platform_data/mdio-bcm-unimac.h>
#define MDIO_CMD 0x00
#define MDIO_START_BUSY (1 << 29)
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275..9958819 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -14,10 +14,10 @@
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
-#include <linux/module.h>
-#include <linux/mdio-bitbang.h>
-#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define MDIO_READ 2
#define MDIO_WRITE 1
diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 1afd6fc..95ce274 100644
--- a/drivers/net/mdio/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
@@ -4,9 +4,9 @@
*/
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/io.h>
#include "mdio-cavium.h"
diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 1b00235..56c8f91 100644
--- a/drivers/net/mdio/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
@@ -17,15 +17,15 @@
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/mdio-gpio.h>
#include <linux/mdio-bitbang.h>
#include <linux/mdio-gpio.h>
-#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/mdio-gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 25c25ea..9cd71d8 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -3,10 +3,10 @@
/* Copyright (c) 2020 Sartura Ltd. */
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index f0a6bfa..49d4e9a 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -7,12 +7,12 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/regmap.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
/* MII address register definitions */
#define MII_ADDR_REG_ADDR 0x10
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 11f583f..037649b 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -6,14 +6,14 @@
* Copyright (c) 2017 Microsemi Corporation
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
#define MSCC_MIIM_REG_STATUS 0x0
#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)
@@ -76,6 +76,9 @@
u32 val;
int ret;
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
ret = mscc_miim_wait_pending(bus);
if (ret)
goto out;
@@ -105,6 +108,9 @@
struct mscc_miim_dev *miim = bus->priv;
int ret;
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
ret = mscc_miim_wait_pending(bus);
if (ret < 0)
goto out;
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 42fb5f1..641cfa4 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -3,14 +3,14 @@
* Copyright 2016 Broadcom
*/
#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/of_mdio.h>
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/mdio-mux.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004
diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index 10a758f..3c7f16f 100644
--- a/drivers/net/mdio/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
@@ -3,13 +3,13 @@
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
-#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/of_mdio.h>
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/mdio-mux.h>
#include <linux/gpio/consumer.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index d1a8780..c02fb2a 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -7,13 +7,13 @@
* Copyright 2012 Freescale Semiconductor, Inc.
*/
-#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/mdio-mux.h>
+#include <linux/platform_device.h>
struct mdio_mux_mmioreg_state {
void *mux_handle;
diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index d656438..527acfc 100644
--- a/drivers/net/mdio/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
@@ -4,10 +4,10 @@
* Copyright 2019 NXP
*/
-#include <linux/platform_device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
+#include <linux/platform_device.h>
struct mdio_mux_multiplexer_state {
struct mux_control *muxc;
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index ccb3ee7..3dde0c2 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -3,12 +3,12 @@
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
-#include <linux/platform_device.h>
-#include <linux/mdio-mux.h>
-#include <linux/of_mdio.h>
#include <linux/device.h>
+#include <linux/mdio-mux.h>
#include <linux/module.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#define DRV_DESCRIPTION "MDIO bus multiplexer driver"
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 6faf393..e096e68 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -3,13 +3,13 @@
* Copyright (C) 2009-2015 Cavium, Inc.
*/
-#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
#include <linux/phy.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
#include "mdio-cavium.h"
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index dd7430c..822d2cd 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -3,14 +3,14 @@
* Copyright (C) 2009-2016 Cavium, Inc.
*/
+#include <linux/acpi.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/phy.h>
-#include <linux/io.h>
-#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "mdio-cavium.h"
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 461207c..7ab4e26 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -13,11 +13,11 @@
#include <linux/io.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/prefetch.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/prefetch.h>
#include <net/ip.h>
static bool xgene_mdio_status;
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 4daf94b..5bae47f 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -8,17 +8,17 @@
* out of the OpenFirmware device tree and using it to populate an mii_bus.
*/
-#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/netdevice.h>
#include <linux/err.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */
@@ -332,6 +332,7 @@
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index a438202..5085426 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -351,10 +351,12 @@
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
- nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].key)
return -ENOMEM;
- nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
@@ -496,7 +498,7 @@
if (offmap->map.map_flags)
return -EINVAL;
- nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
if (!nmap)
return -ENOMEM;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index ad6dbf0..4fb0638 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@
unsigned int start;
do {
- start = u64_stats_fetch_begin(&ns->syncp);
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
}
static int
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 41e7c14..7045595 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -34,6 +34,8 @@
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
+#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
@@ -87,6 +89,9 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -121,6 +126,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP 1000
+#define AQR107_OP_IN_PROG_TIMEOUT 100000
+
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -230,9 +241,20 @@
phydev->advertising))
reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+ /* Handle the case when the 2.5G and 5G speeds are not advertised */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
MDIO_AN_VEND_PROV_1000BASET_HALF |
- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -556,16 +578,52 @@
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+ int val, err;
+
+ /* The datasheet notes to wait at least 1ms after issuing a
+ * processor intensive operation before checking.
+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+ * because that just determines the maximum time slept, not the minimum.
+ */
+ usleep_range(1000, 5000);
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_GEN_STAT2, val,
+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (err) {
+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
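
Reviewer note: aqr107_wait_processor_intensive_op() sleeps unconditionally before polling because, as its comment says, the sleep_before_read behaviour of the underlying poll helper only bounds the maximum time slept, not the minimum, while the datasheet wants at least 1 ms before the first status read. The same guaranteed-minimum-delay-then-poll shape is sketched below as a standalone program; the simulated "busy" flag and the timing constants are illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static void sleep_us(long us)
{
	struct timespec ts = { us / 1000000, (us % 1000000) * 1000L };
	nanosleep(&ts, NULL);
}

/* Pretend status register: the operation stays "in progress" for ~3 ms. */
static bool op_in_progress(const struct timespec *start)
{
	struct timespec now;
	long elapsed_us;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_us = (now.tv_sec - start->tv_sec) * 1000000L +
		     (now.tv_nsec - start->tv_nsec) / 1000L;
	return elapsed_us < 3000;
}

int main(void)
{
	struct timespec start;
	long waited_us = 0;
	const long poll_us = 1000;	/* poll interval */
	const long timeout_us = 100000;	/* give up after 100 ms */

	clock_gettime(CLOCK_MONOTONIC, &start);

	/* Guaranteed minimum delay before the first read, as the datasheet asks. */
	sleep_us(1000);
	waited_us += 1000;

	while (op_in_progress(&start)) {
		if (waited_us >= timeout_us) {
			puts("timeout: operation still in progress");
			return 1;
		}
		sleep_us(poll_us);
		waited_us += poll_us;
	}

	printf("operation finished after roughly %ld us\n", waited_us);
	return 0;
}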
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 6448613..0cde17b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -11,6 +11,7 @@
*/
#include "bcm-phy-lib.h"
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
@@ -622,6 +623,26 @@
if (err < 0)
return err;
+ /* The datasheet indicates the PHY needs up to 1us to complete a reset,
+ * build some slack here.
+ */
+ usleep_range(1000, 2000);
+
+ /* The PHY requires 65 MDC clock cycles to complete a write operation
+ * and turnaround the line properly.
+ *
+ * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac)
+ * may flag the lack of turn-around as a read failure. This is
+ * particularly true with this combination since the MDIO controller
+ * only used 64 MDC cycles. This is not a critical failure in this
+ * specific case and it has no functional impact otherwise, so we let
+ * that one go through. If there is a genuine bus error, the next read
+ * of MII_BRCM_FET_INTREG will error out.
+ */
+ err = phy_read(phydev, MII_BMCR);
+ if (err < 0 && err != -EIO)
+ return err;
+
reg = phy_read(phydev, MII_BRCM_FET_INTREG);
if (reg < 0)
return reg;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 3d75b98..db65164 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -243,9 +243,7 @@
if (misr_status < 0)
return misr_status;
- misr_status |= (DP83822_RX_ERR_HF_INT_EN |
- DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_LINK_STAT_INT_EN |
+ misr_status |= (DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
@@ -270,8 +268,7 @@
DP83822_EEE_ERROR_CHANGE_INT_EN);
if (!dp83822->fx_enabled)
- misr_status |= DP83822_MDI_XOVER_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
+ misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c716074..c8031e2 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -137,6 +137,7 @@
#define DP83867_DOWNSHIFT_2_COUNT 2
#define DP83867_DOWNSHIFT_4_COUNT 4
#define DP83867_DOWNSHIFT_8_COUNT 8
+#define DP83867_SGMII_AUTONEG_EN BIT(7)
/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
@@ -756,6 +757,14 @@
else
val &= ~DP83867_SGMII_TYPE;
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+ /* This is a SW workaround for link instability if RX_CTRL is
+ * not strapped to mode 3 or 4 in HW. This is required for SGMII
+ * in addition to clearing bit 7, handled above.
+ */
+ if (dp83867->rxctrl_strap_quirk)
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(8));
}
val = phy_read(phydev, DP83867_CFG3);
@@ -802,6 +811,32 @@
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+ /* There is a limitation in DP83867 PHY device where SGMII AN is
+ * only triggered once after the device is booted up. Even after the
+ * PHY TPI is down and up again, SGMII AN is not triggered and
+ * hence no new in-band message from PHY to MAC side SGMII.
+ * This could cause an issue during power up, when PHY is up prior
+ * to MAC. In this condition, once MAC side SGMII is up, MAC side
+ * SGMII wouldn't receive a new in-band message from the TI PHY with
+ * the correct link status, speed and duplex info.
+ * Thus, a SW solution is implemented here to retrigger SGMII Auto-Neg
+ * whenever there is a link change.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ int val = 0;
+
+ val = phy_clear_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ if (val < 0)
+ return;
+
+ phy_set_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ }
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -826,6 +861,8 @@
.suspend = genphy_suspend,
.resume = genphy_resume,
+
+ .link_change_notify = dp83867_link_change_notify,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index b1bb9b8..2b64318 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -650,7 +650,7 @@
cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
if (cssr1 < 0)
- return val;
+ return cssr1;
/* If the link settings are not resolved, mark the link down */
if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c416ab1..77ba6c3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -574,7 +574,7 @@
}
for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & (1 << i)) == 0) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
struct phy_device *phydev;
phydev = mdiobus_scan(bus, i);
@@ -1008,7 +1008,6 @@
return ret;
}
-EXPORT_SYMBOL_GPL(mdio_bus_init);
#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 92e94ac..bbbe198 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,7 +283,7 @@
}
}
- if (priv->led_mode >= 0)
+ if (priv->type && priv->led_mode >= 0)
kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
return 0;
@@ -299,10 +299,10 @@
type = priv->type;
- if (type->has_broadcast_disable)
+ if (type && type->has_broadcast_disable)
kszphy_broadcast_disable(phydev);
- if (type->has_nand_tree_disable)
+ if (type && type->has_nand_tree_disable)
kszphy_nand_tree_disable(phydev);
return kszphy_config_reset(phydev);
@@ -1112,7 +1112,7 @@
priv->type = type;
- if (type->led_mode_reg) {
+ if (type && type->led_mode_reg) {
ret = of_property_read_u32(np, "micrel,led-mode",
&priv->led_mode);
if (ret)
@@ -1133,7 +1133,8 @@
unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
- priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+ if (type)
+ priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
"micrel,rmii-reference-clock-select-25-mhz");
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b7b2521..c00eef4 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -632,6 +632,7 @@
list_del(&flow->list);
clear_bit(flow->index, bitmap);
+ memzero_explicit(flow->key, sizeof(flow->key));
kfree(flow);
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index db7866b..18e67eb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -124,10 +124,15 @@
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
- if (phydev->drv->ack_interrupt)
- return phydev->drv->ack_interrupt(phydev);
+ int ret = 0;
- return 0;
+ if (phydev->drv->ack_interrupt) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->ack_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
}
/**
@@ -982,6 +987,36 @@
}
/**
+ * phy_did_interrupt - Checks if the PHY generated an interrupt
+ * @phydev: target phy_device struct
+ */
+static int phy_did_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->did_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+/**
+ * phy_handle_interrupt - Handle PHY interrupt
+ * @phydev: target phy_device struct
+ */
+static irqreturn_t phy_handle_interrupt(struct phy_device *phydev)
+{
+ irqreturn_t ret;
+
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->handle_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+/**
* phy_interrupt - PHY interrupt handler
* @irq: interrupt line
* @phy_dat: phy_device pointer
@@ -994,9 +1029,9 @@
struct phy_driver *drv = phydev->drv;
if (drv->handle_interrupt)
- return drv->handle_interrupt(phydev);
+ return phy_handle_interrupt(phydev);
- if (drv->did_interrupt && !drv->did_interrupt(phydev))
+ if (drv->did_interrupt && !phy_did_interrupt(phydev))
return IRQ_NONE;
/* reschedule state queue work to run as soon as possible */
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index a05d837..850915a 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -75,6 +75,12 @@
.part = "MA5671A",
.modes = sfp_quirk_2500basex,
}, {
+ // Lantech 8330-262D-E can operate at 2500base-X, but
+ // incorrectly reports 2500MBd NRZ in its EEPROM
+ .vendor = "Lantech",
+ .part = "8330-262D-E",
+ .modes = sfp_quirk_2500basex,
+ }, {
.vendor = "UBNT",
.part = "UF-INSTANT",
.modes = sfp_quirk_ubnt_uf_instant,
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index efffa65..dcbe278 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -249,6 +249,7 @@
struct sfp_eeprom_id id;
unsigned int module_power_mW;
unsigned int module_t_start_up;
+ bool tx_fault_ignore;
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
@@ -1893,6 +1894,12 @@
else
sfp->module_t_start_up = T_START_UP;
+ if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) &&
+ !memcmp(id.base.vendor_pn, "MA5671A ", 16))
+ sfp->tx_fault_ignore = true;
+ else
+ sfp->tx_fault_ignore = false;
+
return 0;
}
@@ -2320,7 +2327,10 @@
mutex_lock(&sfp->st_mutex);
state = sfp_get_state(sfp);
changed = state ^ sfp->state;
- changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
+ if (sfp->tx_fault_ignore)
+ changed &= SFP_F_PRESENT | SFP_F_LOS;
+ else
+ changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
for (i = 0; i < GPIO_MAX; i++)
if (changed & BIT(i))
@@ -2417,7 +2427,7 @@
platform_set_drvdata(pdev, sfp);
- err = devm_add_action(sfp->dev, sfp_cleanup, sfp);
+ err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
if (err < 0)
return err;
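The HUAWEI/MA5671A match above compares fixed-width, space-padded EEPROM fields with memcmp() over the full field. A small self-contained sketch of that matching style, with a simplified struct standing in for the SFF-8472 base ID (assumed 16-byte vendor name and part number fields):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Vendor fields in the module EEPROM are fixed-width, space padded and
 * not NUL-terminated, so they are compared over the whole field with
 * memcmp(), never strcmp().
 */
struct eeprom_id {
	char vendor_name[16];
	char vendor_pn[16];
};

static bool is_ma5671a(const struct eeprom_id *id)
{
	return !memcmp(id->vendor_name, "HUAWEI          ", 16) &&
	       !memcmp(id->vendor_pn, "MA5671A         ", 16);
}

int main(void)
{
	struct eeprom_id id;

	memcpy(id.vendor_name, "HUAWEI          ", 16);
	memcpy(id.vendor_pn, "MA5671A         ", 16);
	printf("quirk applies: %d\n", is_ma5671a(&id));
	return 0;
}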
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 4406b35..5a0e5a8 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1103,7 +1103,7 @@
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index f81fb0b..369bd30 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -468,7 +468,7 @@
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
- if (!netif_running(dev))
+ if (!netif_running(dev) || !sl->tty)
goto out;
/* May be we must check transmitter timeout here ?
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 291fa44..45f2954 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -454,6 +454,7 @@
int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0;
+ of_node_put(np);
if (can_low_power) {
/* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index f549d3a..8f7bb15 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1202,7 +1202,8 @@
struct xdp_buff *xdp;
int i;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
for (i = 0; i < ctl->num; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
tap_get_user_xdp(q, xdp);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 615f377..7117d55 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1270,10 +1270,12 @@
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1344,8 +1346,10 @@
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1695,6 +1699,14 @@
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 3160443..5d96dc1 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1343,12 +1343,21 @@
TBNET_MATCH_FRAGS_ID);
ret = tb_register_property_dir("network", tbnet_dir);
- if (ret) {
- tb_property_free_dir(tbnet_dir);
- return ret;
- }
+ if (ret)
+ goto err_free_dir;
- return tb_register_service_driver(&tbnet_driver);
+ ret = tb_register_service_driver(&tbnet_driver);
+ if (ret)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ tb_unregister_property_dir("network", tbnet_dir);
+err_free_dir:
+ tb_property_free_dir(tbnet_dir);
+
+ return ret;
}
module_init(tbnet_init);
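The reworked tbnet_init() above follows the usual goto-based unwind style: each successfully acquired resource gets a label, and error paths jump to the label that releases everything obtained so far, in reverse order. A toy, compilable sketch of the same shape (acquire_a/acquire_b are placeholders, not the Thunderbolt APIs):

#include <stdio.h>

static int acquire_a(void) { puts("A acquired"); return 0; }
static void release_a(void) { puts("A released"); }
static int acquire_b(void) { puts("B failed"); return -1; }

static int init_both(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	return 0;

err_release_a:
	/* Undo only what was acquired before the failure. */
	release_a();
	return ret;
}

int main(void)
{
	printf("init_both() -> %d\n", init_both());
	return 0;
}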
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ffbc7ed..cb42fdb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -220,6 +220,9 @@
struct tun_prog __rcu *steering_prog;
struct tun_prog __rcu *filter_prog;
struct ethtool_link_ksettings link_ksettings;
+ /* init args */
+ struct file *file;
+ struct ifreq *ifr;
};
struct veth {
@@ -227,6 +230,9 @@
__be16 h_vlan_TCI;
};
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -279,6 +285,12 @@
}
}
+static void tun_napi_enable(struct tun_file *tfile)
+{
+ if (tfile->napi_enabled)
+ napi_enable(&tfile->napi);
+}
+
static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
@@ -640,7 +652,8 @@
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tfile);
+ if (!tfile->detached)
+ tun_napi_disable(tfile);
tun_napi_del(tfile);
}
@@ -659,8 +672,10 @@
if (clean) {
RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
- } else
+ } else {
tun_disable_queue(tun, tfile);
+ tun_napi_disable(tfile);
+ }
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
@@ -733,6 +748,7 @@
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_napi_del(tfile);
tun_enable_queue(tfile);
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -813,6 +829,7 @@
if (tfile->detached) {
tun_enable_queue(tfile);
+ tun_napi_enable(tfile);
} else {
sock_hold(&tfile->sk);
tun_napi_init(tun, tfile, napi, napi_frags);
@@ -964,6 +981,49 @@
static const struct ethtool_ops tun_ethtool_ops;
+static int tun_net_init(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct ifreq *ifr = tun->ifr;
+ int err;
+
+ tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+ if (!tun->pcpu_stats)
+ return -ENOMEM;
+
+ spin_lock_init(&tun->lock);
+
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0) {
+ free_percpu(tun->pcpu_stats);
+ return err;
+ }
+
+ tun_flow_init(tun);
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = dev->hw_features | NETIF_F_LLTX;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
+
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+ if (err < 0) {
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+ free_percpu(tun->pcpu_stats);
+ return err;
+ }
+ return 0;
+}
+
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
@@ -1205,6 +1265,7 @@
}
static const struct net_device_ops tun_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1285,6 +1346,7 @@
}
static const struct net_device_ops tap_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1325,7 +1387,7 @@
#define MAX_MTU 65535
/* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1413,7 +1475,8 @@
int err;
int i;
- if (it->nr_segs > MAX_SKB_FRAGS + 1)
+ if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
+ len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
return ERR_PTR(-EMSGSIZE);
local_bh_disable();
@@ -1923,17 +1986,25 @@
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
+ WARN_ON_ONCE(1);
+ err = -ENOMEM;
this_cpu_inc(tun->pcpu_stats->rx_dropped);
+napi_busy:
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
- WARN_ON(1);
- return -ENOMEM;
+ return err;
}
- local_bh_disable();
- napi_gro_frags(&tfile->napi);
- local_bh_enable();
+ if (likely(napi_schedule_prep(&tfile->napi))) {
+ local_bh_disable();
+ napi_gro_frags(&tfile->napi);
+ napi_complete(&tfile->napi);
+ local_bh_enable();
+ } else {
+ err = -EBUSY;
+ goto napi_busy;
+ }
mutex_unlock(&tfile->napi_mutex);
} else if (tfile->napi_enabled) {
struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
@@ -2257,10 +2328,6 @@
BUG_ON(!(list_empty(&tun->disabled)));
free_percpu(tun->pcpu_stats);
- /* We clear pcpu_stats so that tun_set_iff() can tell if
- * tun_free_netdev() has been called from register_netdevice().
- */
- tun->pcpu_stats = NULL;
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
@@ -2499,7 +2566,8 @@
if (!tun)
return -EBADFD;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
struct tun_page tpage;
int n = ctl->num;
int flush = 0;
@@ -2772,41 +2840,16 @@
tun->rx_batched = 0;
RCU_INIT_POINTER(tun->steering_prog, NULL);
- tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
- if (!tun->pcpu_stats) {
- err = -ENOMEM;
- goto err_free_dev;
- }
+ tun->ifr = ifr;
+ tun->file = file;
- spin_lock_init(&tun->lock);
-
- err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0)
- goto err_free_stat;
-
- tun_net_init(dev);
- tun_flow_init(tun);
-
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features | NETIF_F_LLTX;
- dev->vlan_features = dev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
-
- tun->flags = (tun->flags & ~TUN_FEATURES) |
- (ifr->ifr_flags & TUN_FEATURES);
-
- INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS, false);
- if (err < 0)
- goto err_free_flow;
+ tun_net_initialize(dev);
err = register_netdevice(tun->dev);
- if (err < 0)
- goto err_detach;
+ if (err < 0) {
+ free_netdev(dev);
+ return err;
+ }
/* free_netdev() won't check refcnt, to avoid race
* with dev_put() we need to publish tun after registration.
*/
@@ -2823,24 +2866,6 @@
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
-
-err_detach:
- tun_detach_all(dev);
- /* We are here because register_netdevice() has failed.
- * If register_netdevice() already called tun_free_netdev()
- * while dealing with the error, tun->pcpu_stats has been cleared.
- */
- if (!tun->pcpu_stats)
- goto err_free_dev;
-
-err_free_flow:
- tun_flow_uninit(tun);
- security_tun_dev_free_security(tun->security);
-err_free_stat:
- free_percpu(tun->pcpu_stats);
-err_free_dev:
- free_netdev(dev);
- return err;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 0717c18..c9c4095 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1102,10 +1102,15 @@
if (start_of_descs != desc_offset)
goto err;
- /* self check desc_offset from header*/
- if (desc_offset >= skb_len)
+ /* self check desc_offset from header and make sure that the
+ * bounds of the metadata array are inside the SKB
+ */
+ if (pkt_count * 2 + desc_offset >= skb_len)
goto err;
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, desc_offset);
+
if (pkt_count == 0)
goto err;
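Both this aqc111 hunk and the ax88179 rework further down tighten the same invariant: the device-provided packet count and metadata offset must describe an array that fits inside the received buffer before the array is walked. A generic, hedged sketch of that bounds check (entry sizes and the strictness of the comparison differ per driver):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Device-supplied values (metadata offset, number of entries) are
 * untrusted, so verify the whole array lies inside the buffer before
 * parsing it; the division form avoids multiplication overflow.
 */
static bool rx_metadata_is_sane(size_t buf_len, size_t desc_offset,
				size_t pkt_count, size_t entry_size)
{
	if (entry_size == 0 || desc_offset > buf_len)
		return false;
	if (pkt_count > (buf_len - desc_offset) / entry_size)
		return false;
	return true;
}

int main(void)
{
	printf("ok case:  %d\n", rx_metadata_is_sane(1514, 1490, 4, 2));
	printf("bad case: %d\n", rx_metadata_is_sane(1514, 1510, 4, 2));
	return 0;
}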
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 0b0cbce..79a53fe 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1471,6 +1471,42 @@
* are bundled into this buffer and where we can find an array of
* per-packet metadata (which contains elements encoded into u16).
*/
+
+ /* SKB contents for current firmware:
+ * <packet 1> <padding>
+ * ...
+ * <packet N> <padding>
+ * <per-packet metadata entry 1> <dummy header>
+ * ...
+ * <per-packet metadata entry N> <dummy header>
+ * <padding2> <rx_hdr>
+ *
+ * where:
+ * <packet N> contains pkt_len bytes:
+ * 2 bytes of IP alignment pseudo header
+ * packet received
+ * <per-packet metadata entry N> contains 4 bytes:
+ * pkt_len and fields AX_RXHDR_*
+ * <padding> 0-7 bytes to terminate at
+ * 8 bytes boundary (64-bit).
+ * <padding2> 4 bytes to make rx_hdr terminate at
+ * 8 bytes boundary (64-bit)
+ * <dummy-header> contains 4 bytes:
+ * pkt_len=0 and AX_RXHDR_DROP_ERR
+ * <rx-hdr> contains 4 bytes:
+ * pkt_cnt and hdr_off (offset of
+ * <per-packet metadata entry 1>)
+ *
+ * pkt_cnt is the number of entries in the per-packet metadata.
+ * In current firmware there are 2 entries per packet.
+ * The first points to the packet and the
+ * second is a dummy header.
+ * This was probably done to align fields to 64 bits and
+ * maintain compatibility with old firmware.
+ * This code assumes that <dummy header> and <padding2> are
+ * optional.
+ */
+
if (skb->len < 4)
return 0;
skb_trim(skb, skb->len - 4);
@@ -1484,51 +1520,66 @@
/* Make sure that the bounds of the metadata array are inside the SKB
* (and in front of the counter at the end).
*/
- if (pkt_cnt * 2 + hdr_off > skb->len)
+ if (pkt_cnt * 4 + hdr_off > skb->len)
return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
/* Packets must not overlap the metadata array */
skb_trim(skb, hdr_off);
- for (; ; pkt_cnt--, pkt_hdr++) {
+ for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
+ u16 pkt_len_plus_padd;
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+ pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
- if (pkt_len > skb->len)
+ /* Skip dummy header used for alignment
+ */
+ if (pkt_len == 0)
+ continue;
+
+ if (pkt_len_plus_padd > skb->len)
return 0;
/* Check CRC or runt packet */
- if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
- pkt_len >= 2 + ETH_HLEN) {
- bool last = (pkt_cnt == 0);
-
- if (last) {
- ax_skb = skb;
- } else {
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (!ax_skb)
- return 0;
- }
- ax_skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
- skb_set_tail_pointer(ax_skb, ax_skb->len);
- ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(ax_skb, pkt_hdr);
-
- if (last)
- return 1;
-
- usbnet_skb_return(dev, ax_skb);
+ if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
+ pkt_len < 2 + ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ skb_pull(skb, pkt_len_plus_padd);
+ continue;
}
- /* Trim this packet away from the SKB */
- if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ /* last packet */
+ if (pkt_len_plus_padd == skb->len) {
+ skb_trim(skb, pkt_len);
+
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+
+ skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+ ax88179_rx_checksum(skb, pkt_hdr);
+ return 1;
+ }
+
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
return 0;
+ skb_trim(ax_skb, pkt_len);
+
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+
+ skb->truesize = pkt_len_plus_padd +
+ SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+
+ skb_pull(skb, pkt_len_plus_padd);
}
+
+ return 0;
}
static struct sk_buff *
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 43ddbe6..935cd29 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -763,6 +763,13 @@
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 597766d..7313e6e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1024,6 +1024,7 @@
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1291,8 +1292,11 @@
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1329,6 +1333,7 @@
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0bb5b1c..f9a79d6 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1689,7 +1689,9 @@
"Stop submitting intr, status %d\n", status);
return;
case -EOVERFLOW:
- netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ if (net_ratelimit())
+ netif_info(tp, intr, tp->netdev,
+ "intr status -EOVERFLOW\n");
goto resubmit;
/* -EPIPE: should clear the halt */
default:
@@ -6868,6 +6870,7 @@
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)},
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index e5b7448..e1cd4c2 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,7 @@
u8 suspend_flags;
struct mii_bus *mdiobus;
struct phy_device *phydev;
+ struct task_struct *pm_task;
};
static bool turbo_mode = true;
@@ -70,13 +71,14 @@
static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
u32 *data, int in_pm)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_read_cmd;
else
fn = usbnet_read_cmd_nopm;
@@ -100,13 +102,14 @@
static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
u32 data, int in_pm)
{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 buf;
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!in_pm)
+ if (current != pdata->pm_task)
fn = usbnet_write_cmd;
else
fn = usbnet_write_cmd_nopm;
@@ -564,16 +567,12 @@
return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
}
-static int smsc95xx_link_reset(struct usbnet *dev)
+static void smsc95xx_mac_update_fullduplex(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
- ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- if (ret < 0)
- return ret;
-
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (pdata->phydev->duplex != DUPLEX_FULL) {
pdata->mac_cr &= ~MAC_CR_FDPX_;
@@ -585,14 +584,16 @@
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (ret != -ENODEV)
+ netdev_warn(dev->net,
+ "Error updating MAC full duplex mode\n");
+ return;
+ }
ret = smsc95xx_phy_update_flowcontrol(dev);
if (ret < 0)
netdev_warn(dev->net, "Error updating PHY flow control\n");
-
- return ret;
}
static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
@@ -609,7 +610,7 @@
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT_)
- usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ ;
else
netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
intdata);
@@ -1066,6 +1067,7 @@
struct usbnet *dev = netdev_priv(net);
phy_print_status(net->phydev);
+ smsc95xx_mac_update_fullduplex(dev);
usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
@@ -1469,9 +1471,12 @@
u32 val, link_up;
int ret;
+ pdata->pm_task = current;
+
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
+ pdata->pm_task = NULL;
return ret;
}
@@ -1718,6 +1723,7 @@
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+ pdata->pm_task = NULL;
return ret;
}
@@ -1738,29 +1744,31 @@
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ pdata->pm_task = current;
+
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
if (ret < 0)
- return ret;
+ goto done;
/* clear wake-up status */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
if (ret < 0)
- return ret;
+ goto done;
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
if (ret < 0)
- return ret;
+ goto done;
}
ret = usbnet_resume(intf);
@@ -1768,15 +1776,21 @@
netdev_warn(dev->net, "usbnet_resume error\n");
phy_init_hw(pdata->phydev);
+
+done:
+ pdata->pm_task = NULL;
return ret;
}
static int smsc95xx_reset_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
+ pdata->pm_task = current;
ret = smsc95xx_reset(dev);
+ pdata->pm_task = NULL;
if (ret < 0)
return ret;
@@ -1972,7 +1986,6 @@
.description = "smsc95xx USB 2.0 Ethernet",
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
- .link_reset = smsc95xx_link_reset,
.reset = smsc95xx_reset,
.check_connect = smsc95xx_start_phy,
.stop = smsc95xx_stop,
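The pm_task field introduced above lets the low-level register accessors detect "am I running in the suspend/resume path?" by comparing against the recorded task, instead of passing an in_pm flag through every caller. A rough user-space analogue using thread identity, with pthread_self() standing in for the kernel's current task pointer:

#include <pthread.h>
#include <stdio.h>

struct dev_priv {
	pthread_t pm_task;
	int pm_task_valid;
};

static int read_reg(void)      { puts("normal read");  return 0; }
static int read_reg_nopm(void) { puts("PM-safe read"); return 0; }

/* Pick the PM-safe accessor only when called from the recorded task. */
static int dev_read(struct dev_priv *p)
{
	if (p->pm_task_valid && pthread_equal(p->pm_task, pthread_self()))
		return read_reg_nopm();
	return read_reg();
}

static int dev_suspend(struct dev_priv *p)
{
	int ret;

	p->pm_task = pthread_self();
	p->pm_task_valid = 1;
	ret = dev_read(p);	/* resolves to the PM-safe accessor */
	p->pm_task_valid = 0;
	return ret;
}

int main(void)
{
	struct dev_priv priv = { 0 };

	dev_read(&priv);	/* normal path */
	dev_suspend(&priv);	/* PM-safe path */
	return 0;
}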
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 402390b..43d7034 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -830,13 +830,11 @@
mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
- /* deferred work (task, timer, softirq) must also stop.
- * can't flush_scheduled_work() until we drop rtnl (later),
- * else workers could deadlock; so make workers a NOP.
- */
+ /* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->kevent);
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1569,6 +1567,7 @@
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1585,9 +1584,11 @@
net = dev->net;
unregister_netdev (net);
- cancel_work_sync(&dev->kevent);
-
- usb_scuttle_anchored_urbs(&dev->deferred);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
@@ -1969,7 +1970,7 @@
cmd, reqtype, value, index, size);
if (size) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmalloc(size, GFP_NOIO);
if (!buf)
goto out;
}
@@ -2001,7 +2002,7 @@
cmd, reqtype, value, index, size);
if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_NOIO);
if (!buf)
goto out;
} else {
@@ -2102,7 +2103,7 @@
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, const void *data, u16 size)
{
- struct usb_ctrlrequest *req = NULL;
+ struct usb_ctrlrequest *req;
struct urb *urb;
int err = -ENOMEM;
void *buf = NULL;
@@ -2120,7 +2121,7 @@
if (!buf) {
netdev_err(dev->net, "Error allocating buffer"
" in %s!\n", __func__);
- goto fail_free;
+ goto fail_free_urb;
}
}
@@ -2144,14 +2145,21 @@
if (err < 0) {
netdev_err(dev->net, "Error submitting the control"
" message: status=%d\n", err);
- goto fail_free;
+ goto fail_free_all;
}
return 0;
+fail_free_all:
+ kfree(req);
fail_free_buf:
kfree(buf);
-fail_free:
- kfree(req);
+ /*
+ * avoid a double free
+ * needed because the flag can be set only
+ * after filling the URB
+ */
+ urb->transfer_flags = 0;
+fail_free_urb:
usb_free_urb(urb);
fail:
return err;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f7e3eb3..5be8ed9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -292,7 +292,7 @@
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
+ if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbe47ee..c942cd6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -213,9 +213,15 @@
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for refilling if we run low on memory. */
+ /* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* Is delayed refill enabled? */
+ bool refill_enabled;
+
+ /* The lock to synchronize the access to refill_enabled */
+ spinlock_t refill_lock;
+
/* Work struct for config space updates */
struct work_struct config_work;
@@ -319,6 +325,20 @@
return p;
}
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = true;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = false;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -948,8 +968,11 @@
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
@@ -1403,8 +1426,12 @@
}
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+ spin_lock(&vi->refill_lock);
+ if (vi->refill_enabled)
+ schedule_delayed_work(&vi->refill, 0);
+ spin_unlock(&vi->refill_lock);
+ }
}
u64_stats_update_begin(&rq->stats.syncp);
@@ -1523,6 +1550,8 @@
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
+ enable_delayed_refill(vi);
+
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
@@ -1893,6 +1922,8 @@
struct virtnet_info *vi = netdev_priv(dev);
int i;
+ /* Make sure NAPI doesn't schedule refill work */
+ disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
@@ -2366,7 +2397,6 @@
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int i;
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
@@ -2374,14 +2404,8 @@
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
- }
+ if (netif_running(vi->dev))
+ virtnet_close(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -2389,7 +2413,7 @@
static int virtnet_restore_up(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err, i;
+ int err;
err = init_vqs(vi);
if (err)
@@ -2397,16 +2421,12 @@
virtio_device_ready(vdev);
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+ enable_delayed_refill(vi);
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(vi->dev)) {
+ err = virtnet_open(vi->dev);
+ if (err)
+ return err;
}
netif_tx_lock_bh(vi->dev);
@@ -3105,6 +3125,7 @@
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ spin_lock_init(&vi->refill_lock);
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -3184,14 +3205,20 @@
}
}
- err = register_netdev(dev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+ err = register_netdevice(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
+ rtnl_unlock();
goto free_failover;
}
virtio_device_ready(vdev);
+ rtnl_unlock();
+
err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 932a399..6678a73 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -595,6 +595,7 @@
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
+ rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -619,6 +620,7 @@
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
+ rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -1654,6 +1656,10 @@
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
+ /* ring has already been cleaned up */
+ if (!rq->rx_ring[0].base)
+ return;
+
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 48fbdce..72d6706 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -710,11 +710,11 @@
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
- return -ENOBUFS;
+ return -ENOMEM;
}
rd->remote_ip = *ip;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index f6562a3..b965eb6 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -403,7 +403,7 @@
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- if (!dev_is_ethdev(dev))
+ if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 9a4c8ff..5bf7822 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,6 +6,8 @@
#include "allowedips.h"
#include "peer.h"
+enum { MAX_ALLOWEDIPS_BITS = 128 };
+
static struct kmem_cache *node_cache;
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
@@ -40,7 +42,8 @@
struct allowedips_node __rcu *p, unsigned int *len)
{
if (rcu_access_pointer(p)) {
- WARN_ON(IS_ENABLED(DEBUG) && *len >= 128);
+ if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
+ return;
stack[(*len)++] = rcu_dereference_raw(p);
}
}
@@ -52,7 +55,7 @@
static void root_free_rcu(struct rcu_head *rcu)
{
- struct allowedips_node *node, *stack[128] = {
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
container_of(rcu, struct allowedips_node, rcu) };
unsigned int len = 1;
@@ -65,7 +68,7 @@
static void root_remove_peer_lists(struct allowedips_node *root)
{
- struct allowedips_node *node, *stack[128] = { root };
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
unsigned int len = 1;
while (len > 0 && (node = stack[--len])) {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index e189eb9..e0693cd 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -19,6 +19,7 @@
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
+#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
@@ -152,7 +153,7 @@
goto err_peer;
}
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
__skb_queue_head_init(&packets);
if (!skb_is_gso(skb)) {
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index d0f3b6d..5c804bc 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -436,14 +436,13 @@
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+ struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) &&
- addr->sa_family == AF_INET) ||
- (len == sizeof(struct sockaddr_in6) &&
- addr->sa_family == AF_INET6)) {
- struct endpoint endpoint = { { { 0 } } };
-
- memcpy(&endpoint.addr, addr, len);
+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+ endpoint.addr4 = *(struct sockaddr_in *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index c0cfd9b..720952b 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -302,6 +302,41 @@
static_identity->static_public, private_key);
}
+static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
+{
+ struct blake2s_state state;
+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
+ int i;
+
+ if (keylen > BLAKE2S_BLOCK_SIZE) {
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, key, keylen);
+ blake2s_final(&state, x_key);
+ } else
+ memcpy(x_key, key, keylen);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, i_hash);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x5c ^ 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&state, i_hash);
+
+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
+}
+
/* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869
@@ -322,14 +357,14 @@
((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */
- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len)
goto out;
/* Expand first key: key = secret, data = 0x1 */
output[0] = 1;
- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len);
if (!second_dst || !second_len)
@@ -337,8 +372,7 @@
/* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len);
if (!third_dst || !third_len)
@@ -346,8 +380,7 @@
/* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len);
out:
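For reference, the local hmac() helper added above is the standard HMAC construction (RFC 2104) instantiated with BLAKE2s. With B the hash block size (64 bytes for BLAKE2s), ipad the byte 0x36 repeated B times, opad the byte 0x5c repeated B times, and K' equal to H(K) when the key is longer than B bytes or K zero-padded to B bytes otherwise:

    HMAC(K, m) = H((K' xor opad) || H((K' xor ipad) || m))

The second xor loop in hmac() applies 0x5c ^ 0x36 to the already ipad-masked key, which is equivalent to masking the original K' with opad.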
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 1de413b..8084e74 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -4,6 +4,7 @@
*/
#include "queueing.h"
+#include <linux/skb_array.h>
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
@@ -42,7 +43,7 @@
{
free_percpu(queue->worker);
WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
- ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
+ ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
#define NEXT(skb) ((skb)->prev)
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index e173204..41db10f 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -593,10 +593,10 @@
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);
- /* These will hit the WARN_ON(len >= 128) in free_node if something
- * goes wrong.
+ /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
+ * if something goes wrong.
*/
- for (i = 0; i < 128; ++i) {
+ for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
part = cpu_to_be64(~(1LLU << (i % 64)));
memset(&ip, 0xff, 16);
memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index 007cd44..d4bb40a 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -167,7 +167,7 @@
++test;
#endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
@@ -176,7 +176,6 @@
test += test_count;
goto err;
}
- msleep(500);
continue;
} else if (ret < 0) {
test += test_count;
@@ -195,7 +194,6 @@
test += test_count;
goto err;
}
- msleep(50);
continue;
}
test += test_count;
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 52b9bc8..eef5911 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -49,7 +49,7 @@
rt = dst_cache_get_ip4(cache, &fl.saddr);
if (!rt) {
- security_sk_classify_flow(sock, flowi4_to_flowi(&fl));
+ security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl));
if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
fl.saddr, RT_SCOPE_HOST))) {
endpoint->src4.s_addr = 0;
@@ -129,7 +129,7 @@
dst = dst_cache_get_ip6(cache, &fl.saddr);
if (!dst) {
- security_sk_classify_flow(sock, flowi6_to_flowi(&fl));
+ security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl));
if (unlikely(!ipv6_addr_any(&fl.saddr) &&
!ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
endpoint->src6 = fl.saddr = in6addr_any;
@@ -160,6 +160,7 @@
rcu_read_unlock_bh();
return ret;
#else
+ kfree_skb(skb);
return -EAFNOSUPPORT;
#endif
}
@@ -241,7 +242,7 @@
endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
endpoint->src4.s_addr = ip_hdr(skb)->daddr;
endpoint->src_if4 = skb->skb_iif;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
endpoint->addr6.sin6_family = AF_INET6;
endpoint->addr6.sin6_port = udp_hdr(skb)->source;
endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
@@ -284,7 +285,7 @@
peer->endpoint.addr4 = endpoint->addr4;
peer->endpoint.src4 = endpoint->src4;
peer->endpoint.src_if4 = endpoint->src_if4;
- } else if (endpoint->addr.sa_family == AF_INET6) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
peer->endpoint.addr6 = endpoint->addr6;
peer->endpoint.src6 = endpoint->src6;
} else {
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b59d482..15f02bf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -853,11 +853,36 @@
return 0;
}
+static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
+{
+ int peer_id, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+}
+
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
- int peer_id;
- int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -869,25 +894,7 @@
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
- for_each_set_bit(peer_id, peer->peer_ids,
- ATH10K_MAX_NUM_PEER_IDS) {
- ar->peer_map[peer_id] = NULL;
- }
-
- /* Double check that peer is properly un-referenced from
- * the peer_map
- */
- for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
- if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
- peer->addr, peer, i);
- ar->peer_map[i] = NULL;
- }
- }
-
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
spin_unlock_bh(&ar->data_lock);
}
@@ -5170,13 +5177,29 @@
static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 opt;
ath10k_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_OFF) {
- if (!ar->hw_rfkill_on)
- ath10k_halt(ar);
+ if (!ar->hw_rfkill_on) {
+ /* If the current driver state is RESTARTING but not yet
+ * fully RESTARTED because of incoming suspend event,
+ * then ath10k_halt() is already called via
+ * ath10k_core_restart() and should not be called here.
+ */
+ if (ar->state != ATH10K_STATE_RESTARTING) {
+ ath10k_halt(ar);
+ } else {
+ /* Suspending here, because when in RESTARTING
+ * state, ath10k_core_stop() skips
+ * ath10k_wait_for_suspend().
+ */
+ opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
+ ath10k_wait_for_suspend(ar, opt);
+ }
+ }
ar->state = ATH10K_STATE_OFF;
}
mutex_unlock(&ar->conf_mutex);
@@ -7454,10 +7477,7 @@
/* Clean up the peer object as well since we
* must have failed to do this above.
*/
- list_del(&peer->list);
- ar->peer_map[i] = NULL;
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
}
spin_unlock_bh(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index daae470..4870a3d 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1205,13 +1205,12 @@
static int ath10k_snoc_request_irq(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- int irqflags = IRQF_TRIGGER_RISING;
int ret, id;
for (id = 0; id < CE_COUNT_MAX; id++) {
ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
- ath10k_snoc_per_engine_handler,
- irqflags, ce_name[id], ar);
+ ath10k_snoc_per_engine_handler, 0,
+ ce_name[id], ar);
if (ret) {
ath10k_err(ar,
"failed to register IRQ handler for CE %d: %d\n",
@@ -1477,11 +1476,11 @@
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
- of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 7d65c11..20b9aa8 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -337,14 +337,15 @@
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
- new_pattern = old_pattern;
if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
- if (patterns[i].pkt_offset < ETH_HLEN)
+ if (patterns[i].pkt_offset < ETH_HLEN) {
ath10k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
- else
+ } else {
+ new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
}
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 9ff6e68..190bc57 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -366,6 +366,8 @@
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
}
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 28de2c7..473d922 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -476,23 +476,23 @@
return ret;
}
- ret = ath11k_mac_register(ab);
- if (ret) {
- ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
- goto err_pdev_debug;
- }
-
ret = ath11k_dp_pdev_alloc(ab);
if (ret) {
ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ goto err_pdev_debug;
+ }
+
+ ret = ath11k_mac_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ goto err_dp_pdev_free;
}
ret = ath11k_thermal_register(ab);
if (ret) {
ath11k_err(ab, "could not register thermal device: %d\n",
ret);
- goto err_dp_pdev_free;
+ goto err_mac_unregister;
}
ret = ath11k_spectral_init(ab);
@@ -505,10 +505,10 @@
err_thermal_unregister:
ath11k_thermal_unregister(ab);
-err_dp_pdev_free:
- ath11k_dp_pdev_free(ab);
err_mac_unregister:
ath11k_mac_unregister(ab);
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
err_pdev_debug:
ath11k_debugfs_pdev_destroy(ab);
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 659a275..694ebba 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -23,8 +23,8 @@
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
ATH11K_DBG_PCI = 0x00001000,
- ATH11K_DBG_DP_TX = 0x00001000,
- ATH11K_DBG_DP_RX = 0x00002000,
+ ATH11K_DBG_DP_TX = 0x00002000,
+ ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index cc9122f..67faf62 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3419,6 +3419,8 @@
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -3459,7 +3461,7 @@
static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
{
bool subfer, subfee;
- int sound_dim = 0;
+ int sound_dim = 0, nsts = 0;
subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
@@ -3469,6 +3471,11 @@
subfer = false;
}
+ if (ar->num_rx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ subfee = false;
+ }
+
/* If SU Beamformer is not set, then disable MU Beamformer Capability */
if (!subfer)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
@@ -3481,7 +3488,9 @@
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
*vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
- /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+ nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
/* Enable Sounding Dimension Field only if SU BF is enabled */
if (subfer) {
@@ -3493,9 +3502,15 @@
*vht_cap |= sound_dim;
}
- /* Use the STS advertised by FW unless SU Beamformee is not supported*/
- if (!subfee)
- *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ /* Enable Beamformee STS Field only if SU BF is enabled */
+ if (subfee) {
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
+
+ nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ *vht_cap |= nsts;
+ }
}
static struct ieee80211_sta_vht_cap
@@ -4008,8 +4023,8 @@
}
arvif = ath11k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+ mutex_lock(&ar->conf_mutex);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -4025,6 +4040,7 @@
arvif->is_started);
ieee80211_free_txskb(ar->hw, skb);
}
+ mutex_unlock(&ar->conf_mutex);
}
}
@@ -5325,6 +5341,7 @@
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct ath11k_peer *peer;
int ret;
mutex_lock(&ar->conf_mutex);
@@ -5336,9 +5353,13 @@
WARN_ON(!arvif->is_started);
if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath11k_peer_find_by_addr(ab, ar->mac_addr))
- ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ if (peer)
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ }
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index aded9a7..84db9e5 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -402,7 +402,7 @@
ret = 0;
break;
case ATH11K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index ac2a8cf..f5ab455 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -214,7 +214,10 @@
return -ENODEV;
arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+
+ spin_lock_bh(&ar->spectral.lock);
ar->spectral.mode = mode;
+ spin_unlock_bh(&ar->spectral.lock);
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
@@ -829,9 +832,6 @@
{
struct ath11k_spectral *sp = &ar->spectral;
- if (!sp->enabled)
- return;
-
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
}
@@ -883,15 +883,16 @@
if (!sp->enabled)
continue;
- ath11k_spectral_debug_unregister(ar);
- ath11k_spectral_ring_free(ar);
+ mutex_lock(&ar->conf_mutex);
+ ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ mutex_unlock(&ar->conf_mutex);
spin_lock_bh(&sp->lock);
-
- sp->mode = ATH11K_SPECTRAL_DISABLED;
sp->enabled = false;
-
spin_unlock_bh(&sp->lock);
+
+ ath11k_spectral_debug_unregister(ar);
+ ath11k_spectral_ring_free(ar);
}
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fbc2c1..d444b3d 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -746,6 +746,9 @@
}
}
+ if (idx == AR5K_EEPROM_N_PD_CURVES)
+ goto err_out;
+
ee->ee_pd_gains[mode] = 1;
pd = &chinfo[pier].pd_curves[idx];
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b0a4ca3..abed1ef 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5615,7 +5615,7 @@
static u8 ar9003_get_eepmisc(struct ath_hw *ah)
{
- return ah->eeprom.map4k.baseEepHeader.eepMisc;
+ return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc;
}
const struct eeprom_ops eep_ar9300_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index a171dbb..ad949eb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -720,7 +720,7 @@
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
-#define AR_CH0_TOP2_XPABIASLVL_S 12
+#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 0a16342..e3d546e 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -281,6 +281,7 @@
struct ath9k_htc_rx {
struct list_head rxbuf;
spinlock_t rxbuflock;
+ bool initialized;
};
#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */
@@ -305,6 +306,7 @@
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
+ bool initialized;
};
struct ath9k_htc_tx_ctl {
@@ -325,11 +327,11 @@
}
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
-
-#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
-#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
+#define __STAT_SAFE(expr) (hif_dev->htc_handle->drv_priv ? (expr) : 0)
+#define TX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
+#define RX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index ff61ae3..07ac88f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -944,7 +944,6 @@
priv->hw = hw;
priv->htc = htc_handle;
priv->dev = dev;
- htc_handle->drv_priv = priv;
SET_IEEE80211_DEV(hw, priv->dev);
ret = ath9k_htc_wait_for_target(priv);
@@ -965,6 +964,8 @@
if (ret)
goto err_init;
+ htc_handle->drv_priv = priv;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 0bdc4dc..43a743e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -808,6 +808,11 @@
skb_queue_head_init(&priv->tx.data_vi_queue);
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
+
+ /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
+ smp_wmb();
+ priv->tx.initialized = true;
+
return 0;
}
@@ -1006,6 +1011,14 @@
goto rx_next;
}
+ if (rxstatus->rs_keyix >= ATH_KEYMAX &&
+ rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) {
+ ath_dbg(common, ANY,
+ "Invalid keyix, dropping (keyix: %d)\n",
+ rxstatus->rs_keyix);
+ goto rx_next;
+ }
+
/* Get the RX status information */
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
@@ -1125,6 +1138,10 @@
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
unsigned long flags;
+ /* Check if ath9k_rx_init() completed. */
+ if (!data_race(priv->rx.initialized))
+ goto err;
+
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
@@ -1180,6 +1197,10 @@
list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
}
+ /* Allow ath9k_htc_rxep() to operate. */
+ smp_wmb();
+ priv->rx.initialized = true;
+
return 0;
err:
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 510e61e..ca05b07 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -30,6 +30,7 @@
hdr->endpoint_id = epid;
hdr->flags = flags;
hdr->payload_len = cpu_to_be16(len);
+ memset(hdr->control, 0, sizeof(hdr->control));
status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
@@ -272,6 +273,10 @@
conn_msg->dl_pipeid = endpoint->dl_pipeid;
conn_msg->ul_pipeid = endpoint->ul_pipeid;
+ /* To prevent infoleak */
+ conn_msg->svc_meta_len = 0;
+ conn_msg->pad = 0;
+
ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
if (ret)
goto err;
@@ -359,33 +364,27 @@
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 len)
{
uint32_t *pattern = (uint32_t *)skb->data;
- switch (*pattern) {
- case 0x33221199:
- {
+ if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) {
struct htc_panic_bad_vaddr *htc_panic;
htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
htc_panic->exccause, htc_panic->pc,
htc_panic->badvaddr);
- break;
- }
- case 0x33221299:
- {
+ return;
+ }
+ if (*pattern == 0x33221299) {
struct htc_panic_bad_epid *htc_panic;
htc_panic = (struct htc_panic_bad_epid *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"bad epid: 0x%08x\n", htc_panic->epid);
- break;
- }
- default:
- dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
- break;
+ return;
}
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
}
/*
@@ -406,16 +405,26 @@
if (!htc_handle || !skb)
return;
+ /* A valid message requires len >= 8.
+ *
+ * sizeof(struct htc_frame_hdr) == 8
+ * sizeof(struct htc_ready_msg) == 8
+ * sizeof(struct htc_panic_bad_vaddr) == 16
+ * sizeof(struct htc_panic_bad_epid) == 8
+ */
+ if (unlikely(len < sizeof(struct htc_frame_hdr)))
+ goto invalid;
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
if (epid == 0x99) {
- ath9k_htc_fw_panic_report(htc_handle, skb);
+ ath9k_htc_fw_panic_report(htc_handle, skb, len);
kfree_skb(skb);
return;
}
if (epid < 0 || epid >= ENDPOINT_MAX) {
+invalid:
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
else
@@ -427,21 +436,30 @@
/* Handle trailer */
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
- if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) {
/* Move past the Watchdog pattern */
htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ len -= 4;
+ }
}
/* Get the message ID */
+ if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16)))
+ goto invalid;
msg_id = (__be16 *) ((void *) htc_hdr +
sizeof(struct htc_frame_hdr));
/* Now process HTC messages */
switch (be16_to_cpu(*msg_id)) {
case HTC_MSG_READY_ID:
+ if (unlikely(len < sizeof(struct htc_ready_msg)))
+ goto invalid;
htc_process_target_rdy(htc_handle, htc_hdr);
break;
case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ if (unlikely(len < sizeof(struct htc_frame_hdr) +
+ sizeof(struct htc_conn_svc_rspmsg)))
+ goto invalid;
htc_process_conn_rsp(htc_handle, htc_hdr);
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index af36769..ac354df 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -839,7 +839,7 @@
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
- fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index fe29ad4..f315c54 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -169,6 +169,10 @@
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
+ /* Check if ath9k_tx_init() completed. */
+ if (!data_race(priv->tx.initialized))
+ break;
+
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5691bd6..6555abf 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -141,8 +141,8 @@
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
- sizeof(tx_info->rate_driver_data));
- return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+ sizeof(tx_info->status.status_driver_data));
+ return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2501,6 +2501,16 @@
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+ void *ptr = &tx_info->status;
+
+ memset(ptr + sizeof(tx_info->status.rates), 0,
+ sizeof(tx_info->status) -
+ sizeof(tx_info->status.rates) -
+ sizeof(tx_info->status.status_driver_data));
+}
+
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2512,6 +2522,8 @@
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
+ ath_clear_tx_status(tx_info);
+
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2526,6 +2538,13 @@
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+ tx_info->status.rates[i].count = 0;
+ tx_info->status.rates[i].idx = -1;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2547,16 +2566,6 @@
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
-
- for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
- tx_info->status.rates[i].count = 0;
- tx_info->status.rates[i].idx = -1;
- }
-
- tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
- /* we report airtime in ath_tx_count_airtime(), don't report twice */
- tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index dbef9d8..b903b85 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1916,7 +1916,7 @@
WARN_ON(!(tx_streams >= 1 && tx_streams <=
IEEE80211_HT_MCS_TX_MAX_STREAMS));
- tx_params = (tx_streams - 1) <<
+ tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 235cf77..43050dc 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1557,6 +1557,9 @@
goto out;
}
} while (ar->beacon_enabled && i--);
+
+ /* no entry found in list */
+ return NULL;
}
out:
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 2d618f9..cb40162 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1010,20 +1010,14 @@
void *cmd;
int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
- int rc, rc1;
+ int rc1;
- if (cmdlen < 0)
+ if (cmdlen < 0 || *ppos != 0)
return -EINVAL;
- wmi = kmalloc(len, GFP_KERNEL);
- if (!wmi)
- return -ENOMEM;
-
- rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
- if (rc < 0) {
- kfree(wmi);
- return rc;
- }
+ wmi = memdup_user(buf, len);
+ if (IS_ERR(wmi))
+ return PTR_ERR(wmi);
cmd = (cmdlen > 0) ? &wmi[1] : NULL;
cmdid = le16_to_cpu(wmi->command_id);
@@ -1033,7 +1027,7 @@
wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
- return rc;
+ return len;
}
static const struct file_operations fops_wmi = {
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 665b737..39975b7 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -582,7 +582,7 @@
u16 data[4];
s16 gain[2];
u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+ static const s16 lna_gain[4] = { -2, 10, 19, 25 };
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index 05404fb..c1395e6 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -1123,7 +1123,7 @@
struct b43legacy_phy *phy = &dev->phy;
u16 regstack[12] = { 0 };
u16 mls;
- u16 fval;
+ s16 fval;
int i;
int j;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 6103953..c8e1d50 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -290,6 +290,7 @@
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
int head_delta;
+ unsigned int tx_bytes = skb->len;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -364,7 +365,7 @@
ndev->stats.tx_dropped++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += tx_bytes;
}
/* Return ok: we always eat the packet */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index d821a47..a2b8d91 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -207,6 +207,8 @@
size = BRCMF_FW_MAX_NVRAM_SIZE;
else
size = data_len;
+ /* Add space for properties we may add */
+ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 430d2cc..1285d36 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -228,6 +228,10 @@
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
+ if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
+ bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ goto event_free;
+ }
/* convert event message */
emsg_be = &event->emsg;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 1f12dfb..61febc9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <soc.h>
@@ -447,47 +448,6 @@
static void
-brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
- void *srcaddr, u32 len)
-{
- void __iomem *address = devinfo->tcm + mem_offset;
- __le32 *src32;
- __le16 *src16;
- u8 *src8;
-
- if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
- if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
- src8 = (u8 *)srcaddr;
- while (len) {
- iowrite8(*src8, address);
- address++;
- src8++;
- len--;
- }
- } else {
- len = len / 2;
- src16 = (__le16 *)srcaddr;
- while (len) {
- iowrite16(le16_to_cpu(*src16), address);
- address += 2;
- src16++;
- len--;
- }
- }
- } else {
- len = len / 4;
- src32 = (__le32 *)srcaddr;
- while (len) {
- iowrite32(le32_to_cpu(*src32), address);
- address += 4;
- src32++;
- len--;
- }
- }
-}
-
-
-static void
brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
void *dstaddr, u32 len)
{
@@ -1346,6 +1306,18 @@
{
}
+static int brcmf_pcie_preinit(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+ brcmf_dbg(PCIE, "Enter\n");
+
+ brcmf_pcie_intr_enable(buspub->devinfo);
+ brcmf_pcie_hostready(buspub->devinfo);
+
+ return 0;
+}
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
@@ -1454,6 +1426,7 @@
}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+ .preinit = brcmf_pcie_preinit,
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
.txctl = brcmf_pcie_tx_ctlpkt,
@@ -1561,8 +1534,8 @@
return err;
brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
- brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
- (void *)fw->data, fw->size);
+ memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
+ (void *)fw->data, fw->size);
resetintr = get_unaligned_le32(fw->data);
release_firmware(fw);
@@ -1576,7 +1549,7 @@
brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
address = devinfo->ci->rambase + devinfo->ci->ramsize -
nvram_len;
- brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+ memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
brcmf_fw_nvram_free(nvram);
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
@@ -1775,6 +1748,8 @@
ret = brcmf_chip_get_raminfo(devinfo->ci);
if (ret) {
brcmf_err(bus, "Failed to get RAM info\n");
+ release_firmware(fw);
+ brcmf_fw_nvram_free(nvram);
goto fail;
}
@@ -1824,9 +1799,6 @@
init_waitqueue_head(&devinfo->mbdata_resp_wait);
- brcmf_pcie_intr_enable(devinfo);
- brcmf_pcie_hostready(devinfo);
-
ret = brcmf_attach(&devinfo->pdev->dev);
if (ret)
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index fabfbb0..d0a7465 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -158,12 +158,12 @@
struct brcmf_pno_macaddr_le pfn_mac;
u8 *mac_addr = NULL;
u8 *mac_mask = NULL;
- int err, i;
+ int err, i, ri;
- for (i = 0; i < pi->n_reqs; i++)
- if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- mac_addr = pi->reqs[i]->mac_addr;
- mac_mask = pi->reqs[i]->mac_addr_mask;
+ for (ri = 0; ri < pi->n_reqs; ri++)
+ if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[ri]->mac_addr;
+ mac_mask = pi->reqs[ri]->mac_addr_mask;
break;
}
@@ -185,7 +185,7 @@
pfn_mac.mac[0] |= 0x02;
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
- pi->reqs[i]->reqid, pfn_mac.mac);
+ pi->reqs[ri]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 6d5d5c3..9929e90 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -557,7 +557,7 @@
BRCMF_SDIO_FT_SUB,
};
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 0569f37..8c9c6bf 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -5236,7 +5236,7 @@
return -1;
}
-static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
+static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key,
u16 keylen, int perm, int lock)
{
static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
@@ -5287,7 +5287,7 @@
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
int i, rc;
- char key[16];
+ u8 key[16];
u16 index = 0;
int j = 0;
@@ -5315,12 +5315,22 @@
}
for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) {
+ int val;
+
+ if (i % 3 == 2)
+ continue;
+
+ val = hex_to_bin(data->wbuffer[i+j]);
+ if (val < 0) {
+ airo_print_err(ai->dev->name, "WebKey passed invalid key hex");
+ return;
+ }
switch(i%3) {
case 0:
- key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
+ key[i/3] = (u8)val << 4;
break;
case 1:
- key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
+ key[i/3] |= (u8)val;
break;
}
}
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index d9baa2f..e4c60ca 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -383,7 +383,7 @@
/* Each fragment may need to have room for encryption
* pre/postfix */
- if (host_encrypt)
+ if (host_encrypt && crypt && crypt->ops)
bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
crypt->ops->extra_mpdu_postfix_len;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 9a491e5..150805a 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2403,7 +2403,7 @@
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 423d3c3..1e21cdb 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -304,7 +304,7 @@
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return ret;
}
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index fcad5cd..3c931b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -367,7 +367,7 @@
struct iwl_dbg_tlv_timer_node *node, *tmp;
list_for_each_entry_safe(node, tmp, timer_list, list) {
- del_timer(&node->timer);
+ del_timer_sync(&node->timer);
list_del(&node->list);
kfree(node);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6348dfa..54b28f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1495,8 +1495,10 @@
while (!sband && i < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[i++];
- if (WARN_ON_ONCE(!sband))
+ if (WARN_ON_ONCE(!sband)) {
+ ret = -ENODEV;
goto error;
+ }
chan = &sband->channels[0];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index c146303..66a968d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -621,6 +621,9 @@
struct iwl_power_vifs *power_iterator = _data;
bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+ if (!mvmvif->uploaded)
+ return;
+
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 46255d2..17b9925 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1706,7 +1706,10 @@
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
/* set fragmented ebs for fragmented scan on HB channels */
- if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type)) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type)))
flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
return flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef62839..09f870c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1840,6 +1840,7 @@
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
}
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index a3ca662..8fa3ec7 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -682,7 +682,7 @@
* queues have already been stopped and no new frames can sneak
* up from behind.
*/
- while ((total = p54_flush_count(priv) && i--)) {
+ while ((total = p54_flush_count(priv)) && i--) {
/* waste time */
msleep(20);
}
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ab0fe85..cdb5781 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -164,7 +164,7 @@
ret = p54_parse_firmware(dev, priv->firmware);
if (ret) {
- release_firmware(priv->firmware);
+ /* the firmware is released by the caller */
return ret;
}
@@ -659,6 +659,7 @@
return 0;
err_free_common:
+ release_firmware(priv->firmware);
free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
err_free_gpio_irq:
gpio_free(p54spi_gpio_irq);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cc550ba..255286b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -593,7 +593,7 @@
bool ps_poll_pending;
struct dentry *debugfs;
- uintptr_t pending_cookie;
+ atomic_t pending_cookie;
struct sk_buff_head pending; /* packets pending */
/*
* Only radios in the same group can communicate together (the
@@ -775,6 +775,7 @@
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *cb;
if (!vp->assoc)
return;
@@ -796,6 +797,10 @@
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+ cb = IEEE80211_SKB_CB(skb);
+ cb->control.rates[0].count = 1;
+ cb->control.rates[1].idx = -1;
+
rcu_read_lock();
mac80211_hwsim_tx_frame(data->hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
@@ -1269,8 +1274,7 @@
goto nla_put_failure;
/* We create a cookie to identify this skb */
- data->pending_cookie++;
- cookie = data->pending_cookie;
+ cookie = atomic_inc_return(&data->pending_cookie);
info->rate_driver_data[0] = (void *)cookie;
if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
goto nla_put_failure;
@@ -2264,11 +2268,13 @@
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ rcu_read_lock();
if (!ieee80211_tx_prepare_skb(hwsim->hw,
hwsim->hw_scan_vif,
probe,
hwsim->tmp_chan->band,
NULL)) {
+ rcu_read_unlock();
kfree_skb(probe);
continue;
}
@@ -2276,6 +2282,7 @@
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
+ rcu_read_unlock();
local_bh_enable();
}
}
@@ -3505,6 +3512,7 @@
const u8 *src;
unsigned int hwsim_flags;
int i;
+ unsigned long flags;
bool found = false;
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
@@ -3532,18 +3540,20 @@
}
/* look for the skb matching the cookie passed back from user */
+ spin_lock_irqsave(&data2->pending.lock, flags);
skb_queue_walk_safe(&data2->pending, skb, tmp) {
- u64 skb_cookie;
+ uintptr_t skb_cookie;
txi = IEEE80211_SKB_CB(skb);
- skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0];
+ skb_cookie = (uintptr_t)txi->rate_driver_data[0];
if (skb_cookie == ret_skb_cookie) {
- skb_unlink(skb, &data2->pending);
+ __skb_unlink(skb, &data2->pending);
found = true;
break;
}
}
+ spin_unlock_irqrestore(&data2->pending.lock, flags);
/* not found */
if (!found)
@@ -3670,6 +3680,8 @@
rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
+ goto out;
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
hdr = (void *)skb->data;
@@ -4204,6 +4216,10 @@
nlh = nlmsg_hdr(skb);
gnlh = nlmsg_data(nlh);
+
+ if (skb->len < nlh->nlmsg_len)
+ return -EINVAL;
+
err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
hwsim_genl_policy, NULL);
if (err) {
@@ -4246,7 +4262,8 @@
spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
skb->data = skb->head;
- skb_set_tail_pointer(skb, len);
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, len);
hwsim_virtio_handle_cmd(skb);
spin_lock_irqsave(&hwsim_virtio_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 5d6dc1d..32fdc41 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -287,6 +287,7 @@
return 0;
err_get_fw:
+ usb_put_dev(udev);
lbs_remove_card(priv);
err_add_card:
if_usb_reset_device(cardp);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index d2ee646..3fa25cd 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -303,5 +303,7 @@
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
+ mutex_lock(&priv->wdev.mtx);
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
+ mutex_unlock(&priv->wdev.mtx);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 5923c5c..f4e3dce 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1054,6 +1054,8 @@
void *devdump_data;
int devdump_len;
struct timer_list devdump_timer;
+
+ bool ignore_btcoex_events;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 7c137eb..b002489 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -3142,6 +3142,9 @@
if (ret)
goto err_alloc_buffers;
+ if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897)
+ adapter->ignore_btcoex_events = true;
+
return 0;
err_alloc_buffers:
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 7534586..05073a4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -1061,6 +1061,9 @@
break;
case EVENT_BT_COEX_WLAN_PARA_CHANGE:
dev_dbg(adapter->dev, "EVENT: BT coex wlan param update\n");
+ if (adapter->ignore_btcoex_events)
+ break;
+
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 0fdfead..f01b455 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -455,6 +455,7 @@
qbuf.addr = addr + offset;
qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 466447a..81ff3b4 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -107,6 +107,7 @@
if (!of_property_read_u32(np, "led-sources", &led_pin))
dev->led_pin = led_pin;
dev->led_al = of_property_read_bool(np, "led-active-low");
+ of_node_put(np);
}
return led_classdev_register(dev->dev, &dev->led_cdev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index c9226dc..bdff89c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -618,6 +618,9 @@
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 424be10..b266170 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -950,7 +950,7 @@
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
@@ -1626,7 +1626,7 @@
struct mt7615_dev *dev = phy->dev;
int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
bool ext_phy = phy != &dev->phy;
- u16 def_th = ofdm ? -98 : -110;
+ s16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 88cdc2b..defa207 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -673,6 +673,9 @@
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index e43d13d..2dad61f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -108,7 +108,7 @@
ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
MT_EP_OUT_INBAND_CMD);
if (ret)
- return ret;
+ goto out;
if (wait_resp)
ret = mt76x02u_mcu_wait_resp(dev, seq);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index ecaf85b..e57e49a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -80,7 +80,7 @@
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 9a7f317..41054ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -1259,8 +1259,11 @@
generic = (struct wtbl_generic *)tlv;
if (sta) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
+ else
+ generic->partial_aid = cpu_to_le16(sta->aid);
memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->partial_aid = cpu_to_le16(sta->aid);
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
@@ -1314,12 +1317,15 @@
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ basic->aid = cpu_to_le16(sta->aid);
break;
case NL80211_IFTYPE_STATION:
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ basic->aid = cpu_to_le16(vif->bss_conf.aid);
break;
case NL80211_IFTYPE_ADHOC:
basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ basic->aid = cpu_to_le16(sta->aid);
break;
default:
WARN_ON(1);
@@ -1327,7 +1333,6 @@
}
memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->aid = cpu_to_le16(sta->aid);
basic->qos = sta->wme;
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index 6bcc4a1..cc77204 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -26,6 +26,7 @@
{ USB_DEVICE(0x2717, 0x4106) },
{ USB_DEVICE(0x2955, 0x0001) },
{ USB_DEVICE(0x2955, 0x1001) },
+ { USB_DEVICE(0x2955, 0x1003) },
{ USB_DEVICE(0x2a5f, 0x1000) },
{ USB_DEVICE(0x7392, 0x7710) },
{ 0, }
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 6be5ac8..dd26f20 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -939,30 +939,52 @@
return;
while (index + sizeof(*e) <= len) {
+ u16 attr_size;
+
e = (struct wilc_attr_entry *)&buf[index];
- if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST)
+ attr_size = le16_to_cpu(e->attr_len);
+
+ if (index + sizeof(*e) + attr_size > len)
+ return;
+
+ if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST &&
+ attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e)))
ch_list_idx = index;
- else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL)
+ else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL &&
+ attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e)))
op_ch_idx = index;
+
if (ch_list_idx && op_ch_idx)
break;
- index += le16_to_cpu(e->attr_len) + sizeof(*e);
+
+ index += sizeof(*e) + attr_size;
}
if (ch_list_idx) {
- u16 attr_size;
- struct wilc_ch_list_elem *e;
- int i;
+ unsigned int i;
+ u16 elem_size;
ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx];
- attr_size = le16_to_cpu(ch_list->attr_len);
- for (i = 0; i < attr_size;) {
+ /* the number of bytes following the final 'elem' member */
+ elem_size = le16_to_cpu(ch_list->attr_len) -
+ (sizeof(*ch_list) - sizeof(struct wilc_attr_entry));
+ for (i = 0; i < elem_size;) {
+ struct wilc_ch_list_elem *e;
+
e = (struct wilc_ch_list_elem *)(ch_list->elem + i);
+
+ i += sizeof(*e);
+ if (i > elem_size)
+ break;
+
+ i += e->no_of_channels;
+ if (i > elem_size)
+ break;
+
if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) {
memset(e->ch_list, sta_ch, e->no_of_channels);
break;
}
- i += e->no_of_channels;
}
}
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index d025a30..b258477 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -467,14 +467,25 @@
rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
if (rsn_ie) {
+ int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
- param->mode_802_11i = 2;
- param->rsn_found = true;
/* extract RSN capabilities */
- offset += (rsn_ie[offset] * 4) + 2;
- offset += (rsn_ie[offset] * 4) + 2;
- memcpy(param->rsn_cap, &rsn_ie[offset], 2);
+ if (offset < rsn_ie_len) {
+ /* skip over pairwise suites */
+ offset += (rsn_ie[offset] * 4) + 2;
+
+ if (offset < rsn_ie_len) {
+ /* skip over authentication suites */
+ offset += (rsn_ie[offset] * 4) + 2;
+
+ if (offset + 1 < rsn_ie_len) {
+ param->mode_802_11i = 2;
+ param->rsn_found = true;
+ memcpy(param->rsn_cap, &rsn_ie[offset], 2);
+ }
+ }
+ }
}
if (param->rsn_found) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index fed6d21..4bdd3a9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4151,7 +4151,10 @@
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 86, 0);
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
}
if (rf->channel <= 14) {
@@ -4352,7 +4355,8 @@
reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
- rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ if (rt2x00_rt(rt2x00dev, RT5592))
+ rt2800_iq_calibrate(rt2x00dev, rf->channel);
}
bbp = rt2800_bbp_read(rt2x00dev, 4);
@@ -5625,7 +5629,8 @@
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2x00_rt(rt2x00dev, RT3883) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5848,7 +5853,7 @@
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
} else if (rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
- rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
@@ -6110,6 +6115,27 @@
reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_soc(rt2x00dev)) {
+ struct clk *clk = clk_get_sys("bus", NULL);
+ int rate;
+
+ if (IS_ERR(clk)) {
+ clk = clk_get_sys("cpu", NULL);
+
+ if (IS_ERR(clk)) {
+ rate = 125;
+ } else {
+ rate = clk_get_rate(clk) / 3000000;
+ clk_put(clk);
+ }
+ } else {
+ rate = clk_get_rate(clk) / 1000000;
+ clk_put(clk);
+ }
+
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index bf3fbd1..091eea0 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -382,6 +382,8 @@
goto failed;
local->sram = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (!local->sram)
+ goto failed;
/*** Set up 16k window for shared memory (receive buffer) ***************/
link->resource[3]->flags |=
@@ -396,6 +398,8 @@
goto failed;
local->rmem = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (!local->rmem)
+ goto failed;
/*** Set up window for attribute memory ***********************************/
link->resource[4]->flags |=
@@ -410,6 +414,8 @@
goto failed;
local->amem = ioremap(link->resource[4]->start,
resource_size(link->resource[4]));
+ if (!local->amem)
+ goto failed;
dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 2477e18..025619c 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -460,8 +460,10 @@
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
+ unsigned int prio = 0;
unsigned long flags;
- unsigned int idx, prio, hw_prio;
+ unsigned int idx, hw_prio;
+
dma_addr_t mapping;
u32 tx_flags;
u8 rc_flags;
@@ -470,7 +472,9 @@
/* do arithmetic and then convert to le16 */
u16 frame_duration = 0;
- prio = skb_get_queue_mapping(skb);
+ /* rtl8180/rtl8185 only has one useable tx queue */
+ if (dev->queues > IEEE80211_AC_BK)
+ prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 0d374a2..e34cd6f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1874,13 +1874,6 @@
/* We have 8 bits to indicate validity */
map_addr = offset * 8;
- if (map_addr >= EFUSE_MAP_LEN) {
- dev_warn(dev, "%s: Illegal map_addr (%04x), "
- "efuse corrupt!\n",
- __func__, map_addr);
- ret = -EINVAL;
- goto exit;
- }
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (word_mask & BIT(i)) {
@@ -1891,6 +1884,13 @@
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
if (ret)
goto exit;
+ if (map_addr >= EFUSE_MAP_LEN - 1) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
+ ret = -EINVAL;
+ goto exit;
+ }
priv->efuse_wifi.raw[map_addr++] = val8;
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
@@ -2925,12 +2925,12 @@
}
if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
- /* path B RX OK */
+ /* path B TX OK */
for (i = 4; i < 6; i++)
result[3][i] = result[c1][i];
}
- if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
+ if (!(simubitmap & 0xc0) && priv->tx_paths > 1) {
/* path B RX OK */
for (i = 6; i < 8; i++)
result[3][i] = result[c1][i];
@@ -4338,15 +4338,14 @@
h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff;
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
- h2c.ramask.arg = 0x80;
h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
h2c.b_macid_cfg.data2 = bw;
- dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
- __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, rateid %02x, sgi %d, size %zi\n",
+ __func__, ramask, rateid, sgi, sizeof(h2c.b_macid_cfg));
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
}
@@ -4508,6 +4507,53 @@
return network_type;
}
+static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
+{
+ u32 reg_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+ };
+ u32 val32;
+ u16 wireless_mode = 0;
+ u8 aifs, aifsn, sifs;
+ int i;
+
+ if (priv->vif) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ if (sta)
+ wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
+ rcu_read_unlock();
+ }
+
+ if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
+ (wireless_mode & WIRELESS_MODE_N_24G))
+ sifs = 16;
+ else
+ sifs = 10;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ val32 = rtl8xxxu_read32(priv, reg_edca_param[i]);
+
+ /* It was set in conf_tx. */
+ aifsn = val32 & 0xff;
+
+ /* aifsn not set yet or already fixed */
+ if (aifsn < 2 || aifsn > 15)
+ continue;
+
+ aifs = aifsn * slot_time + sifs;
+
+ val32 &= ~0xff;
+ val32 |= aifs;
+ rtl8xxxu_write32(priv, reg_edca_param[i], val32);
+ }
+}
+
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changed)
@@ -4593,6 +4639,8 @@
else
val8 = 20;
rtl8xxxu_write8(priv, REG_SLOT, val8);
+
+ rtl8xxxu_set_aifs(priv, val8);
}
if (changed & BSS_CHANGED_BSSID) {
@@ -4984,6 +5032,8 @@
if (control && control->sta)
sta = control->sta;
+ queue = rtl8xxxu_queue_select(hw, skb);
+
tx_desc = skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
@@ -4996,7 +5046,6 @@
is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
- queue = rtl8xxxu_queue_select(hw, skb);
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
if (tx_info->control.hw_key) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 901cdfe..0b1bc04 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -329,8 +329,8 @@
tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -340,8 +340,8 @@
&h2c_data[4], &h2c_data[5],
&h2c_data[6], &h2c_data[7]);
- if (h2c_len <= 0)
- return count;
+ if (h2c_len == 0)
+ return -EINVAL;
for (i = 0; i < h2c_len; i++)
h2c_data_packed[i] = (u8)h2c_data[i];
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 06e073d..c6e4fda 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1015,7 +1015,7 @@
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
sizeof(struct rtl_usb_priv), &rtl_ops);
if (!hw) {
- WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
+ pr_warn("rtl_usb: ieee80211 alloc failed\n");
return -ENOMEM;
}
rtlpriv = hw->priv;
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index dbac4c0..a033540 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -495,6 +495,7 @@
queue->rx_copy.completed = &completed_skbs;
while (xenvif_rx_ring_slots_available(queue) &&
+ !skb_queue_empty(&queue->rx_queue) &&
work_done < RX_BATCH_SIZE) {
xenvif_rx_skb(queue);
work_done++;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index ca261e0..9ee9ce0 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@
unsigned int queue_index;
xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -984,6 +983,7 @@
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1a69b52..569f3c8 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,6 +66,10 @@
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops;
@@ -175,6 +179,9 @@
/* Is device behaving sane? */
bool broken;
+ /* Should skbs be bounced into a zeroed buffer? */
+ bool bounce;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -273,7 +280,8 @@
if (unlikely(!skb))
return NULL;
- page = page_pool_dev_alloc_pages(queue->page_pool);
+ page = page_pool_alloc_pages(queue->page_pool,
+ GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
if (unlikely(!page)) {
kfree_skb(skb);
return NULL;
@@ -669,6 +677,33 @@
return n - drops;
}
+struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+ unsigned int headerlen = skb_headroom(skb);
+ /* Align size to allocate full pages and avoid contiguous data leaks */
+ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+ XEN_PAGE_SIZE);
+ struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+ if (!n)
+ return NULL;
+
+ if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+ WARN_ONCE(1, "misaligned skb allocated\n");
+ kfree_skb(n);
+ return NULL;
+ }
+
+ /* Set the data pointer */
+ skb_reserve(n, headerlen);
+ /* Set the tail pointer and length */
+ skb_put(n, skb->len);
+
+ BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+ skb_copy_header(n, skb);
+ return n;
+}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
@@ -722,9 +757,13 @@
/* The first req should be at least ETH_HLEN size or the packet will be
* dropped by netback.
+ *
+ * If the backend is not trusted bounce all data to zeroed pages to
+ * avoid exposing contiguous data on the granted page not belonging to
+ * the skb.
*/
- if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
- nskb = skb_copy(skb, GFP_ATOMIC);
+ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
@@ -1057,8 +1096,10 @@
}
}
rcu_read_unlock();
-next:
+
__skb_queue_tail(list, skb);
+
+next:
if (!(rx->flags & XEN_NETRXF_more_data))
break;
@@ -2248,6 +2289,10 @@
info->netdev->irq = 0;
+ /* Check if backend is trusted. */
+ info->bounce = !xennet_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
/* Check if backend supports multiple queues */
max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
@@ -2414,6 +2459,9 @@
return err;
if (np->netback_has_xdp_headroom)
pr_info("backend supports XDP headroom\n");
+ if (np->bounce)
+ dev_info(&np->xbdev->dev,
+ "bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 4dc7bd7..90bea6a 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -238,9 +238,6 @@
{
int r;
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
r = info->phy_ops->enable(info->phy);
@@ -249,35 +246,26 @@
static int fdp_nci_close(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
return 0;
}
static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
+ int ret;
if (atomic_dec_and_test(&info->data_pkt_counter))
info->data_pkt_counter_cb(ndev);
- return info->phy_ops->write(info->phy, skb);
-}
+ ret = info->phy_ops->write(info->phy, skb);
+ if (ret < 0) {
+ kfree_skb(skb);
+ return ret;
+ }
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
-{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
- return nci_recv_frame(ndev, skb);
+ consume_skb(skb);
+ return 0;
}
-EXPORT_SYMBOL(fdp_nci_recv_frame);
static int fdp_nci_request_firmware(struct nci_dev *ndev)
{
@@ -489,8 +477,6 @@
int r;
u8 patched = 0;
- dev_dbg(dev, "%s\n", __func__);
-
r = nci_core_init(ndev);
if (r)
goto error;
@@ -598,9 +584,7 @@
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
- dev_dbg(dev, "%s\n", __func__);
info->setup_reset_ntf = 1;
wake_up(&info->setup_wq);
@@ -611,9 +595,7 @@
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
- dev_dbg(dev, "%s\n", __func__);
info->setup_patch_ntf = 1;
info->setup_patch_status = skb->data[0];
wake_up(&info->setup_wq);
@@ -786,11 +768,6 @@
void fdp_nci_remove(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
-
nci_unregister_device(ndev);
nci_free_device(ndev);
}
diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h
index 9bd1f3f..ead3b21 100644
--- a/drivers/nfc/fdp/fdp.h
+++ b/drivers/nfc/fdp/fdp.h
@@ -25,6 +25,5 @@
struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
void fdp_nci_remove(struct nci_dev *ndev);
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
#endif /* __LOCAL_FDP_H_ */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index ad0abb1..5e30078 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -49,7 +49,6 @@
{
struct fdp_i2c_phy *phy = phy_id;
- dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
fdp_nci_i2c_reset(phy);
return 0;
@@ -59,7 +58,6 @@
{
struct fdp_i2c_phy *phy = phy_id;
- dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
fdp_nci_i2c_reset(phy);
}
@@ -197,7 +195,6 @@
static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb;
int r;
@@ -206,9 +203,6 @@
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "%s\n", __func__);
-
r = fdp_nci_i2c_read(phy, &skb);
if (r == -EREMOTEIO)
@@ -217,7 +211,7 @@
return IRQ_HANDLED;
if (skb != NULL)
- fdp_nci_recv_frame(phy->ndev, skb);
+ nci_recv_frame(phy->ndev, skb);
return IRQ_HANDLED;
}
@@ -288,8 +282,6 @@
u32 clock_freq;
int r = 0;
- dev_dbg(dev, "%s\n", __func__);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(dev, "No I2C_FUNC_I2C support\n");
return -ENODEV;
@@ -351,8 +343,6 @@
{
struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
fdp_nci_remove(phy->ndev);
fdp_nci_i2c_disable(phy);
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 18cd962..41f27e1 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -151,10 +151,15 @@
ret = -EREMOTEIO;
} else
ret = 0;
- kfree_skb(skb);
}
- return ret;
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ consume_skb(skb);
+ return 0;
}
static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
@@ -186,9 +191,9 @@
pdata->irq_polarity = IRQF_TRIGGER_RISING;
ret = irq_of_parse_and_map(node, 0);
- if (ret < 0) {
- pr_err("Unable to get irq, error: %d\n", ret);
- return ret;
+ if (!ret) {
+ pr_err("Unable to get irq\n");
+ return -EINVAL;
}
pdata->irq = ret;
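The nfcmrvl i2c hunk above, and the spi hunk below, fix the same mistake: irq_of_parse_and_map() returns an unsigned virq and uses 0, not a negative errno, to signal failure, so the old `ret < 0` test could never fire. A minimal sketch of the corrected lookup (example_get_irq() is a hypothetical wrapper name):

#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/printk.h>

static int example_get_irq(struct device_node *node, unsigned int *irq)
{
        int ret;

        ret = irq_of_parse_and_map(node, 0);
        if (!ret) {                     /* 0 means "no mapping", not an errno */
                pr_err("Unable to get irq\n");
                return -EINVAL;
        }

        *irq = ret;
        return 0;
}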
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 529be35..54d228a 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -194,6 +194,7 @@
{
struct nci_dev *ndev = priv->ndev;
+ nci_unregister_device(ndev);
if (priv->ndev->nfc_dev->fw_download_in_progress)
nfcmrvl_fw_dnld_abort(priv);
@@ -202,7 +203,6 @@
if (gpio_is_valid(priv->config.reset_n_io))
gpio_free(priv->config.reset_n_io);
- nci_unregister_device(ndev);
nci_free_device(ndev);
kfree(priv);
}
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 8e0ddb4..1f4120e 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -129,9 +129,9 @@
}
ret = irq_of_parse_and_map(node, 0);
- if (ret < 0) {
- pr_err("Unable to get irq, error: %d\n", ret);
- return ret;
+ if (!ret) {
+ pr_err("Unable to get irq\n");
+ return -EINVAL;
}
pdata->irq = ret;
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 888e298..f26986e 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -401,13 +401,25 @@
int err;
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ usb_anchor_urb(urb, &drv_data->tx_anchor);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err) {
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
+ }
drv_data->tx_in_flight++;
+ usb_free_urb(urb);
}
- usb_scuttle_anchored_urbs(&drv_data->deferred);
+
+ /* Clean up the remaining deferred urbs. */
+ while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ kfree(urb->setup_packet);
+ usb_free_urb(urb);
+ }
}
static int nfcmrvl_resume(struct usb_interface *intf)
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index a0ce95a..b68b315 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -70,22 +70,20 @@
struct nxp_nci_info *info = nci_get_drvdata(ndev);
int r;
- if (!info->phy_ops->write) {
- r = -ENOTSUPP;
- goto send_exit;
- }
+ if (!info->phy_ops->write)
+ return -EOPNOTSUPP;
- if (info->mode != NXP_NCI_MODE_NCI) {
- r = -EINVAL;
- goto send_exit;
- }
+ if (info->mode != NXP_NCI_MODE_NCI)
+ return -EINVAL;
r = info->phy_ops->write(info->phy_id, skb);
- if (r < 0)
+ if (r < 0) {
kfree_skb(skb);
+ return r;
+ }
-send_exit:
- return r;
+ consume_skb(skb);
+ return 0;
}
static struct nci_ops nxp_nci_ops = {
@@ -104,10 +102,8 @@
int r;
info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL);
- if (!info) {
- r = -ENOMEM;
- goto probe_exit;
- }
+ if (!info)
+ return -ENOMEM;
info->phy_id = phy_id;
info->pdev = pdev;
@@ -120,31 +116,25 @@
if (info->phy_ops->set_mode) {
r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
if (r < 0)
- goto probe_exit;
+ return r;
}
info->mode = NXP_NCI_MODE_COLD;
info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS,
NXP_NCI_HDR_LEN, 0);
- if (!info->ndev) {
- r = -ENOMEM;
- goto probe_exit;
- }
+ if (!info->ndev)
+ return -ENOMEM;
nci_set_parent_dev(info->ndev, pdev);
nci_set_drvdata(info->ndev, info);
r = nci_register_device(info->ndev);
- if (r < 0)
- goto probe_exit_free_nci;
+ if (r < 0) {
+ nci_free_device(info->ndev);
+ return r;
+ }
*ndev = info->ndev;
-
- goto probe_exit;
-
-probe_exit_free_nci:
- nci_free_device(info->ndev);
-probe_exit:
return r;
}
EXPORT_SYMBOL(nxp_nci_probe);
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 9f60e4d..f426dcd 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -122,7 +122,9 @@
skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN);
r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
- if (r != frame_len) {
+ if (r < 0) {
+ goto fw_read_exit_free_skb;
+ } else if (r != frame_len) {
nfc_err(&client->dev,
"Invalid frame length: %u (expected %zu)\n",
r, frame_len);
@@ -162,8 +164,13 @@
skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
+ if (!header.plen)
+ return 0;
+
r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
- if (r != header.plen) {
+ if (r < 0) {
+ goto nci_read_exit_free_skb;
+ } else if (r != header.plen) {
nfc_err(&client->dev,
"Invalid frame payload length: %u (expected %u)\n",
r, header.plen);
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index d2c0116..8d7e29d 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2844,13 +2844,14 @@
{
struct pn533_cmd *cmd, *n;
+ /* delete the timer before cleaning up the worker */
+ del_timer_sync(&priv->listen_timer);
+
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
skb_queue_purge(&priv->resp_q);
- del_timer(&priv->listen_timer);
-
list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
list_del(&cmd->queue);
kfree(cmd);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index a0665d8..e92535e 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -310,6 +310,7 @@
pn53x_unregister_nfc(pn532->priv);
serdev_device_close(serdev);
pn53x_common_clean(pn532->priv);
+ del_timer_sync(&pn532->cmd_timeout);
kfree_skb(pn532->recv_skb);
kfree(pn532);
}
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index ba6c486..9b43cd3 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -97,11 +97,15 @@
}
ret = s3fwrn5_write(info, skb);
- if (ret < 0)
+ if (ret < 0) {
kfree_skb(skb);
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ consume_skb(skb);
mutex_unlock(&info->mutex);
- return ret;
+ return 0;
}
static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 807eae0..37d397a 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -327,7 +327,7 @@
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
*/
- if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
+ if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
@@ -340,8 +340,10 @@
/* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
- NFC_EVT_TRANSACTION_PARAMS_TAG)
+ NFC_EVT_TRANSACTION_PARAMS_TAG) {
+ devm_kfree(dev, transaction);
return -EPROTO;
+ }
transaction->params_len = skb->data[transaction->aid_len + 3];
memcpy(transaction->params, skb->data +
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 0841e0e..d416365 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -241,7 +241,7 @@
}
EXPORT_SYMBOL(st21nfca_hci_se_io);
-static void st21nfca_se_wt_timeout(struct timer_list *t)
+static void st21nfca_se_wt_work(struct work_struct *work)
{
/*
* No answer from the secure element
@@ -254,8 +254,9 @@
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfca_hci_info *info = from_timer(info, t,
- se_info.bwi_timer);
+ struct st21nfca_hci_info *info = container_of(work,
+ struct st21nfca_hci_info,
+ se_info.timeout_work);
pr_debug("\n");
@@ -273,6 +274,13 @@
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
+static void st21nfca_se_wt_timeout(struct timer_list *t)
+{
+ struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer);
+
+ schedule_work(&info->se_info.timeout_work);
+}
+
static void st21nfca_se_activation_timeout(struct timer_list *t)
{
struct st21nfca_hci_info *info = from_timer(info, t,
@@ -296,6 +304,8 @@
int r = 0;
struct device *dev = &hdev->ndev->dev;
struct nfc_evt_transaction *transaction;
+ u32 aid_len;
+ u8 params_len;
pr_debug("connectivity gate event: %x\n", event);
@@ -304,43 +314,48 @@
r = nfc_se_connectivity(hdev->ndev, host);
break;
case ST21NFCA_EVT_TRANSACTION:
- /*
- * According to specification etsi 102 622
+ /* According to specification etsi 102 622
* 11.2.2.4 EVT_TRANSACTION Table 52
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
+ *
+ * The key differences are that the aid storage length is variably sized
+ * in the packet but fixed in nfc_evt_transaction, that aid_len is a u8
+ * in the packet but a u32 in the structure, and that the tags in the
+ * packet are not included in nfc_evt_transaction.
+ *
+ * size in bytes: 1 1 5-16 1 1 0-255
+ * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
+ * member name: aid_tag(M) aid_len aid params_tag(M) params_len params
+ * example: 0x81 5-16 X 0x82 0-255 X
*/
- if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
- skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+ if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
+ aid_len = skb->data[1];
+
+ if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
+ return -EPROTO;
+
+ params_len = skb->data[aid_len + 3];
+
+ /* Verify the PARAMETERS tag is (82), and finally check that there is enough
+ * space in the packet to read everything.
+ */
+ if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
+ (skb->len < aid_len + 4 + params_len))
+ return -EPROTO;
+
+ transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
if (!transaction)
return -ENOMEM;
- transaction->aid_len = skb->data[1];
+ transaction->aid_len = aid_len;
+ transaction->params_len = params_len;
- /* Checking if the length of the AID is valid */
- if (transaction->aid_len > sizeof(transaction->aid))
- return -EINVAL;
-
- memcpy(transaction->aid, &skb->data[2],
- transaction->aid_len);
-
- /* Check next byte is PARAMETERS tag (82) */
- if (skb->data[transaction->aid_len + 2] !=
- NFC_EVT_TRANSACTION_PARAMS_TAG)
- return -EPROTO;
-
- transaction->params_len = skb->data[transaction->aid_len + 3];
-
- /* Total size is allocated (skb->len - 2) minus fixed array members */
- if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
- return -EINVAL;
-
- memcpy(transaction->params, skb->data +
- transaction->aid_len + 4, transaction->params_len);
+ memcpy(transaction->aid, &skb->data[2], aid_len);
+ memcpy(transaction->params, &skb->data[aid_len + 4], params_len);
r = nfc_se_transaction(hdev->ndev, host, transaction);
break;
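As a worked example of the layout described in the comment above: for aid_len = 5 and params_len = 1 the frame is 1 + 1 + 5 + 1 + 1 + 1 = 10 bytes, with the PARAMETERS tag at offset aid_len + 2 = 7 and the payload starting at offset aid_len + 4 = 9, so each bound checked in the hunk covers the last byte that access touches. The sketch below restates the same validation standalone; the tag values come from the table in the diff, while the names are hypothetical. The driver additionally caps aid_len against the fixed-size aid[] field and sizes the allocation by params_len instead of skb->len - 2.

#include <linux/types.h>

#define EXAMPLE_AID_TAG         0x81    /* NFC_EVT_TRANSACTION_AID_TAG */
#define EXAMPLE_PARAMS_TAG      0x82    /* NFC_EVT_TRANSACTION_PARAMS_TAG */

static bool example_evt_transaction_is_sane(const u8 *data, unsigned int len)
{
        u32 aid_len;
        u8 params_len;

        if (len < 2 || data[0] != EXAMPLE_AID_TAG)
                return false;

        aid_len = data[1];
        if (len < aid_len + 4)
                return false;

        if (data[aid_len + 2] != EXAMPLE_PARAMS_TAG)
                return false;

        params_len = data[aid_len + 3];
        return len >= aid_len + 4 + params_len;
}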
@@ -364,6 +379,7 @@
switch (event) {
case ST21NFCA_EVT_TRANSMIT_DATA:
del_timer_sync(&info->se_info.bwi_timer);
+ cancel_work_sync(&info->se_info.timeout_work);
info->se_info.bwi_active = false;
r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE,
ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
@@ -393,6 +409,7 @@
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
init_completion(&info->se_info.req_completion);
+ INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work);
/* initialize timers */
timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
@@ -420,6 +437,7 @@
if (info->se_info.se_active)
del_timer_sync(&info->se_info.se_active_timer);
+ cancel_work_sync(&info->se_info.timeout_work);
info->se_info.bwi_active = false;
info->se_info.se_active = false;
}
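The conversion of st21nfca_se_wt_timeout() above follows a standard shape even though the rationale is not spelled out in this hunk: timer callbacks run in atomic (softirq) context and must not sleep, so the recovery work, which sends HCI traffic, is moved into se_info.timeout_work and the timer merely schedules it. A minimal sketch of that pattern with hypothetical example_* names:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_ctx {
        struct timer_list timer;
        struct work_struct timeout_work;
};

static void example_timeout_work(struct work_struct *work)
{
        struct example_ctx *ctx =
                container_of(work, struct example_ctx, timeout_work);

        /* Process context: may sleep, send commands, take mutexes, ... */
        (void)ctx;
}

static void example_timer_fn(struct timer_list *t)
{
        struct example_ctx *ctx = from_timer(ctx, t, timer);

        schedule_work(&ctx->timeout_work);      /* defer out of atomic context */
}

/* Set up with timer_setup() and INIT_WORK(); tear down with del_timer_sync()
 * and cancel_work_sync(), as the surrounding hunks do. */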
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index 5e0de0f..0e4a93d 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -141,6 +141,7 @@
se_io_cb_t cb;
void *cb_context;
+ struct work_struct timeout_work;
};
struct st21nfca_hci_info {
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index b7bf3f8..5ee0afa 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -367,14 +367,16 @@
u64 bits;
int n;
+ if (*offp)
+ return 0;
+
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
- if (ret < 0) {
+ if (copy_from_user(buf, ubuf, size)) {
kfree(buf);
- return ret;
+ return -EFAULT;
}
buf[size] = 0;
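The change above avoids parsing uninitialized heap data: simple_write_to_buffer() copies relative to *offp, so a write at a non-zero offset fills only part of the freshly kmalloc()ed buffer, and the rest is still parsed after buf[size] = 0. Rejecting non-zero offsets and copying exactly size bytes removes that case. A minimal sketch of the corrected debugfs write skeleton (the parsing step is elided):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_debugfs_write(struct file *filep,
                                     const char __user *ubuf,
                                     size_t size, loff_t *offp)
{
        char *buf;

        if (*offp)
                return 0;

        buf = kmalloc(size + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, size)) {
                kfree(buf);
                return -EFAULT;
        }
        buf[size] = 0;

        /* ... parse buf here ... */

        kfree(buf);
        return size;
}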
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2304c61..9ec5996 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -187,8 +187,8 @@
ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */
- if (ctx->phys < nd_region->ndr_start
- || (ctx->phys + ctx->cleared) > ndr_end)
+ if (ctx->phys < nd_region->ndr_start ||
+ (ctx->phys + ctx->cleared - 1) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;
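The off-by-one above is easiest to see with hypothetical numbers: for ndr_start = 0x1000 and ndr_size = 0x1000, ndr_end is 0x1fff (the last byte of the region). A clear of cleared = 0x100 bytes at phys = 0x1f00 ends at byte 0x1fff and lies entirely inside the region, yet the old test compared phys + cleared = 0x2000 (one past the end) against ndr_end and bailed out; the corrected phys + cleared - 1 comparison accepts it.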
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index c21ba06..1c92c88 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -400,9 +400,7 @@
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
switch (cap) {
case NVDIMM_FWA_CAP_QUIESCE:
@@ -427,10 +425,8 @@
if (!nd_desc->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
state = nd_desc->fw_ops->activate_state(nd_desc);
- nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return -EOPNOTSUPP;
@@ -475,7 +471,6 @@
else
return -EINVAL;
- nvdimm_bus_lock(dev);
state = nd_desc->fw_ops->activate_state(nd_desc);
switch (state) {
@@ -493,7 +488,6 @@
default:
rc = -ENXIO;
}
- nvdimm_bus_unlock(dev);
if (rc == 0)
rc = len;
@@ -516,10 +510,7 @@
if (!nd_desc->fw_ops)
return 0;
- nvdimm_bus_lock(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
-
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e05cc9f..1d72653 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1018,6 +1018,9 @@
}
}
+ if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
+ align = PAGE_SIZE;
+
mappings = max_t(u16, 1, nd_region->ndr_mappings);
div_u64_rem(align, mappings, &remainder);
if (remainder)
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 4b80150..b5aa55c 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -379,11 +379,6 @@
|| !nvdimm->sec.flags)
return -EOPNOTSUPP;
- if (dev->driver == NULL) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
- return -EINVAL;
- }
-
rc = check_security_state(nvdimm);
if (rc)
return rc;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 71c85c9..d9c78fe 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -531,36 +531,54 @@
static inline void nvme_clear_nvme_request(struct request *req)
{
- if (!(req->rq_flags & RQF_DONTPREP)) {
- nvme_req(req)->retries = 0;
- nvme_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
+ nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
+ req->rq_flags |= RQF_DONTPREP;
}
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
- unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
- struct request *req;
+ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
- if (qid == NVME_QID_ANY) {
- req = blk_mq_alloc_request(q, op, flags);
- } else {
- req = blk_mq_alloc_request_hctx(q, op, flags,
- qid ? qid - 1 : 0);
- }
- if (IS_ERR(req))
- return req;
+static inline void nvme_init_request(struct request *req,
+ struct nvme_command *cmd)
+{
+ if (req->q->queuedata)
+ req->timeout = NVME_IO_TIMEOUT;
+ else /* no queuedata implies admin queue */
+ req->timeout = ADMIN_TIMEOUT;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
nvme_clear_nvme_request(req);
nvme_req(req)->cmd = cmd;
+}
+struct request *nvme_alloc_request(struct request_queue *q,
+ struct nvme_command *cmd, blk_mq_req_flags_t flags)
+{
+ struct request *req;
+
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ if (!IS_ERR(req))
+ nvme_init_request(req, cmd);
return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
+struct request *nvme_alloc_request_qid(struct request_queue *q,
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+{
+ struct request *req;
+
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ qid ? qid - 1 : 0);
+ if (!IS_ERR(req))
+ nvme_init_request(req, cmd);
+ return req;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
+
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
struct nvme_command c;
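After the split above, nvme_alloc_request() is the common case and nvme_alloc_request_qid() is only used when a specific hardware context is required; default timeouts now come from nvme_init_request() (ADMIN_TIMEOUT vs NVME_IO_TIMEOUT based on queuedata), so callers only set req->timeout to override it. The sketch below is a hedged caller-side illustration mirroring the sync-command submission hunk further down; example_alloc() is a hypothetical name and the block assumes the driver-local nvme.h declarations.

static struct request *example_alloc(struct request_queue *q,
                                     struct nvme_command *cmd,
                                     unsigned int timeout, int qid)
{
        struct request *req;

        if (qid == NVME_QID_ANY)
                req = nvme_alloc_request(q, cmd, 0);
        else
                req = nvme_alloc_request_qid(q, cmd, 0, qid);
        if (IS_ERR(req))
                return req;

        if (timeout)    /* 0 keeps the default chosen by nvme_init_request() */
                req->timeout = timeout;

        return req;
}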
@@ -663,7 +681,7 @@
req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
-static void nvme_setup_passthrough(struct request *req,
+static inline void nvme_setup_passthrough(struct request *req,
struct nvme_command *cmd)
{
memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
@@ -834,7 +852,8 @@
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
- nvme_clear_nvme_request(req);
+ if (!(req->rq_flags & RQF_DONTPREP))
+ nvme_clear_nvme_request(req);
memset(cmd, 0, sizeof(*cmd));
switch (req_op(req)) {
@@ -923,11 +942,15 @@
struct request *req;
int ret;
- req = nvme_alloc_request(q, cmd, flags, qid);
+ if (qid == NVME_QID_ANY)
+ req = nvme_alloc_request(q, cmd, flags);
+ else
+ req = nvme_alloc_request_qid(q, cmd, flags, qid);
if (IS_ERR(req))
return PTR_ERR(req);
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ if (timeout)
+ req->timeout = timeout;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1093,11 +1116,12 @@
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
+ req = nvme_alloc_request(q, cmd, 0);
if (IS_ERR(req))
return PTR_ERR(req);
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ if (timeout)
+ req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
@@ -1167,8 +1191,8 @@
{
struct request *rq;
- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
- NVME_QID_ANY);
+ rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
+ BLK_MQ_REQ_RESERVED);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -1270,6 +1294,8 @@
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_EUI64_LEN;
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
return NVME_NIDT_EUI64_LEN;
case NVME_NIDT_NGUID:
@@ -1278,6 +1304,8 @@
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_NGUID_LEN;
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
return NVME_NIDT_NGUID_LEN;
case NVME_NIDT_UUID:
@@ -1286,6 +1314,8 @@
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_UUID_LEN;
uuid_copy(&ids->uuid, data + sizeof(*cur));
return NVME_NIDT_UUID_LEN;
case NVME_NIDT_CSI:
@@ -1381,12 +1411,18 @@
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+ dev_info(ctrl->device,
+ "Ignoring bogus Namespace Identifiers\n");
+ } else {
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ }
return 0;
@@ -2012,7 +2048,7 @@
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 7);
+ blk_queue_dma_alignment(q, 3);
blk_queue_write_cache(q, vwc, vwc);
}
@@ -2249,18 +2285,21 @@
enum pr_type type, bool abort)
{
u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
+
return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
+
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
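Bit 3 of cdw10 in these reservation commands is the IEKEY ("ignore existing key") flag in the NVMe base specification, so it belongs on the no-key case; the old code set it exactly backwards. nvme_pr_clear() was additionally built as a Reservation Register (whose action value 1 means unregister) rather than a Reservation Release carrying the clear action, which the hunk above also corrects.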
@@ -2663,6 +2702,34 @@
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kioxia CD6-V Series / HPE PE8030 device times out and
+ * aborts I/O during any load, but more easily reproducible
+ * with discards (fstrim).
+ *
+ * The device is left in a state where it is also not possible
+ * to use "nvme set-feature" to disable APST, but booting with
+ * nvme_core.default_ps_max_latency=0 works.
+ */
+ .vid = 0x1e0f,
+ .mn = "KCD6XVUL6T40",
+ .quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * The external Samsung X5 SSD fails initialization without a
+ * delay before checking if it is ready and has a whole set of
+ * other problems. To make this even more interesting, it
+ * shares the PCI ID with internal Samsung 970 Evo Plus that
+ * does not need or want these quirks.
+ */
+ .vid = 0x144d,
+ .mn = "Samsung Portable SSD X5",
+ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN,
}
};
@@ -2801,8 +2868,8 @@
{ \
struct nvme_subsystem *subsys = \
container_of(dev, struct nvme_subsystem, dev); \
- return sprintf(buf, "%.*s\n", \
- (int)sizeof(subsys->field), subsys->field); \
+ return sysfs_emit(buf, "%.*s\n", \
+ (int)sizeof(subsys->field), subsys->field); \
} \
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
@@ -2882,7 +2949,6 @@
nvme_init_subnqn(subsys, ctrl, id);
memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
memcpy(subsys->model, id->mn, sizeof(subsys->model));
- memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
subsys->awupf = le16_to_cpu(id->awupf);
@@ -3043,6 +3109,8 @@
ctrl->quirks |= core_quirks[i].quirks;
}
}
+ memcpy(ctrl->subsys->firmware_rev, id->fr,
+ sizeof(ctrl->subsys->firmware_rev));
if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -3164,8 +3232,12 @@
return ret;
if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+ /*
+ * Do not return errors unless we are in a controller reset;
+ * the controller works perfectly fine without hwmon.
+ */
ret = nvme_hwmon_init(ctrl);
- if (ret < 0)
+ if (ret == -EINTR)
return ret;
}
@@ -3258,11 +3330,17 @@
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
dev_warn(ctrl->device, "resetting controller\n");
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
nvme_queue_scan(ctrl);
return 0;
default:
@@ -3323,13 +3401,13 @@
int model_len = sizeof(subsys->model);
if (!uuid_is_null(&ids->uuid))
- return sprintf(buf, "uuid.%pU\n", &ids->uuid);
+ return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- return sprintf(buf, "eui.%16phN\n", ids->nguid);
+ return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- return sprintf(buf, "eui.%8phN\n", ids->eui64);
+ return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
subsys->serial[serial_len - 1] == '\0'))
@@ -3338,7 +3416,7 @@
subsys->model[model_len - 1] == '\0'))
model_len--;
- return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+ return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
serial_len, subsys->serial, model_len, subsys->model,
head->ns_id);
}
@@ -3347,7 +3425,7 @@
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
+ return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);
@@ -3360,25 +3438,25 @@
* we have no UUID set
*/
if (uuid_is_null(&ids->uuid)) {
- printk_ratelimited(KERN_WARNING
- "No UUID available providing old NGUID\n");
- return sprintf(buf, "%pU\n", ids->nguid);
+ dev_warn_ratelimited(dev,
+ "No UUID available providing old NGUID\n");
+ return sysfs_emit(buf, "%pU\n", ids->nguid);
}
- return sprintf(buf, "%pU\n", &ids->uuid);
+ return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
+ return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
+ return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);
@@ -3443,7 +3521,7 @@
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sprintf(buf, "%.*s\n", \
+ return sysfs_emit(buf, "%.*s\n", \
(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3457,7 +3535,7 @@
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sprintf(buf, "%d\n", ctrl->field); \
+ return sysfs_emit(buf, "%d\n", ctrl->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3505,9 +3583,9 @@
if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
state_name[ctrl->state])
- return sprintf(buf, "%s\n", state_name[ctrl->state]);
+ return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
- return sprintf(buf, "unknown state\n");
+ return sysfs_emit(buf, "unknown state\n");
}
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
@@ -3559,9 +3637,9 @@
struct nvmf_ctrl_options *opts = ctrl->opts;
if (ctrl->opts->max_reconnects == -1)
- return sprintf(buf, "off\n");
- return sprintf(buf, "%d\n",
- opts->max_reconnects * opts->reconnect_delay);
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n",
+ opts->max_reconnects * opts->reconnect_delay);
}
static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
@@ -3591,8 +3669,8 @@
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (ctrl->opts->reconnect_delay == -1)
- return sprintf(buf, "off\n");
- return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}
static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
@@ -3681,16 +3759,15 @@
return NULL;
}
-static int __nvme_check_ids(struct nvme_subsystem *subsys,
- struct nvme_ns_head *new)
+static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_ids *ids)
{
struct nvme_ns_head *h;
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (nvme_ns_ids_valid(&new->ids) &&
- nvme_ns_ids_equal(&new->ids, &h->ids))
+ if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids))
return -EINVAL;
}
@@ -3724,7 +3801,7 @@
head->ids = *ids;
kref_init(&head->ref);
- ret = __nvme_check_ids(ctrl->subsys, head);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs for nsid %d\n", nsid);
@@ -4397,6 +4474,8 @@
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
+ if (ctrl->ops->stop_ctrl)
+ ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
@@ -4409,12 +4488,14 @@
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
+ nvme_mpath_update(ctrl);
}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 552dbc0..9e6e56c 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -12,7 +12,7 @@
struct nvme_hwmon_data {
struct nvme_ctrl *ctrl;
- struct nvme_smart_log log;
+ struct nvme_smart_log *log;
struct mutex read_lock;
};
@@ -60,14 +60,14 @@
static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
{
return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
- NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+ NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
}
static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct nvme_hwmon_data *data = dev_get_drvdata(dev);
- struct nvme_smart_log *log = &data->log;
+ struct nvme_smart_log *log = data->log;
int temp;
int err;
@@ -163,7 +163,7 @@
case hwmon_temp_max:
case hwmon_temp_min:
if ((!channel && data->ctrl->wctemp) ||
- (channel && data->log.temp_sensor[channel - 1])) {
+ (channel && data->log->temp_sensor[channel - 1])) {
if (data->ctrl->quirks &
NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
return 0444;
@@ -176,7 +176,7 @@
break;
case hwmon_temp_input:
case hwmon_temp_label:
- if (!channel || data->log.temp_sensor[channel - 1])
+ if (!channel || data->log->temp_sensor[channel - 1])
return 0444;
break;
default:
@@ -223,33 +223,57 @@
int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
- struct device *dev = ctrl->dev;
+ struct device *dev = ctrl->device;
struct nvme_hwmon_data *data;
struct device *hwmon;
int err;
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return 0;
+ return -ENOMEM;
+
+ data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+ if (!data->log) {
+ err = -ENOMEM;
+ goto err_free_data;
+ }
data->ctrl = ctrl;
mutex_init(&data->read_lock);
err = nvme_hwmon_get_smart_log(data);
if (err) {
- dev_warn(ctrl->device,
- "Failed to read smart log (error %d)\n", err);
- devm_kfree(dev, data);
- return err;
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ goto err_free_log;
}
- hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
- &nvme_hwmon_chip_info,
- NULL);
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+ data, &nvme_hwmon_chip_info,
+ NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
- devm_kfree(dev, data);
+ err = PTR_ERR(hwmon);
+ goto err_free_log;
}
-
+ ctrl->hwmon_device = hwmon;
return 0;
+
+err_free_log:
+ kfree(data->log);
+err_free_data:
+ kfree(data);
+ return err;
+}
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->hwmon_device) {
+ struct nvme_hwmon_data *data =
+ dev_get_drvdata(ctrl->hwmon_device);
+
+ hwmon_device_unregister(ctrl->hwmon_device);
+ ctrl->hwmon_device = NULL;
+ kfree(data->log);
+ kfree(data);
+ }
}
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 8e562d0..470cef3 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -653,7 +653,7 @@
nvme_nvm_rqtocmd(rqd, ns, cmd);
- rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+ rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
if (IS_ERR(rq))
return rq;
@@ -767,14 +767,14 @@
DECLARE_COMPLETION_ONSTACK(wait);
int ret = 0;
- rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
- NVME_QID_ANY);
+ rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
if (IS_ERR(rq)) {
ret = -ENOMEM;
goto err_cmd;
}
- rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ if (timeout)
+ rq->timeout = timeout;
if (ppa_buf && ppa_len) {
ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 18a7564..379d681 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -484,8 +484,17 @@
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
- if (nvme_state_is_live(ns->ana_state))
+ /*
+ * nvme_mpath_set_live() will trigger I/O to the multipath path device
+ * and in turn to this path device. However we cannot accept this I/O
+ * if the controller is not live. This may deadlock if called from
+ * nvme_mpath_init_identify() and the ctrl will never complete
+ * initialization, preventing I/O from completing. For this case we
+ * will reprocess the ANA log page in nvme_mpath_update() once the
+ * controller is ready.
+ */
+ if (nvme_state_is_live(ns->ana_state) &&
+ ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -572,6 +581,18 @@
nvme_read_ana_log(ctrl);
}
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+ u32 nr_change_groups = 0;
+
+ if (!ctrl->ana_log_buf)
+ return;
+
+ mutex_lock(&ctrl->ana_lock);
+ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+ mutex_unlock(&ctrl->ana_lock);
+}
+
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
@@ -603,8 +624,8 @@
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- return sprintf(buf, "%s\n",
- nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
+ return sysfs_emit(buf, "%s\n",
+ nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}
static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
@@ -629,7 +650,7 @@
static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+ return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);
@@ -638,7 +659,7 @@
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+ return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5dd1dd8..8633649 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -150,6 +150,11 @@
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+ /*
+ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ */
+ NVME_QUIRK_BOGUS_NID = (1 << 18),
};
/*
@@ -252,6 +257,9 @@
struct rw_semaphore namespaces_rwsem;
struct device ctrl_device;
struct device *device; /* char device */
+#ifdef CONFIG_NVME_HWMON
+ struct device *hwmon_device;
+#endif
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
@@ -473,6 +481,7 @@
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
@@ -535,11 +544,23 @@
static inline void nvme_should_fail(struct request *req) {}
#endif
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
+ int ret;
+
if (!ctrl->subsystem)
return -ENOTTY;
- return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
}
/*
@@ -626,7 +647,6 @@
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -657,6 +677,8 @@
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
+ struct nvme_command *cmd, blk_mq_req_flags_t flags);
+struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
@@ -677,7 +699,6 @@
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
@@ -707,6 +728,7 @@
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -793,6 +815,9 @@
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
@@ -864,11 +889,16 @@
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
return 0;
}
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
#endif
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 97afeb8..089f391 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -224,6 +224,7 @@
*/
struct nvme_iod {
struct nvme_request req;
+ struct nvme_command cmd;
struct nvme_queue *nvmeq;
bool use_sgl;
int aborted;
@@ -917,7 +918,7 @@
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_command cmnd;
+ struct nvme_command *cmnd = &iod->cmd;
blk_status_t ret;
iod->aborted = 0;
@@ -931,24 +932,24 @@
if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
return BLK_STS_IOERR;
- ret = nvme_setup_cmd(ns, req, &cmnd);
+ ret = nvme_setup_cmd(ns, req, cmnd);
if (ret)
return ret;
if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, &cmnd);
+ ret = nvme_map_data(dev, req, cmnd);
if (ret)
goto out_free_cmd;
}
if (blk_integrity_rq(req)) {
- ret = nvme_map_metadata(dev, req, &cmnd);
+ ret = nvme_map_metadata(dev, req, cmnd);
if (ret)
goto out_unmap_data;
}
blk_mq_start_request(req);
- nvme_submit_cmd(nvmeq, &cmnd, bd->last);
+ nvme_submit_cmd(nvmeq, cmnd, bd->last);
return BLK_STS_OK;
out_unmap_data:
nvme_unmap_data(dev, req);
@@ -1350,13 +1351,12 @@
req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
- abort_req->timeout = ADMIN_TIMEOUT;
abort_req->end_io_data = NULL;
blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
@@ -1666,6 +1666,7 @@
dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
if (IS_ERR(dev->ctrl.admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
+ dev->ctrl.admin_q = NULL;
return -ENOMEM;
}
if (!blk_get_queue(dev->ctrl.admin_q)) {
@@ -2278,11 +2279,10 @@
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(req))
return PTR_ERR(req);
- req->timeout = ADMIN_TIMEOUT;
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
@@ -2624,6 +2624,8 @@
if (result)
goto out_unlock;
+ dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
+
/*
* Limit the max command size to prevent iod->sg allocations going
* over a single page.
@@ -2636,7 +2638,6 @@
* Don't limit the IOMMU merged segment size.
*/
dma_set_max_seg_size(dev->dev, 0xffffffff);
- dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
mutex_unlock(&dev->shutdown_lock);
@@ -3212,7 +3213,10 @@
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
@@ -3231,7 +3235,8 @@
NVME_QUIRK_DISABLE_WRITE_ZEROES|
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
@@ -3242,10 +3247,15 @@
{ PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+ { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
@@ -3262,7 +3272,6 @@
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },
-
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 8eacc9b..b619243 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1057,6 +1057,14 @@
}
}
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2236,9 +2244,6 @@
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&ctrl->err_work);
- cancel_delayed_work_sync(&ctrl->reconnect_work);
-
nvme_rdma_teardown_io_queues(ctrl, shutdown);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
if (shutdown)
@@ -2288,6 +2293,7 @@
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
+ .stop_ctrl = nvme_rdma_stop_ctrl,
};
/*
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 6105894..57df87d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -30,6 +30,44 @@
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+ &nvme_tcp_slock_key[0],
+ "sk_lock-AF_INET-NVME",
+ &nvme_tcp_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+ &nvme_tcp_slock_key[1],
+ "sk_lock-AF_INET6-NVME",
+ &nvme_tcp_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -80,7 +118,6 @@
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
- bool more_requests;
/* recv state */
void *pdu;
@@ -276,7 +313,7 @@
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
+ !llist_empty(&queue->req_list);
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -295,9 +332,7 @@
*/
if (queue->io_cpu == raw_smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
- queue->more_requests = !last;
nvme_tcp_send_all(queue);
- queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
}
@@ -1111,8 +1146,7 @@
} else if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", ret);
- if (ret != -EPIPE && ret != -ECONNRESET)
- nvme_tcp_fail_request(queue->request);
+ nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
}
return ret;
@@ -1159,7 +1193,7 @@
else if (unlikely(result < 0))
return;
- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
@@ -1422,6 +1456,8 @@
goto err_destroy_mutex;
}
+ nvme_tcp_reclassify_socket(queue->sock);
+
/* Single syn retry */
tcp_sock_set_syncnt(queue->sock->sk, 1);
@@ -2096,9 +2132,6 @@
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
- cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
nvme_tcp_teardown_io_queues(ctrl, shutdown);
blk_mq_quiesce_queue(ctrl->admin_q);
if (shutdown)
@@ -2138,6 +2171,12 @@
nvme_tcp_reconnect_or_remove(ctrl);
}
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+ cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+ cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2460,6 +2499,7 @@
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
.get_address = nvmf_get_address,
+ .stop_ctrl = nvme_tcp_stop_ctrl,
};
static bool
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 35bac7a..aa8b0f8 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -98,7 +98,7 @@
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
- __entry->cid = req->tag;
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 9a8fa2e..bc88ff2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -730,6 +730,8 @@
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -740,9 +742,9 @@
trace_nvmet_req_complete(req);
- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 8ee94f0..d24251e 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -244,7 +244,7 @@
q = ns->queue;
}
- rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
+ rq = nvme_alloc_request(q, req->cmd, 0);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 96b67a7..2ddbd4f 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -922,10 +922,17 @@
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- if (likely(queue->nr_cmds))
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd = &queue->cmds[data->ttag];
- else
+ } else {
cmd = &queue->connect;
+ }
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -1471,6 +1478,9 @@
goto done;
switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
case TCP_FIN_WAIT1:
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
@@ -1802,7 +1812,8 @@
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 57ff31b..5a1b868 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -315,7 +315,7 @@
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 43a77d7..c8a0c0e 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -170,9 +170,7 @@
ret = blocking_notifier_call_chain(&overlay_notify_chain,
action, &nd);
- if (ret == NOTIFY_OK || ret == NOTIFY_STOP)
- return 0;
- if (ret) {
+ if (notifier_to_errno(ret)) {
ret = notifier_to_errno(ret);
pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
of_overlay_action_name[action], ret, nd.target);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 903b465..7ed605f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2052,8 +2052,8 @@
}
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
- if (IS_ERR(virt_dev)) {
- ret = PTR_ERR(virt_dev);
+ if (IS_ERR_OR_NULL(virt_dev)) {
+ ret = PTR_ERR(virt_dev) ? : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
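dev_pm_domain_attach_by_name() can return NULL (nothing to attach) as well as an ERR_PTR(), which is why the check above becomes IS_ERR_OR_NULL(). PTR_ERR(NULL) evaluates to 0, so the GNU "?:" shorthand maps the NULL case to -ENODEV while passing genuine error codes through unchanged.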
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 5de46aa..3d7adc0 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -346,11 +346,11 @@
/* Checking only first OPP is sufficient */
np = of_get_next_available_child(opp_np, NULL);
+ of_node_put(opp_np);
if (!np) {
dev_err(dev, "OPP table empty\n");
return -EINVAL;
}
- of_node_put(opp_np);
prop = of_find_property(np, "opp-peak-kBps", NULL);
of_node_put(np);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index b916fab..be81b76 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1380,15 +1380,17 @@
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,11 @@
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ iounmap(ioc->ioc_regs);
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 952a925..e330362 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -142,9 +142,8 @@
{
struct pci_hba_data hba; /* 'C' inheritance - must be first */
spinlock_t dinosaur_pen;
- unsigned long txn_addr; /* EIR addr to generate interrupt */
- u32 txn_data; /* EIR data assign to each dino */
u32 imr; /* IRQ's which are enabled */
+ struct gsc_irq gsc_irq;
int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
#ifdef DINO_DEBUG
unsigned int dino_irr0; /* save most recent IRQ line stat */
@@ -339,14 +338,43 @@
if (tmp & DINO_MASK_IRQ(local_irq)) {
DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
__func__, tmp);
- gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
+ gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr);
}
}
+#ifdef CONFIG_SMP
+static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
+{
+ struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+ struct cpumask tmask;
+ int cpu_irq;
+ u32 eim;
+
+ if (!cpumask_and(&tmask, dest, cpu_online_mask))
+ return -EINVAL;
+
+ cpu_irq = cpu_check_affinity(d, &tmask);
+ if (cpu_irq < 0)
+ return cpu_irq;
+
+ dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
+ eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
+ __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
+
+ irq_data_update_effective_affinity(d, &tmask);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
static struct irq_chip dino_interrupt_type = {
.name = "GSC-PCI",
.irq_unmask = dino_unmask_irq,
.irq_mask = dino_mask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = dino_set_affinity_irq,
+#endif
};
@@ -806,7 +834,6 @@
{
int status;
u32 eim;
- struct gsc_irq gsc_irq;
struct resource *res;
pcibios_register_hba(&dino_dev->hba);
@@ -821,10 +848,8 @@
** still only has 11 IRQ input lines - just map some of them
** to a different processor.
*/
- dev->irq = gsc_alloc_irq(&gsc_irq);
- dino_dev->txn_addr = gsc_irq.txn_addr;
- dino_dev->txn_data = gsc_irq.txn_data;
- eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+ dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq);
+ eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
/*
** Dino needs a PA "IRQ" to get a processor's attention.
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index ed9371a..ec175ae 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -135,10 +135,41 @@
*/
}
+#ifdef CONFIG_SMP
+static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
+{
+ struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d);
+ struct cpumask tmask;
+ int cpu_irq;
+
+ if (!cpumask_and(&tmask, dest, cpu_online_mask))
+ return -EINVAL;
+
+ cpu_irq = cpu_check_affinity(d, &tmask);
+ if (cpu_irq < 0)
+ return cpu_irq;
+
+ gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
+ gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
+
+ /* switch IRQ's for devices below LASI/WAX to other CPU */
+ gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
+
+ irq_data_update_effective_affinity(d, &tmask);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+
static struct irq_chip gsc_asic_interrupt_type = {
.name = "GSC-ASIC",
.irq_unmask = gsc_asic_unmask_irq,
.irq_mask = gsc_asic_mask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gsc_set_affinity_irq,
+#endif
};
int gsc_assign_irq(struct irq_chip *type, void *data)
diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h
index 86abad3..73cbd0b 100644
--- a/drivers/parisc/gsc.h
+++ b/drivers/parisc/gsc.h
@@ -31,6 +31,7 @@
int version;
int type;
int eim;
+ struct gsc_irq gsc_irq;
int global_irq[32];
};
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 8a3b0c3..fd99735 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -875,6 +875,7 @@
return vi->txn_irq;
}
+EXPORT_SYMBOL(iosapic_serial_irq);
#endif
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index 4e4fd12..6ef621a 100644
--- a/drivers/parisc/lasi.c
+++ b/drivers/parisc/lasi.c
@@ -163,7 +163,6 @@
{
extern void (*chassis_power_off)(void);
struct gsc_asic *lasi;
- struct gsc_irq gsc_irq;
int ret;
lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
@@ -185,7 +184,7 @@
lasi_init_irq(lasi);
/* the IRQ lasi should use */
- dev->irq = gsc_alloc_irq(&gsc_irq);
+ dev->irq = gsc_alloc_irq(&lasi->gsc_irq);
if (dev->irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
__func__);
@@ -193,9 +192,9 @@
return -EBUSY;
}
- lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+ lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data;
- ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
+ ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
if (ret < 0) {
kfree(lasi);
return ret;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 732b516..afc6e66 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1476,9 +1476,13 @@
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap(dev->hpa.start, 4096);
+ void __iomem *addr;
int max;
+ addr = ioremap(dev->hpa.start, 4096);
+ if (addr == NULL)
+ return -ENOMEM;
+
/* Read HW Rev First */
func_class = READ_REG32(addr + LBA_FCLASS);
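The lba_pci change above defers the ioremap() call until its result can be checked, bailing out with -ENOMEM before the first register read. A minimal userspace sketch of the same check-before-use pattern, with mmap() standing in for ioremap() (illustrative names only, not kernel code):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

/* mmap() stands in for ioremap(): a mapping request that can fail and
 * must be checked before the first access. */
static void *map_regs(size_t len)
{
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return addr == MAP_FAILED ? NULL : addr;
}

int main(void)
{
	void *regs = map_regs(4096);

	if (regs == NULL) {
		fprintf(stderr, "mapping failed: %d\n", -ENOMEM);
		return 1;
	}
	/* ... read "registers" here only after the check ... */
	munmap(regs, 4096);
	return 0;
}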
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
index 5b6df15..73a2b01 100644
--- a/drivers/parisc/wax.c
+++ b/drivers/parisc/wax.c
@@ -68,7 +68,6 @@
{
struct gsc_asic *wax;
struct parisc_device *parent;
- struct gsc_irq gsc_irq;
int ret;
wax = kzalloc(sizeof(*wax), GFP_KERNEL);
@@ -85,7 +84,7 @@
wax_init_irq(wax);
/* the IRQ wax should use */
- dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
+ dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ);
if (dev->irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
__func__);
@@ -93,9 +92,9 @@
return -EBUSY;
}
- wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+ wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data;
- ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
+ ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
if (ret < 0) {
kfree(wax);
return ret;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index eda4ded..925be41 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -468,7 +468,7 @@
const unsigned char *bufp = buf;
size_t left = length;
unsigned long expire = jiffies + port->physport->cad->timeout;
- const int fifo = FIFO(port);
+ const unsigned long fifo = FIFO(port);
int poll_for = 8; /* 80 usecs */
const struct parport_pc_private *priv = port->physport->private_data;
const int fifo_depth = priv->fifo_depth;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 4693569..8d0d1f6 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -160,9 +160,12 @@
* write happen to have any RW1C (write-one-to-clear) bits set, we
* just inadvertently cleared something we shouldn't have.
*/
- dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+ if (!bus->unsafe_warn) {
+ dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+ bus->unsafe_warn = 1;
+ }
mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
tmp = readl(addr) & mask;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 1af1447..4c5e634 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -154,8 +154,7 @@
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
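Here ob_region_map is a single unsigned long, so the search width passed to find_first_zero_bit() is BITS_PER_LONG; the old sizeof(...) * BITS_PER_LONG expression overstated the bitmap size by a factor of sizeof(long). A small stand-alone sketch of the size-in-bits contract, assuming a one-word map (the helper below is a toy, not the kernel's implementation):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Toy stand-in for find_first_zero_bit(): nbits is the bitmap size in
 * bits, so for a single unsigned long it must not exceed BITS_PER_LONG. */
static unsigned int first_zero_bit(unsigned long word, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(word & (1UL << i)))
			return i;
	return nbits;		/* every requested bit is set */
}

int main(void)
{
	unsigned long ob_region_map = 0x7;	/* regions 0..2 already in use */

	/* Correct width: exactly the bits of the one-word map. */
	printf("first free region: %u\n",
	       first_zero_bit(ob_region_map, BITS_PER_LONG));
	return 0;
}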
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5cf1ef1..ceb4815 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -401,6 +401,11 @@
dev_err(dev, "failed to disable vpcie regulator: %d\n",
ret);
}
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
}
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
@@ -523,15 +528,6 @@
/* allow the clocks to stabilize */
usleep_range(200, 500);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
- msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
- }
-
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -574,6 +570,15 @@
break;
}
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ msleep(100);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
return;
err_ref_clk:
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index ad7da4e..95ed719 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -773,8 +773,9 @@
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->window.page_size);
if (!ep->msi_mem) {
+ ret = -ENOMEM;
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
- return -ENOMEM;
+ goto err_exit_epc_mem;
}
if (ep->ops->get_features) {
@@ -783,6 +784,19 @@
return 0;
}
- return dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_free_epc_mem;
+
+ return 0;
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 44c2a65..42d8116 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -392,7 +392,8 @@
sizeof(pp->msi_msg),
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ ret = dma_mapping_error(pci->dev, pp->msi_data);
+ if (ret) {
dev_err(pci->dev, "Failed to map MSI data\n");
pp->msi_data = 0;
goto err_free_msi;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index c2dea8f..2b74ff8 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -439,7 +439,7 @@
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type)
{
- int region;
+ u32 region;
switch (type) {
case DW_PCIE_REGION_INBOUND:
@@ -452,8 +452,18 @@
return;
}
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+ if (pci->iatu_unroll_enabled) {
+ if (region == PCIE_ATU_REGION_INBOUND) {
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ ~(u32)PCIE_ATU_ENABLE);
+ } else {
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ ~(u32)PCIE_ATU_ENABLE);
+ }
+ } else {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+ }
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
@@ -588,6 +598,13 @@
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+ if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
@@ -634,11 +651,4 @@
break;
}
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
- val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
- PCIE_PL_CHK_REG_CHK_REG_START;
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
- }
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 557554f..5fbd809 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -320,8 +320,6 @@
reset_control_assert(res->ext_reset);
reset_control_assert(res->phy_reset);
- writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -364,15 +362,15 @@
goto err_deassert_axi;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
- if (ret)
- goto err_clks;
-
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ goto err_clks;
+
if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
@@ -1192,12 +1190,6 @@
goto err_disable_clocks;
}
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- goto err_disable_clocks;
- }
-
/* configure PCIe to RC mode */
writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
@@ -1443,22 +1435,21 @@
}
ret = phy_init(pcie->phy);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
+ if (ret)
goto err_pm_runtime_put;
- }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
- pm_runtime_disable(&pdev->dev);
- goto err_pm_runtime_put;
+ goto err_phy_exit;
}
return 0;
+err_phy_exit:
+ phy_exit(pcie->phy);
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index a5b677e..1222f57 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -370,15 +370,14 @@
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
- u32 val, tmp;
+ u32 val, status_l0, status_l1;
u16 val_w;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
-
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
/* SBR & Surprise Link Down WAR */
val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
@@ -394,15 +393,15 @@
}
}
- if (val & APPL_INTR_STATUS_L0_INT_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
- if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
APPL_INTR_STATUS_L1_8_0);
apply_bad_link_workaround(pp);
}
- if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
APPL_INTR_STATUS_L1_8_0);
@@ -414,25 +413,24 @@
}
}
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
dev_info(pci->dev, "CDM check complete\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
dev_err(pci->dev, "CDM comparison mismatch\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
dev_err(pci->dev, "CDM Logic error\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
}
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
- dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
}
return IRQ_HANDLED;
@@ -965,7 +963,7 @@
offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
val &= ~PCI_DLF_EXCHANGE_ENABLE;
- dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
tegra_pcie_prepare_host(pp);
@@ -1970,6 +1968,7 @@
if (ret) {
dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
ret);
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index f30144c..0c603e0 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -114,6 +114,7 @@
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
@@ -577,6 +578,7 @@
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
/* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
@@ -589,7 +591,7 @@
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
/* Unmask all MSIs */
- advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+ advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@@ -851,7 +853,9 @@
case PCI_EXP_RTSTA: {
u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
- *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
+ *value = msglog >> 16;
+ if (isr0 & PCIE_MSG_PM_PME_MASK)
+ *value |= PCI_EXP_RTSTA_PME;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -1184,7 +1188,7 @@
msg->address_lo = lower_32_bits(msi_msg);
msg->address_hi = upper_32_bits(msi_msg);
- msg->data = data->irq;
+ msg->data = data->hwirq;
}
static int advk_msi_set_affinity(struct irq_data *irq_data,
@@ -1201,15 +1205,11 @@
int hwirq, i;
mutex_lock(&pcie->msi_used_lock);
- hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
- 0, nr_irqs, 0);
- if (hwirq >= MSI_IRQ_NUM) {
- mutex_unlock(&pcie->msi_used_lock);
- return -ENOSPC;
- }
-
- bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+ hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+ order_base_2(nr_irqs));
mutex_unlock(&pcie->msi_used_lock);
+ if (hwirq < 0)
+ return -ENOSPC;
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -1227,7 +1227,7 @@
struct advk_pcie *pcie = domain->host_data;
mutex_lock(&pcie->msi_used_lock);
- bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+ bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&pcie->msi_used_lock);
}
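Switching the aardvark MSI allocator to bitmap_find_free_region()/bitmap_release_region() hands out 2^order contiguous, naturally aligned hwirqs, which is what multi-MSI blocks require, and makes freeing symmetric with allocation. A toy userspace analogue of such an aligned region allocator over a 32-bit map (illustrative only; the kernel helpers operate on arbitrary-length bitmaps):

#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit bitmap allocator: grab 2^order contiguous bits, naturally
 * aligned. Illustrative analogue of bitmap_find_free_region(). */
static int region_alloc(uint32_t *map, unsigned int order)
{
	unsigned int step = 1u << order;
	uint32_t mask = (step == 32) ? 0xffffffffu : ((1u << step) - 1);

	for (unsigned int pos = 0; pos < 32; pos += step) {
		if (!((*map >> pos) & mask)) {
			*map |= mask << pos;
			return (int)pos;
		}
	}
	return -1;			/* no naturally aligned free region */
}

static void region_free(uint32_t *map, unsigned int pos, unsigned int order)
{
	unsigned int step = 1u << order;
	uint32_t mask = (step == 32) ? 0xffffffffu : ((1u << step) - 1);

	*map &= ~(mask << pos);
}

int main(void)
{
	uint32_t map = 0;
	int a = region_alloc(&map, 1);	/* 2 vectors -> bits 0..1 */
	int b = region_alloc(&map, 2);	/* 4 vectors -> bits 4..7, next aligned slot */

	printf("a=%d b=%d map=0x%08x\n", a, b, map);
	region_free(&map, (unsigned int)a, 1);
	region_free(&map, (unsigned int)b, 2);
	return 0;
}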
@@ -1388,23 +1388,19 @@
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
u32 msi_val, msi_mask, msi_status, msi_idx;
- u16 msi_data;
+ int virq;
msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
- msi_status = msi_val & ~msi_mask;
+ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
if (!(BIT(msi_idx) & msi_status))
continue;
- /*
- * msi_idx contains bits [4:0] of the msi_data and msi_data
- * contains 16bit MSI interrupt number
- */
advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
- generic_handle_irq(msi_data);
+ virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx);
+ generic_handle_irq(virq);
}
advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index a070e69..4353443 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1118,6 +1118,10 @@
u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
+ if (!int_desc->vector_count) {
+ kfree(int_desc);
+ return;
+ }
memset(&ctxt, 0, sizeof(ctxt));
int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
int_pkt->message_type.type =
@@ -1180,6 +1184,28 @@
pci_msi_mask_irq(data);
}
+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ return cfg->vector;
+}
+
+static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ int ret = pci_msi_prepare(domain, dev, nvec, info);
+
+ /*
+ * By using the interrupt remapper in the hypervisor IOMMU, contiguous
+ * CPU vectors are not needed for multi-MSI

+ */
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
+ info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+ return ret;
+}
+
/**
* hv_irq_unmask() - "Unmask" the IRQ by setting its current
* affinity.
@@ -1195,6 +1221,7 @@
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
struct hv_retarget_device_interrupt *params;
+ struct tran_int_desc *int_desc;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
cpumask_var_t tmp;
@@ -1209,6 +1236,7 @@
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ int_desc = data->chip_data;
spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
@@ -1216,7 +1244,8 @@
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
params->int_entry.source = 1; /* MSI(-X) */
- hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
+ params->int_entry.msi_entry.address = int_desc->address & 0xffffffff;
+ params->int_entry.msi_entry.data = int_desc->data;
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
@@ -1317,12 +1346,12 @@
static u32 hv_compose_msi_req_v1(
struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ u32 slot, u8 vector, u8 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
@@ -1336,14 +1365,14 @@
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ u32 slot, u8 vector, u8 vector_count)
{
int cpu;
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
@@ -1371,7 +1400,6 @@
*/
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct irq_cfg *cfg = irqd_cfg(data);
struct hv_pcibus_device *hbus;
struct vmbus_channel *channel;
struct hv_pci_dev *hpdev;
@@ -1380,6 +1408,8 @@
struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
+ struct msi_desc *msi_desc;
+ u8 vector, vector_count;
struct {
struct pci_packet pci_pkt;
union {
@@ -1391,7 +1421,17 @@
u32 size;
int ret;
- pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ /* Reuse the previous allocation */
+ if (data->chip_data) {
+ int_desc = data->chip_data;
+ msg->address_hi = int_desc->address >> 32;
+ msg->address_lo = int_desc->address & 0xffffffff;
+ msg->data = int_desc->data;
+ return;
+ }
+
+ msi_desc = irq_data_get_msi_desc(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1400,17 +1440,40 @@
if (!hpdev)
goto return_null_message;
- /* Free any previous message that might have already been composed. */
- if (data->chip_data) {
- int_desc = data->chip_data;
- data->chip_data = NULL;
- hv_int_desc_free(hpdev, int_desc);
- }
-
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
+ if (!msi_desc->msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+ /*
+ * If this is not the first MSI of Multi MSI, we already have
+ * a mapping. Can exit early.
+ */
+ if (msi_desc->irq != data->irq) {
+ data->chip_data = int_desc;
+ int_desc->address = msi_desc->msg.address_lo |
+ (u64)msi_desc->msg.address_hi << 32;
+ int_desc->data = msi_desc->msg.data +
+ (data->irq - msi_desc->irq);
+ msg->address_hi = msi_desc->msg.address_hi;
+ msg->address_lo = msi_desc->msg.address_lo;
+ msg->data = int_desc->data;
+ put_pcichild(hpdev);
+ return;
+ }
+ /*
+ * The vector we select here is a dummy value. The correct
+ * value gets sent to the hypervisor in unmask(). This needs
+ * to be aligned with the count, and also not zero. Multi-msi
+ * is powers of 2 up to 32, so 32 will always work here.
+ */
+ vector = 32;
+ vector_count = msi_desc->nvec_used;
+ } else {
+ vector = hv_msi_get_int_vector(data);
+ vector_count = 1;
+ }
+
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
@@ -1421,7 +1484,8 @@
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ vector,
+ vector_count);
break;
case PCI_PROTOCOL_VERSION_1_2:
@@ -1429,7 +1493,8 @@
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ vector,
+ vector_count);
break;
default:
@@ -1545,7 +1610,7 @@
};
static struct msi_domain_ops hv_msi_ops = {
- .msi_prepare = pci_msi_prepare,
+ .msi_prepare = hv_msi_prepare,
.msi_free = hv_msi_free,
};
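For multi-MSI, the hv_compose_msi_msg() hunk above composes the message only for the first vector of the block and lets every sibling reuse the leader's address, deriving its data as the leader's data plus the vector's offset within the block. A compact model of that derivation, with made-up field names (not Hyper-V's structures):

#include <stdint.h>
#include <stdio.h>

/* Toy model of a multi-MSI block: the first (leader) vector owns the
 * composed address/data, and each sibling derives its data from the
 * leader plus its offset within the block. Field names are made up. */
struct msi_block {
	uint64_t address;
	uint32_t base_data;	/* data programmed for the first vector */
	unsigned int base_irq;	/* Linux irq number of the first vector */
	unsigned int nvec;	/* power of two, up to 32 */
};

static uint32_t msi_data_for_irq(const struct msi_block *blk, unsigned int irq)
{
	return blk->base_data + (irq - blk->base_irq);
}

int main(void)
{
	struct msi_block blk = {
		.address = 0xfee00000ull,
		.base_data = 0x40,
		.base_irq = 100,
		.nvec = 4,
	};

	for (unsigned int irq = blk.base_irq; irq < blk.base_irq + blk.nvec; irq++)
		printf("irq %u -> data 0x%x\n", irq,
		       (unsigned int)msi_data_for_irq(&blk, irq));
	return 0;
}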
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index b651b6f..e1c2daa 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -467,7 +467,7 @@
return 1;
}
- if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) {
+ if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
*ib_reg_mask |= (1 << 0);
return 0;
}
@@ -481,28 +481,27 @@
}
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
- struct resource_entry *entry,
- u8 *ib_reg_mask)
+ struct of_pci_range *range, u8 *ib_reg_mask)
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
u32 pim_reg;
- u64 cpu_addr = entry->res->start;
- u64 pci_addr = cpu_addr - entry->offset;
- u64 size = resource_size(entry->res);
+ u64 cpu_addr = range->cpu_addr;
+ u64 pci_addr = range->pci_addr;
+ u64 size = range->size;
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
- region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
- if (entry->res->flags & IORESOURCE_PREFETCH)
+ if (range->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
@@ -533,13 +532,25 @@
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
- struct resource_entry *entry;
+ struct device_node *np = port->node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = port->dev;
u8 ib_reg_mask = 0;
- resource_list_for_each_entry(entry, &bridge->dma_ranges)
- xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+
+ dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+ xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
+ }
return 0;
}
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 7631dc3..379cde5 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -264,8 +264,7 @@
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
/*
* Region 0 is reserved for configuration space and shouldn't
* be used elsewhere per TRM, so leave it out.
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d415707..ddfeca9 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -285,7 +285,17 @@
if (ret)
dev_err(dev, "Data transfer failed\n");
} else {
- memcpy(dst_addr, src_addr, reg->size);
+ void *buf;
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ memcpy_fromio(buf, src_addr, reg->size);
+ memcpy_toio(dst_addr, buf, reg->size);
+ kfree(buf);
}
ktime_get_ts64(&end);
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
@@ -441,7 +451,7 @@
if (!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
ret = -EINVAL;
- goto err_map_addr;
+ goto err_dma_map;
}
src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
@@ -613,7 +623,6 @@
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
- pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 30708af..dda9523 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -98,6 +98,8 @@
if (slot_status & PCI_EXP_SLTSTA_CC) {
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_CC);
+ ctrl->cmd_busy = 0;
+ smp_mb();
return 1;
}
msleep(10);
@@ -1058,6 +1060,8 @@
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0d71090..1162734 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2830,6 +2830,18 @@
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
},
+ {
+ /*
+ * Downstream device is not accessible after putting a root port
+ * into D3cold and back into D0 on Elo i2.
+ */
+ .ident = "Elo i2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
+ },
+ },
#endif
{ }
};
@@ -4965,18 +4977,18 @@
static void pci_dev_lock(struct pci_dev *dev)
{
- pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
+ pci_cfg_access_lock(dev);
}
/* Return 1 on successful lock, 0 on contention */
static int pci_dev_trylock(struct pci_dev *dev)
{
- if (pci_cfg_access_trylock(dev)) {
- if (device_trylock(&dev->dev))
+ if (device_trylock(&dev->dev)) {
+ if (pci_cfg_access_trylock(dev))
return 1;
- pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
return 0;
@@ -4984,8 +4996,8 @@
static void pci_dev_unlock(struct pci_dev *dev)
{
- device_unlock(&dev->dev);
pci_cfg_access_unlock(dev);
+ device_unlock(&dev->dev);
}
static void pci_dev_save_and_disable(struct pci_dev *dev)
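The pci_dev_lock()/pci_dev_trylock()/pci_dev_unlock() hunk makes all three paths take the device lock before the config-access lock and drop them in reverse order, so the trylock path can no longer invert the ordering used elsewhere. A self-contained sketch of the same pattern with POSIX mutexes (hypothetical names, not the kernel locks):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical lock pair standing in for the device lock and the config
 * access lock; the point is a single, consistent acquisition order. */
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static void pair_lock(void)
{
	pthread_mutex_lock(&dev_lock);	/* outer lock first */
	pthread_mutex_lock(&cfg_lock);
}

/* Returns 1 on success, 0 on contention; backs out the outer lock if
 * the inner one cannot be taken, preserving the ordering. */
static int pair_trylock(void)
{
	if (pthread_mutex_trylock(&dev_lock))
		return 0;
	if (pthread_mutex_trylock(&cfg_lock)) {
		pthread_mutex_unlock(&dev_lock);
		return 0;
	}
	return 1;
}

static void pair_unlock(void)
{
	pthread_mutex_unlock(&cfg_lock);	/* release in reverse order */
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	pair_lock();
	pair_unlock();
	printf("trylock: %d\n", pair_trylock());
	pair_unlock();
	return 0;
}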
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a96dc6f..0039460 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -559,8 +559,8 @@
/* PCI error reporting and recovery */
pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
- pci_channel_state_t state,
- pci_ers_result_t (*reset_link)(struct pci_dev *pdev));
+ pci_channel_state_t state,
+ pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev));
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
@@ -585,11 +585,8 @@
#ifdef CONFIG_PCIE_PTM
void pci_ptm_init(struct pci_dev *dev);
-int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
#else
static inline void pci_ptm_init(struct pci_dev *dev) { }
-static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
-{ return -EINVAL; }
#endif
struct pci_dev_reset_methods {
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 65dff5f..9564b74 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -101,6 +101,11 @@
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
+#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | \
+ PCI_ERR_ROOT_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_COR_RCV | \
+ PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+
static int pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
@@ -300,7 +305,8 @@
return -EIO;
port_type = pci_pcie_type(dev);
- if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
+ port_type == PCI_EXP_TYPE_RC_EC) {
pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status);
pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status);
}
@@ -532,7 +538,7 @@
struct pci_dev *pdev = to_pci_dev(dev); \
u64 *stats = pdev->aer_stats->stats_array; \
\
- for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
if (strings_array[i]) \
str += sprintf(str, "%s %llu\n", \
strings_array[i], stats[i]); \
@@ -595,7 +601,8 @@
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
a == &dev_attr_aer_rootport_total_err_fatal.attr ||
a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
+ ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_EC)))
return 0;
return a->mode;
@@ -1034,6 +1041,7 @@
*/
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
+ int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
int temp;
@@ -1052,8 +1060,8 @@
&info->mask);
if (!(info->status & ~info->mask))
return 0;
- } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
@@ -1187,7 +1195,7 @@
struct aer_err_source e_src = {};
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
- if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
+ if (!(e_src.status & AER_ERR_STATUS_MASK))
return IRQ_NONE;
pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
@@ -1205,6 +1213,7 @@
int type = pci_pcie_type(dev);
if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (type == PCI_EXP_TYPE_RC_EC) ||
(type == PCI_EXP_TYPE_UPSTREAM) ||
(type == PCI_EXP_TYPE_DOWNSTREAM)) {
if (enable)
@@ -1329,6 +1338,16 @@
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
+ BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
+ AER_MAX_TYPEOF_COR_ERRS);
+ BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
+ AER_MAX_TYPEOF_UNCOR_ERRS);
+
+ /* Limit to Root Ports or Root Complex Event Collectors */
+ if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
+ (pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
+ return -ENODEV;
+
rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
if (!rpc)
return -ENOMEM;
@@ -1350,41 +1369,60 @@
}
/**
- * aer_root_reset - reset link on Root Port
- * @dev: pointer to Root Port's pci_dev data structure
+ * aer_root_reset - reset Root Port hierarchy or RCEC
+ * @dev: pointer to Root Port or RCEC
*
- * Invoked by Port Bus driver when performing link reset at Root Port.
+ * Invoked by Port Bus driver when performing reset.
*/
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
- int aer = dev->aer_cap;
+ int type = pci_pcie_type(dev);
+ struct pci_dev *root;
+ int aer;
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
u32 reg32;
int rc;
+ root = dev; /* device with Root Error registers */
+ aer = root->aer_cap;
- /* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ if ((host->native_aer || pcie_ports_native) && aer) {
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ }
- rc = pci_bus_error_reset(dev);
- pci_info(dev, "Root Port link has been reset\n");
+ if (type == PCI_EXP_TYPE_RC_EC) {
+ if (pcie_has_flr(dev)) {
+ rc = pcie_flr(dev);
+ pci_info(dev, "has been reset (%d)\n", rc);
+ } else {
+ pci_info(dev, "not reset (no FLR support)\n");
+ rc = -ENOTTY;
+ }
+ } else {
+ rc = pci_bus_error_reset(dev);
+ pci_info(dev, "Root Port link has been reset (%d)\n", rc);
+ }
- /* Clear Root Error Status */
- pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, reg32);
+ if ((host->native_aer || pcie_ports_native) && aer) {
+ /* Clear Root Error Status */
+ pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32);
- /* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ }
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
- .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index c543f41..984aa02 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -146,38 +146,68 @@
return 0;
}
-pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
- pci_channel_state_t state,
- pci_ers_result_t (*reset_link)(struct pci_dev *pdev))
+/**
+ * pci_walk_bridge - walk bridges potentially AER affected
+ * @bridge: bridge which may be a Port or an RCEC
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * If the device provided is a bridge, walk the subordinate bus, including
+ * any bridged devices on buses under this bus. Call the provided callback
+ * on each device found.
+ *
+ * If the device provided has no subordinate bus, e.g., an RCEC, call the
+ * callback on the device itself.
+ */
+static void pci_walk_bridge(struct pci_dev *bridge,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata)
{
+ if (bridge->subordinate)
+ pci_walk_bus(bridge->subordinate, cb, userdata);
+ else
+ cb(bridge, userdata);
+}
+
+pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ pci_channel_state_t state,
+ pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev))
+{
+ int type = pci_pcie_type(dev);
+ struct pci_dev *bridge;
pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
- struct pci_bus *bus;
/*
- * Error recovery runs on all subordinates of the first downstream port.
- * If the downstream port detected the error, it is cleared at the end.
+ * If the error was detected by a Root Port, Downstream Port, or
+ * RCEC, recovery runs on the device itself. For Ports, that also
+ * includes any subordinate devices.
+ *
+ * If it was detected by another device (Endpoint, etc), recovery
+ * runs on the device and anything else under the same Port, i.e.,
+ * everything under "bridge".
*/
- if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
- dev = dev->bus->self;
- bus = dev->subordinate;
+ if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_RC_EC)
+ bridge = dev;
+ else
+ bridge = pci_upstream_bridge(dev);
- pci_dbg(dev, "broadcast error_detected message\n");
+ pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
- pci_walk_bus(bus, report_frozen_detected, &status);
- status = reset_link(dev);
- if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_warn(dev, "link reset failed\n");
+ pci_walk_bridge(bridge, report_frozen_detected, &status);
+ if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
+ pci_warn(bridge, "subordinate device reset failed\n");
goto failed;
}
} else {
- pci_walk_bus(bus, report_normal_detected, &status);
+ pci_walk_bridge(bridge, report_normal_detected, &status);
}
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
- pci_dbg(dev, "broadcast mmio_enabled message\n");
- pci_walk_bus(bus, report_mmio_enabled, &status);
+ pci_dbg(bridge, "broadcast mmio_enabled message\n");
+ pci_walk_bridge(bridge, report_mmio_enabled, &status);
}
if (status == PCI_ERS_RESULT_NEED_RESET) {
@@ -187,27 +217,27 @@
* drivers' slot_reset callbacks?
*/
status = PCI_ERS_RESULT_RECOVERED;
- pci_dbg(dev, "broadcast slot_reset message\n");
- pci_walk_bus(bus, report_slot_reset, &status);
+ pci_dbg(bridge, "broadcast slot_reset message\n");
+ pci_walk_bridge(bridge, report_slot_reset, &status);
}
if (status != PCI_ERS_RESULT_RECOVERED)
goto failed;
- pci_dbg(dev, "broadcast resume message\n");
- pci_walk_bus(bus, report_resume, &status);
+ pci_dbg(bridge, "broadcast resume message\n");
+ pci_walk_bridge(bridge, report_resume, &status);
- if (pcie_aer_is_native(dev))
- pcie_clear_device_status(dev);
- pci_aer_clear_nonfatal_status(dev);
- pci_info(dev, "device recovery successful\n");
+ if (pcie_aer_is_native(bridge))
+ pcie_clear_device_status(bridge);
+ pci_aer_clear_nonfatal_status(bridge);
+ pci_info(bridge, "device recovery successful\n");
return status;
failed:
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
- pci_info(dev, "device recovery failed\n");
+ pci_info(bridge, "device recovery failed\n");
return status;
}
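pcie_do_recovery() now picks a "bridge" (the detecting Port or RCEC, or the upstream Port of an Endpoint) and pci_walk_bridge() either walks its subordinate bus or, when there is none, runs the callback on the device itself. A small tree-walk sketch of that walk-or-self decision (hypothetical node structure, not struct pci_dev):

#include <stdio.h>

/* Hypothetical device node: a bridge has children, an RCEC-like device
 * does not. Names are illustrative, not the kernel's structures. */
struct node {
	const char *name;
	struct node **children;	/* NULL-terminated array, or NULL */
};

static void walk_subtree(struct node *n, void (*cb)(struct node *))
{
	cb(n);
	if (n->children)
		for (struct node **c = n->children; *c; c++)
			walk_subtree(*c, cb);
}

/* Mirror of the pci_walk_bridge() idea: walk below the bridge when it
 * has a subordinate bus, otherwise run the callback on the bridge itself. */
static void walk_bridge(struct node *bridge, void (*cb)(struct node *))
{
	if (bridge->children) {
		for (struct node **c = bridge->children; *c; c++)
			walk_subtree(*c, cb);
	} else {
		cb(bridge);
	}
}

static void report(struct node *n)
{
	printf("error_detected(%s)\n", n->name);
}

int main(void)
{
	struct node ep = { "endpoint", NULL };
	struct node *kids[] = { &ep, NULL };
	struct node port = { "root-port", kids };
	struct node rcec = { "rcec", NULL };

	walk_bridge(&port, report);	/* visits the endpoint */
	walk_bridge(&rcec, report);	/* visits the RCEC itself */
	return 0;
}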
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index d4559cf..aac1a68 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -101,12 +101,14 @@
static int pcie_portdrv_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
+ int type = pci_pcie_type(dev);
int status;
if (!pci_is_pcie(dev) ||
- ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
- (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
- (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ ((type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (type != PCI_EXP_TYPE_UPSTREAM) &&
+ (type != PCI_EXP_TYPE_DOWNSTREAM) &&
+ (type != PCI_EXP_TYPE_RC_EC)))
return -ENODEV;
status = pcie_port_device_register(dev);
@@ -195,6 +197,8 @@
{ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) },
/* subtractive decode PCI-to-PCI bridge, class type is 060401h */
{ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) },
+ /* handle any Root Complex Event Collector */
+ { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) },
{ },
};
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 95fcc73..fb2e52f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1816,6 +1816,18 @@
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
+static void quirk_no_msi(struct pci_dev *dev)
+{
+ pci_info(dev, "avoiding MSI to work around a hardware defect\n");
+ dev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi);
+
static void quirk_pcie_mch(struct pci_dev *pdev)
{
pdev->no_msi = 1;
@@ -4885,6 +4897,9 @@
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
/* Broadcom multi-function device */
{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7f1acb3..875d50c 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -210,6 +210,17 @@
root = pci_find_parent_resource(dev, res);
if (!root) {
+ /*
+ * If dev is behind a bridge, accesses will only reach it
+ * if res is inside the relevant bridge window.
+ */
+ if (pci_upstream_bridge(dev))
+ return -ENXIO;
+
+ /*
+ * On the root bus, assume the host bridge will forward
+ * everything.
+ */
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 82d10b6..73508fc 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -151,7 +151,7 @@
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
- depends on MIPS_ALCHEMY && PCMCIA
+ depends on MIPS_DB1XXX && PCMCIA
help
Enable this driver if you want PCMCIA support on your Alchemy
Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index cb2f55f..7fd11ef 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -398,6 +398,9 @@
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
+ if (event == leader)
+ return 0;
+
for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
@@ -487,12 +490,7 @@
local64_set(&hwc->period_left, hwc->sample_period);
}
- if (event->group_leader != event) {
- if (validate_group(event) != 0)
- return -EINVAL;
- }
-
- return 0;
+ return validate_group(event);
}
static int armpmu_event_init(struct perf_event *event)
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index ef96764..2e1f368 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -117,7 +117,7 @@
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu_devid(irq))
+ if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index cc00915..6fbfcab 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -39,6 +39,24 @@
#include <asm/mmu.h>
#include <asm/sysreg.h>
+/*
+ * Cache if the event is allowed to trace Context information.
+ * This allows us to perform the check, i.e, perfmon_capable(),
+ * in the context of the event owner, once, during the event_init().
+ */
+#define SPE_PMU_HW_FLAGS_CX BIT(0)
+
+static void set_spe_event_has_cx(struct perf_event *event)
+{
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+ event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+}
+
+static bool get_spe_event_has_cx(struct perf_event *event)
+{
+ return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
+}
+
#define ARM_SPE_BUF_PAD_BYTE 0
struct arm_spe_pmu_buf {
@@ -274,7 +292,7 @@
if (!attr->exclude_kernel)
reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+ if (get_spe_event_has_cx(event))
reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
return reg;
@@ -699,10 +717,10 @@
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (!perfmon_capable() &&
(reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
- BIT(SYS_PMSCR_EL1_CX_SHIFT) |
BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
return -EACCES;
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 7f7bc09..e09bbf3 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -29,7 +29,7 @@
#define CNTL_OVER_MASK 0xFFFFFFFE
#define CNTL_CSV_SHIFT 24
-#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
+#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 23a0e00..d58810f 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -739,7 +739,7 @@
{
u64 mpidr;
int cpu_cluster_id;
- struct cluster_pmu *cluster = NULL;
+ struct cluster_pmu *cluster;
/*
* This assumes that the cluster_id is in MPIDR[aff1] for
@@ -761,10 +761,10 @@
cluster->cluster_id);
cpumask_set_cpu(cpu, &cluster->cluster_cpus);
*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
- break;
+ return cluster;
}
- return cluster;
+ return NULL;
}
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index 03c061d..8f40b93 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -261,8 +261,9 @@
return PTR_ERR(priv->clk_usb);
priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
- if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
- return PTR_ERR(priv->reset);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+ "Failed to get the reset line");
priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
if (priv->dr_mode == USB_DR_MODE_UNKNOWN) {
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 5172971..3cd4d51 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -629,7 +629,8 @@
cleanup:
if (error < 0)
phy_mdm6600_device_power_off(ddata);
-
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
return error;
}
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 14e0551..0aa740b 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -66,10 +66,10 @@
cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui);
cfg->init = 100;
- cfg->lpx = 60000;
+ cfg->lpx = 50000;
cfg->ta_get = 5 * cfg->lpx;
cfg->ta_go = 4 * cfg->lpx;
- cfg->ta_sure = 2 * cfg->lpx;
+ cfg->ta_sure = cfg->lpx;
cfg->wakeup = 1000;
cfg->hs_clk_rate = hs_clk_rate;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 0cda168..afcc82a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -3141,7 +3141,7 @@
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
- goto err_lane_rst;
+ goto err_pcs_ready;
qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
cfg->pcs_misc_tbl_num);
@@ -3717,6 +3717,11 @@
.owner = THIS_MODULE,
};
+static void qcom_qmp_reset_control_put(void *data)
+{
+ reset_control_put(data);
+}
+
static
int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
@@ -3789,7 +3794,7 @@
* all phys that don't need this.
*/
snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
if (IS_ERR(qphy->pipe_clk)) {
if (cfg->type == PHY_TYPE_PCIE ||
cfg->type == PHY_TYPE_USB3) {
@@ -3811,6 +3816,10 @@
dev_err(dev, "failed to get lane%d reset\n", id);
return PTR_ERR(qphy->lane_rst);
}
+ ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
+ qphy->lane_rst);
+ if (ret)
+ return ret;
}
if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 04d18d5..d4741c2 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -54,8 +54,10 @@
/* Configure pins for HSIC functionality */
pins_default = pinctrl_lookup_state(uphy->pctl, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pins_default))
- return PTR_ERR(pins_default);
+ if (IS_ERR(pins_default)) {
+ ret = PTR_ERR(pins_default);
+ goto err_ulpi;
+ }
ret = pinctrl_select_state(uphy->pctl, pins_default);
if (ret)
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 4dd7324..ea46576 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -190,6 +190,7 @@
return -EINVAL;
sata_phy->client = of_find_i2c_device_by_node(node);
+ of_node_put(node);
if (!sata_phy->client)
return -EPROBE_DEFER;
@@ -198,20 +199,21 @@
sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
if (IS_ERR(sata_phy->phyclk)) {
dev_err(dev, "failed to get clk for PHY\n");
- return PTR_ERR(sata_phy->phyclk);
+ ret = PTR_ERR(sata_phy->phyclk);
+ goto put_dev;
}
ret = clk_prepare_enable(sata_phy->phyclk);
if (ret < 0) {
dev_err(dev, "failed to enable source clk\n");
- return ret;
+ goto put_dev;
}
sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops);
if (IS_ERR(sata_phy->phy)) {
- clk_disable_unprepare(sata_phy->phyclk);
dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(sata_phy->phy);
+ ret = PTR_ERR(sata_phy->phy);
+ goto clk_disable;
}
phy_set_drvdata(sata_phy->phy, sata_phy);
@@ -219,11 +221,18 @@
phy_provider = devm_of_phy_provider_register(dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
- clk_disable_unprepare(sata_phy->phyclk);
- return PTR_ERR(phy_provider);
+ ret = PTR_ERR(phy_provider);
+ goto clk_disable;
}
return 0;
+
+clk_disable:
+ clk_disable_unprepare(sata_phy->phyclk);
+put_dev:
+ put_device(&sata_phy->client->dev);
+
+ return ret;
}
static const struct of_device_id exynos_sata_phy_of_match[] = {
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 2b3639c..03fc567 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -393,6 +393,8 @@
ret = of_property_read_u32(child, "reg", &index);
if (ret || index > usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
+ if (!ret)
+ ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 2ff56ce..21c0088 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -838,7 +838,7 @@
clk_err:
of_clk_del_provider(node);
-
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 4fec90d..f77ac04 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -215,7 +215,7 @@
return 0;
err1:
- clk_disable(phy->wkupclk);
+ clk_disable_unprepare(phy->wkupclk);
err0:
return ret;
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 5c1a109..c2ba406 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -1224,18 +1224,12 @@
FUNC_GROUP_DECL(WDTRST4, AA12);
#define AE12 196
-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
- SIG_EXPR_LIST_PTR(AE12, GPIOY4));
+PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));
#define AF12 197
-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
- SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));
#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1508,9 +1502,8 @@
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
+FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
@@ -1906,7 +1899,6 @@
ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID),
- ASPEED_PINCTRL_GROUP(FWQSPID),
ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 9c65d56..e792318 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -235,11 +235,11 @@
const struct aspeed_sig_expr **funcs;
const struct aspeed_sig_expr ***prios;
- pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
-
if (!pdesc)
return -EINVAL;
+ pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
+
prios = pdesc->prios;
if (!prios)
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 6768b2f..39d2024 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -351,6 +351,22 @@
return pinctrl_gpio_direction_output(chip->base + offset);
}
+static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
+ struct device_node *np)
+{
+ struct pinctrl_dev *pctldev = of_pinctrl_get(np);
+
+ of_node_put(np);
+
+ if (!pctldev)
+ return 0;
+
+ gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
+ gc->ngpio);
+
+ return 0;
+}
+
static const struct gpio_chip bcm2835_gpio_chip = {
.label = MODULE_NAME,
.owner = THIS_MODULE,
@@ -365,6 +381,7 @@
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
.can_sleep = false,
+ .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
};
static const struct gpio_chip bcm2711_gpio_chip = {
@@ -381,6 +398,7 @@
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
.can_sleep = false,
+ .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
};
static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 3fb2387..eac55fe 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,6 +220,8 @@
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+ if (!propname)
+ return -ENOMEM;
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 348c670..4de832a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1571,16 +1571,14 @@
const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
{
+ const struct intel_pinctrl_soc_data * const *table;
const struct intel_pinctrl_soc_data *data = NULL;
- const struct intel_pinctrl_soc_data **table;
- struct acpi_device *adev;
- unsigned int i;
- adev = ACPI_COMPANION(&pdev->dev);
- if (adev) {
- const void *match = device_get_match_data(&pdev->dev);
+ table = device_get_match_data(&pdev->dev);
+ if (table) {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ unsigned int i;
- table = (const struct intel_pinctrl_soc_data **)match;
for (i = 0; table[i]; i++) {
if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
data = table[i];
@@ -1594,7 +1592,7 @@
if (!id)
return ERR_PTR(-ENODEV);
- table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+ table = (const struct intel_pinctrl_soc_data * const *)id->driver_data;
data = table[pdev->id];
}
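
For reference, the intel_pinctrl_get_soc_data() rework above leans on device_get_match_data(), which hands back the driver_data of whichever ACPI (or OF) match entry fired, so the function no longer has to cast id->driver_data by hand before walking the per-SoC table. A minimal sketch of that lookup pattern, using hypothetical demo_* names rather than the real driver's tables:

#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_soc_data {
	const char *uid;
	unsigned int npins;
};

/* Hypothetical per-SoC entries; the real driver stores
 * struct intel_pinctrl_soc_data pointers the same way. */
static const struct demo_soc_data demo_soc_a = { .uid = "1", .npins = 40 };
static const struct demo_soc_data demo_soc_b = { .uid = "2", .npins = 32 };
static const struct demo_soc_data * const demo_soc_table[] = {
	&demo_soc_a, &demo_soc_b, NULL
};

static const struct acpi_device_id demo_acpi_ids[] = {
	{ "DEMO0001", (kernel_ulong_t)demo_soc_table },
	{ }
};
MODULE_DEVICE_TABLE(acpi, demo_acpi_ids);

static int demo_probe(struct platform_device *pdev)
{
	/* driver_data of the matched entry, i.e. the NULL-terminated table */
	const struct demo_soc_data * const *table =
		device_get_match_data(&pdev->dev);

	if (!table)
		return -ENODEV;
	/* ...walk table[i] and pick the entry whose uid matches the device... */
	return 0;
}

static struct platform_driver demo_pinctrl_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-pinctrl",
		.acpi_match_table = demo_acpi_ids,
	},
};
module_platform_driver(demo_pinctrl_driver);
MODULE_LICENSE("GPL");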
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index eef17f2..4ed41d3 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -30,6 +30,7 @@
select GENERIC_PINMUX_FUNCTIONS
select GPIOLIB
select OF_GPIO
+ select EINT_MTK
select PINCTRL_MTK_V2
config PINCTRL_MTK_PARIS
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index a02ad10..730581d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1039,6 +1039,7 @@
node = of_parse_phandle(np, "mediatek,pctl-regmap", 0);
if (node) {
pctl->regmap1 = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(pctl->regmap1))
return PTR_ERR(pctl->regmap1);
} else if (regmap) {
@@ -1052,6 +1053,7 @@
node = of_parse_phandle(np, "mediatek,pctl-regmap", 1);
if (node) {
pctl->regmap2 = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(pctl->regmap2))
return PTR_ERR(pctl->regmap2);
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 623af44..d0a4ebb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -96,20 +96,16 @@
err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret);
if (err)
goto out;
+ if (ret == MTK_PUPD_SET_R1R0_00)
+ ret = MTK_DISABLE;
if (param == PIN_CONFIG_BIAS_DISABLE) {
- if (ret == MTK_PUPD_SET_R1R0_00)
- ret = MTK_DISABLE;
+ if (ret != MTK_DISABLE)
+ err = -EINVAL;
} else if (param == PIN_CONFIG_BIAS_PULL_UP) {
- /* When desire to get pull-up value, return
- * error if current setting is pull-down
- */
- if (!pullup)
+ if (!pullup || ret == MTK_DISABLE)
err = -EINVAL;
} else if (param == PIN_CONFIG_BIAS_PULL_DOWN) {
- /* When desire to get pull-down value, return
- * error if current setting is pull-up
- */
- if (pullup)
+ if (pullup || ret == MTK_DISABLE)
err = -EINVAL;
}
} else {
@@ -188,8 +184,7 @@
}
static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- enum pin_config_param param,
- enum pin_config_param arg)
+ enum pin_config_param param, u32 arg)
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
const struct mtk_pin_desc *desc;
@@ -585,6 +580,9 @@
if (gpio >= hw->soc->npins)
return -EINVAL;
+ if (mtk_is_virt_gpio(hw, gpio))
+ return -EINVAL;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
pinmux = mtk_pctrl_get_pinmux(hw, gpio);
if (pinmux >= hw->soc->nfuncs)
@@ -719,10 +717,10 @@
unsigned long *config)
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ struct mtk_pinctrl_group *grp = &hw->groups[group];
- *config = hw->groups[group].config;
-
- return 0;
+ /* One pin per group only */
+ return mtk_pinconf_get(pctldev, grp->pin, config);
}
static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
@@ -738,8 +736,6 @@
pinconf_to_config_argument(configs[i]));
if (ret < 0)
return ret;
-
- grp->config = configs[i];
}
return 0;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5cb018f..85a0052 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -781,7 +781,7 @@
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
- if (irq < 0)
+ if (!irq)
continue;
girq->parents[i] = irq;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 657e35a..d80ec0d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1421,8 +1421,10 @@
has_config = nmk_pinctrl_dt_get_config(np, &configs);
np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
+ if (np_config) {
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ of_node_put(np_config);
+ }
if (has_config) {
const char *gpio_name;
const char *pin;
@@ -1883,8 +1885,10 @@
}
prcm_np = of_parse_phandle(np, "prcm", 0);
- if (prcm_np)
+ if (prcm_np) {
npct->prcm_base = of_iomap(prcm_np, 0);
+ of_node_put(prcm_np);
+ }
if (!npct->prcm_base) {
if (version == PINCTRL_NMK_STN8815) {
dev_info(&pdev->dev,
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 6de31b5..ce36b6f 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -78,7 +78,6 @@
struct gpio_chip gc;
int irqbase;
int irq;
- void *priv;
struct irq_chip irq_chip;
u32 pinctrl_id;
int (*direction_input)(struct gpio_chip *chip, unsigned offset);
@@ -226,7 +225,7 @@
chained_irq_enter(chip, desc);
sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
- dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts,
+ dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts,
en);
sts &= en;
@@ -241,33 +240,33 @@
gpiochip_get_data(irq_data_get_irq_chip_data(d));
unsigned int gpio = BIT(d->hwirq);
- dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio,
+ dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio,
d->irq, type);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- dev_dbg(d->chip->parent_device, "edge.rising\n");
+ dev_dbg(bank->gc.parent, "edge.rising\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_FALLING:
- dev_dbg(d->chip->parent_device, "edge.falling\n");
+ dev_dbg(bank->gc.parent, "edge.falling\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
- dev_dbg(d->chip->parent_device, "edge.both\n");
+ dev_dbg(bank->gc.parent, "edge.both\n");
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
- dev_dbg(d->chip->parent_device, "level.low\n");
+ dev_dbg(bank->gc.parent, "level.low\n");
npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_LEVEL_HIGH:
- dev_dbg(d->chip->parent_device, "level.high\n");
+ dev_dbg(bank->gc.parent, "level.high\n");
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
default:
- dev_dbg(d->chip->parent_device, "invalid irq type\n");
+ dev_dbg(bank->gc.parent, "invalid irq type\n");
return -EINVAL;
}
@@ -289,7 +288,7 @@
gpiochip_get_data(irq_data_get_irq_chip_data(d));
unsigned int gpio = d->hwirq;
- dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
}
@@ -301,7 +300,7 @@
unsigned int gpio = d->hwirq;
/* Clear events */
- dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
}
@@ -313,7 +312,7 @@
unsigned int gpio = d->hwirq;
/* Enable events */
- dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
}
@@ -323,7 +322,7 @@
unsigned int gpio = d->hwirq;
/* active-high, input, clear interrupt, enable interrupt */
- dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq);
+ dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq);
npcmgpio_direction_input(gc, gpio);
npcmgpio_irq_ack(d);
npcmgpio_irq_unmask(d);
@@ -905,7 +904,7 @@
#define DRIVE_STRENGTH_HI_SHIFT 12
#define DRIVE_STRENGTH_MASK 0x0000FF00
-#define DS(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
+#define DSTR(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
((hi) << DRIVE_STRENGTH_HI_SHIFT))
#define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF)
#define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF)
@@ -925,31 +924,31 @@
static const struct npcm7xx_pincfg pincfg[] = {
/* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */
NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(3, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW),
- NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW),
- NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(20, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0),
NPCM7XX_PINCFG(21, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0),
NPCM7XX_PINCFG(22, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0),
NPCM7XX_PINCFG(23, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0),
- NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0),
@@ -965,12 +964,12 @@
NPCM7XX_PINCFG(39, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(40, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(41, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DS(2, 4) | GPO),
+ NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DSTR(2, 4) | GPO),
NPCM7XX_PINCFG(43, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0),
NPCM7XX_PINCFG(44, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0),
NPCM7XX_PINCFG(45, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)),
- NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)),
+ NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)),
+ NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)),
NPCM7XX_PINCFG(48, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, GPO),
NPCM7XX_PINCFG(49, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, 0),
NPCM7XX_PINCFG(50, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
@@ -980,8 +979,8 @@
NPCM7XX_PINCFG(54, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(55, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(56, r1err, MFSEL1, 12, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)),
- NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
NPCM7XX_PINCFG(59, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(60, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(61, uart1, MFSEL1, 10, none, NONE, 0, none, NONE, 0, GPO),
@@ -1004,19 +1003,19 @@
NPCM7XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM7XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(90, r2err, MFSEL1, 15, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)),
- NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
NPCM7XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0),
NPCM7XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0),
NPCM7XX_PINCFG(95, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0),
@@ -1062,34 +1061,34 @@
NPCM7XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(134, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(135, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM7XX_PINCFG(141, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM7XX_PINCFG(143, sd1, MFSEL3, 12, sd1pwr, MFSEL4, 5, none, NONE, 0, 0),
- NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM7XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, 0), /* Z1/A1 */
- NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM7XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, 0), /* Z1/A1 */
- NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
- NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DS(8, 12)),
- NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DSTR(8, 12)),
+ NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(163, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0),
NPCM7XX_PINCFG(164, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
NPCM7XX_PINCFG(165, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
@@ -1102,25 +1101,25 @@
NPCM7XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM7XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
- NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
- NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DS(8, 12)),
- NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DS(2, 4)),
- NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */
+ NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DSTR(2, 4)),
+ NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */
- NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */
+ NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */
NPCM7XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(194, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(195, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0),
@@ -1131,11 +1130,11 @@
NPCM7XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(202, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(204, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW),
NPCM7XX_PINCFG(205, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW),
- NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)),
- NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)),
+ NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)),
NPCM7XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
NPCM7XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
NPCM7XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
@@ -1147,20 +1146,20 @@
NPCM7XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0),
NPCM7XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0),
NPCM7XX_PINCFG(218, wdog1, MFSEL3, 19, none, NONE, 0, none, NONE, 0, 0),
- NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)),
NPCM7XX_PINCFG(220, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(221, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0),
NPCM7XX_PINCFG(224, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
- NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
- NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
- NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM7XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */
NPCM7XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */
NPCM7XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */
@@ -1561,7 +1560,7 @@
{
struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
- dev_dbg(npcm->dev, "group size: %d\n", ARRAY_SIZE(npcm7xx_groups));
+ dev_dbg(npcm->dev, "group size: %zu\n", ARRAY_SIZE(npcm7xx_groups));
return ARRAY_SIZE(npcm7xx_groups);
}
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 1e225d5..42e27db 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -30,10 +30,10 @@
PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false),
PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false),
PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false),
- PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false),
+ PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", "ohms", true),
PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
- "input bias pull to pin specific state", NULL, false),
- PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
+ "input bias pull to pin specific state", "ohms", true),
+ PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", "ohms", true),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
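
The PCONFDUMP change above makes the pull-up, pull-down and pull-to-default entries print their argument with an "ohms" unit instead of being shown as bare flags. The resistance travels in the argument half of the packed pinconf word; a small sketch of packing and unpacking such a value with the generic helpers (the 47k figure is purely illustrative):

#include <linux/pinctrl/pinconf-generic.h>
#include <linux/types.h>

static void demo_pack_pull_up(void)
{
	/* Pack a 47k pull-up into one config word, then recover the
	 * parameter and the ohms argument, which is roughly what the
	 * debugfs dump path does before printing the "ohms" suffix. */
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP,
						     47000);
	enum pin_config_param param = pinconf_to_config_param(cfg);
	u32 ohms = pinconf_to_config_argument(cfg);

	(void)param;	/* PIN_CONFIG_BIAS_PULL_UP */
	(void)ohms;	/* 47000 */
}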
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index e20bcc8..82b658a 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -815,6 +815,7 @@
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -823,7 +824,9 @@
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -833,6 +836,7 @@
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -841,7 +845,10 @@
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index ec761ba..989a37f 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1374,10 +1374,10 @@
}
irq = irq_of_parse_and_map(child, 0);
- if (irq < 0) {
- dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+ if (!irq) {
+ dev_err(pctl->dev, "No IRQ for bank %u\n", i);
of_node_put(child);
- ret = irq;
+ ret = -EINVAL;
goto err;
}
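
This pistachio hunk and the armada-37xx hunk further up fix the same mistake: irq_of_parse_and_map() returns an unsigned int and reports failure as 0, never as a negative number, so an `irq < 0` test can never trigger and a bogus virq ends up being used. A minimal sketch of the intended check (demo_* names are hypothetical):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int demo_map_bank_irq(struct device *dev, struct device_node *child,
			     unsigned int bank)
{
	/* Returns a virq number, or 0 when no mapping could be created. */
	unsigned int irq = irq_of_parse_and_map(child, 0);

	if (!irq) {
		dev_err(dev, "no IRQ for bank %u\n", bank);
		return -EINVAL;
	}

	return irq;
}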
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 53a0bad..07b1204 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -663,95 +663,110 @@
static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
{
+ /* gpio1b6_sel */
.num = 1,
.pin = 14,
.reg = 0x28,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio1b7_sel */
.num = 1,
.pin = 15,
.reg = 0x2c,
.bit = 0,
.mask = 0x3
}, {
+ /* gpio1c2_sel */
.num = 1,
.pin = 18,
.reg = 0x30,
.bit = 4,
.mask = 0xf
}, {
+ /* gpio1c3_sel */
.num = 1,
.pin = 19,
.reg = 0x30,
.bit = 8,
.mask = 0xf
}, {
+ /* gpio1c4_sel */
.num = 1,
.pin = 20,
.reg = 0x30,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio1c5_sel */
.num = 1,
.pin = 21,
.reg = 0x34,
.bit = 0,
.mask = 0xf
}, {
+ /* gpio1c6_sel */
.num = 1,
.pin = 22,
.reg = 0x34,
.bit = 4,
.mask = 0xf
}, {
+ /* gpio1c7_sel */
.num = 1,
.pin = 23,
.reg = 0x34,
.bit = 8,
.mask = 0xf
}, {
+ /* gpio3b4_sel */
.num = 3,
.pin = 12,
.reg = 0x68,
.bit = 8,
.mask = 0xf
}, {
+ /* gpio3b5_sel */
.num = 3,
.pin = 13,
.reg = 0x68,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio2a2_sel */
.num = 2,
.pin = 2,
- .reg = 0x608,
- .bit = 0,
- .mask = 0x7
+ .reg = 0x40,
+ .bit = 4,
+ .mask = 0x3
}, {
+ /* gpio2a3_sel */
.num = 2,
.pin = 3,
- .reg = 0x608,
- .bit = 4,
- .mask = 0x7
+ .reg = 0x40,
+ .bit = 6,
+ .mask = 0x3
}, {
+ /* gpio2c0_sel */
.num = 2,
.pin = 16,
- .reg = 0x610,
- .bit = 8,
- .mask = 0x7
+ .reg = 0x50,
+ .bit = 0,
+ .mask = 0x3
}, {
+ /* gpio3b2_sel */
.num = 3,
.pin = 10,
- .reg = 0x610,
- .bit = 0,
- .mask = 0x7
+ .reg = 0x68,
+ .bit = 4,
+ .mask = 0x3
}, {
+ /* gpio3b3_sel */
.num = 3,
.pin = 11,
- .reg = 0x610,
- .bit = 4,
- .mask = 0x7
+ .reg = 0x68,
+ .bit = 6,
+ .mask = 0x3
},
};
@@ -3774,6 +3789,7 @@
node = of_parse_phandle(np, "rockchip,grf", 0);
if (node) {
info->regmap_base = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(info->regmap_base))
return PTR_ERR(info->regmap_base);
} else {
@@ -3810,6 +3826,7 @@
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
info->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(info->regmap_pmu))
return PTR_ERR(info->regmap_pmu);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 396db12..bf68913 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -844,8 +844,8 @@
PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index af144e7..3bd7f9f 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1316,7 +1316,7 @@
static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
{ 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
{ 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
- { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 },
{ 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
{ 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
{ 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 9d168b9..54f1a73 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -71,12 +71,11 @@
/* Fill them. */
for (i = 0; i < num_windows; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- windows->phys = res->start;
- windows->size = resource_size(res);
- windows->virt = devm_ioremap_resource(pfc->dev, res);
+ windows->virt = devm_platform_get_and_ioremap_resource(pdev, i, &res);
if (IS_ERR(windows->virt))
return -ENOMEM;
+ windows->phys = res->start;
+ windows->size = resource_size(res);
windows++;
}
for (i = 0; i < num_irqs; i++)
@@ -739,7 +738,7 @@
#ifdef DEBUG
#define SH_PFC_MAX_REGS 300
-#define SH_PFC_MAX_ENUMS 3000
+#define SH_PFC_MAX_ENUMS 5000
static unsigned int sh_pfc_errors __initdata = 0;
static unsigned int sh_pfc_warnings __initdata = 0;
@@ -851,7 +850,8 @@
sh_pfc_check_reg(drvname, cfg_reg->reg);
if (cfg_reg->field_width) {
- n = cfg_reg->reg_width / cfg_reg->field_width;
+ fw = cfg_reg->field_width;
+ n = (cfg_reg->reg_width / fw) << fw;
/* Skip field checks (done at build time) */
goto check_enum_ids;
}
diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c
index b3b116d..1400572 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77470.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77470.c
@@ -2121,7 +2121,7 @@
VI0_CLK_MARK,
};
/* - VIN1 ------------------------------------------------------------------- */
-static const union vin_data vin1_data_pins = {
+static const union vin_data12 vin1_data_pins = {
.data12 = {
RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2),
RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
@@ -2131,7 +2131,7 @@
RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
},
};
-static const union vin_data vin1_data_mux = {
+static const union vin_data12 vin1_data_mux = {
.data12 = {
VI1_DATA0_MARK, VI1_DATA1_MARK,
VI1_DATA2_MARK, VI1_DATA3_MARK,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index ef5fb25..849d091 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -865,17 +865,15 @@
ipctl->mdio_func[0] = -1;
ipctl->mdio_func[1] = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipctl->lev1_protect_phys = (u32)res->start + 0x400;
- ipctl->lev1 = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->lev1 = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ipctl->lev1))
return PTR_ERR(ipctl->lev1);
+ ipctl->lev1_protect_phys = (u32)res->start + 0x400;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ipctl->lev2_protect_phys = (u32)res->start + 0x400;
- ipctl->lev2 = devm_ioremap_resource(&pdev->dev, res);
+ ipctl->lev2 = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(ipctl->lev2))
return PTR_ERR(ipctl->lev2);
+ ipctl->lev2_protect_phys = (u32)res->start + 0x400;
ipctl->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ipctl->clk))
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index dfd805e..7b0576f 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,14 +4,13 @@
#
config PINCTRL_SAMSUNG
bool
- depends on OF_GPIO
+ select GPIOLIB
select PINMUX
select PINCONF
config PINCTRL_EXYNOS
bool "Pinctrl common driver part for Samsung Exynos SoCs"
- depends on OF_GPIO
- depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S5PV210 || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
@@ -26,12 +25,10 @@
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on OF_GPIO
- depends on ARCH_S3C24XX || COMPILE_TEST
+ depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
- depends on OF_GPIO
- depends on ARCH_S3C64XX || COMPILE_TEST
+ depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 7f809a5..56fff83 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1002,6 +1002,16 @@
return &(of_data->ctrl[id]);
}
+static void samsung_banks_of_node_put(struct samsung_pinctrl_drv_data *d)
+{
+ struct samsung_pin_bank *bank;
+ unsigned int i;
+
+ bank = d->pin_banks;
+ for (i = 0; i < d->nr_banks; ++i, ++bank)
+ of_node_put(bank->of_node);
+}
+
/* retrieve the soc specific data */
static const struct samsung_pin_ctrl *
samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
@@ -1116,19 +1126,19 @@
if (ctrl->retention_data) {
drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
ctrl->retention_data);
- if (IS_ERR(drvdata->retention_ctrl))
- return PTR_ERR(drvdata->retention_ctrl);
+ if (IS_ERR(drvdata->retention_ctrl)) {
+ ret = PTR_ERR(drvdata->retention_ctrl);
+ goto err_put_banks;
+ }
}
ret = samsung_pinctrl_register(pdev, drvdata);
if (ret)
- return ret;
+ goto err_put_banks;
ret = samsung_gpiolib_register(pdev, drvdata);
- if (ret) {
- samsung_pinctrl_unregister(pdev, drvdata);
- return ret;
- }
+ if (ret)
+ goto err_unregister;
if (ctrl->eint_gpio_init)
ctrl->eint_gpio_init(drvdata);
@@ -1138,6 +1148,12 @@
platform_set_drvdata(pdev, drvdata);
return 0;
+
+err_unregister:
+ samsung_pinctrl_unregister(pdev, drvdata);
+err_put_banks:
+ samsung_banks_of_node_put(drvdata);
+ return ret;
}
/*
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index e13723b..60406f1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -225,6 +225,13 @@
pinctrl_gpio_free(chip->base + offset);
}
+static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+}
+
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
@@ -232,7 +239,7 @@
clk_enable(bank->clk);
- ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+ ret = stm32_gpio_get_noclk(chip, offset);
clk_disable(bank->clk);
@@ -311,8 +318,12 @@
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
+ /* Do not access the GPIO if this is not LEVEL triggered IRQ. */
+ if (!(bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK))
+ return;
+
/* If level interrupt type then retrig */
- level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+ level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
@@ -354,6 +365,7 @@
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+ unsigned long flags;
int ret;
ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
@@ -367,6 +379,10 @@
return ret;
}
+ flags = irqd_get_trigger_type(irq_data);
+ if (flags & IRQ_TYPE_LEVEL_MASK)
+ clk_enable(bank->clk);
+
return 0;
}
@@ -374,6 +390,9 @@
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
+ if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
+ clk_disable(bank->clk);
+
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
@@ -1284,15 +1303,17 @@
bank->bank_ioport_nr = bank_ioport_nr;
spin_lock_init(&bank->lock);
- /* create irq hierarchical domain */
- bank->fwnode = of_node_to_fwnode(np);
+ if (pctl->domain) {
+ /* create irq hierarchical domain */
+ bank->fwnode = of_node_to_fwnode(np);
- bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
- STM32_GPIO_IRQ_LINE, bank->fwnode,
- &stm32_gpio_domain_ops, bank);
+ bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE,
+ bank->fwnode, &stm32_gpio_domain_ops,
+ bank);
- if (!bank->domain)
- return -ENODEV;
+ if (!bank->domain)
+ return -ENODEV;
+ }
err = gpiochip_add_data(&bank->gpio_chip, bank);
if (err) {
@@ -1462,6 +1483,8 @@
pctl->domain = stm32_pctrl_get_irq_domain(np);
if (IS_ERR(pctl->domain))
return PTR_ERR(pctl->domain);
+ if (!pctl->domain)
+ dev_warn(dev, "pinctrl without interrupt support\n");
/* hwspinlock is optional */
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index 21054fc..18088f6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -98,7 +98,7 @@
static struct platform_driver a100_r_pinctrl_driver = {
.probe = a100_r_pinctrl_probe,
.driver = {
- .name = "sun50iw10p1-r-pinctrl",
+ .name = "sun50i-a100-r-pinctrl",
.of_match_table = a100_r_pinctrl_match,
},
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index 4557e18..12c40f9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -105,6 +105,7 @@
.npins = ARRAY_SIZE(sun50i_h6_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 4ada803..b5c1a8f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -158,26 +158,26 @@
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQS */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand")), /* CE2 */
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand")), /* CE3 */
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
index 2801ca7..68a5b62 100644
--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
@@ -204,7 +204,7 @@
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index be7f4f9..e4b41cc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -544,6 +544,8 @@
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
int i;
+ pin -= pctl->desc->pin_base;
+
for (i = 0; i < num_configs; i++) {
enum pin_config_param param;
unsigned long flags;
@@ -622,7 +624,7 @@
unsigned pin,
struct regulator *supply)
{
- unsigned short bank = pin / PINS_PER_BANK;
+ unsigned short bank;
unsigned long flags;
u32 val, reg;
int uV;
@@ -638,6 +640,9 @@
if (uV == 0)
return 0;
+ pin -= pctl->desc->pin_base;
+ bank = pin / PINS_PER_BANK;
+
switch (pctl->desc->io_bias_cfg_variant) {
case BIAS_VOLTAGE_GRP_CONFIG:
/*
@@ -655,8 +660,6 @@
else
val = 0xD; /* 3.3V */
- pin -= pctl->desc->pin_base;
-
reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
reg &= ~IO_BIAS_MASK;
writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index f901d2e..88cbc43 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,6 +2,7 @@
# tell define_trace.h where to find the cros ec trace header
CFLAGS_cros_ec_trace.o:= -I$(src)
+CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src)
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
@@ -20,7 +21,7 @@
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 472a03d..109c191 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -718,6 +718,7 @@
chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
const struct chromeos_laptop *src)
{
+ struct i2c_peripheral *i2c_peripherals;
struct i2c_peripheral *i2c_dev;
struct i2c_board_info *info;
int i;
@@ -726,17 +727,15 @@
if (!src->num_i2c_peripherals)
return 0;
- cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
- src->num_i2c_peripherals *
- sizeof(*src->i2c_peripherals),
- GFP_KERNEL);
- if (!cros_laptop->i2c_peripherals)
+ i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!i2c_peripherals)
return -ENOMEM;
- cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
-
- for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ for (i = 0; i < src->num_i2c_peripherals; i++) {
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
error = chromeos_laptop_setup_irq(i2c_dev);
@@ -754,16 +753,19 @@
}
}
+ cros_laptop->i2c_peripherals = i2c_peripherals;
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
return 0;
err_out:
while (--i >= 0) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
if (info->properties)
property_entries_free(info->properties);
}
- kfree(cros_laptop->i2c_peripherals);
+ kfree(i2c_peripherals);
return error;
}
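
The chromeos_laptop change above is a "build locally, publish on success" cleanup: the duplicated peripheral array is assigned into cros_laptop only after every entry has been prepared, so the error path frees the local copy and the device structure never points at half-initialized memory. A generic sketch of that pattern with hypothetical demo_* types:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_item {
	int irq;
};

struct demo_ctx {
	struct demo_item *items;
	size_t num_items;
};

/* Hypothetical per-item preparation step. */
static int demo_setup_item(struct demo_item *item)
{
	return 0;
}

static int demo_prepare(struct demo_ctx *ctx, const struct demo_item *src,
			size_t n)
{
	struct demo_item *items;
	size_t i;
	int err;

	items = kmemdup(src, n * sizeof(*src), GFP_KERNEL);
	if (!items)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		err = demo_setup_item(&items[i]);
		if (err)
			goto err_free;	/* ctx has not been touched yet */
	}

	/* Publish only once everything has succeeded. */
	ctx->items = items;
	ctx->num_items = n;
	return 0;

err_free:
	kfree(items);
	return err;
}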
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 3104680..5a62266 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -121,16 +121,16 @@
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
-
- /* For now, report failure to transition to S0ix with a warning. */
+ /* Report failure to transition to system wide suspend with a warning. */
if (ret >= 0 && ec_dev->host_sleep_v1 &&
- (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME)) {
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
+ sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
ec_dev->last_resume_result =
buf.u.resp1.resume_response.sleep_transitions;
WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TIMEOUT,
- "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+ "EC detected sleep transition timeout. Total sleep transitions: %d",
buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
}
@@ -175,6 +175,8 @@
ec_dev->max_request = sizeof(struct ec_params_hello);
ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
ec_dev->max_passthru = 0;
+ ec_dev->ec = NULL;
+ ec_dev->pd = NULL;
ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
if (!ec_dev->din)
@@ -231,18 +233,16 @@
if (IS_ERR(ec_dev->pd)) {
dev_err(ec_dev->dev,
"Failed to create CrOS PD platform device\n");
- platform_device_unregister(ec_dev->ec);
- return PTR_ERR(ec_dev->pd);
+ err = PTR_ERR(ec_dev->pd);
+ goto exit;
}
}
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
err = devm_of_platform_populate(dev);
if (err) {
- platform_device_unregister(ec_dev->pd);
- platform_device_unregister(ec_dev->ec);
dev_err(dev, "Failed to register sub-devices\n");
- return err;
+ goto exit;
}
}
@@ -264,12 +264,16 @@
err = blocking_notifier_chain_register(&ec_dev->event_notifier,
&ec_dev->notifier_ready);
if (err)
- return err;
+ goto exit;
}
dev_info(dev, "Chrome EC device registered\n");
return 0;
+exit:
+ platform_device_unregister(ec_dev->ec);
+ platform_device_unregister(ec_dev->pd);
+ return err;
}
EXPORT_SYMBOL(cros_ec_register);
@@ -328,10 +332,16 @@
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
+ bool wake_event;
+
while (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
+ cros_ec_get_next_event(ec_dev, &wake_event, NULL) > 0) {
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
+
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+ }
}
/**
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index e0bce86..0de7c25 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -301,7 +301,7 @@
}
s_cmd->command += ec->cmd_offset;
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, s_cmd);
+ ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
@@ -327,6 +327,9 @@
if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
return -EFAULT;
+ if (s_mem.bytes > sizeof(s_mem.buffer))
+ return -EINVAL;
+
num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
s_mem.buffer);
if (num <= 0)
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 272c898..0dbceee 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -25,6 +25,9 @@
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
/**
* struct cros_ec_debugfs - EC debugging information.
*
@@ -33,7 +36,6 @@
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
*/
@@ -44,7 +46,6 @@
struct circ_buf log_buffer;
struct cros_ec_command *read_msg;
struct mutex log_mutex;
- wait_queue_head_t log_wq;
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@
buf_space--;
}
- wake_up(&debug_info->log_wq);
+ wake_up(&cros_ec_debugfs_log_wq);
}
mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@
mutex_unlock(&debug_info->log_mutex);
- ret = wait_event_interruptible(debug_info->log_wq,
+ ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
if (ret < 0)
return ret;
@@ -173,7 +174,7 @@
struct cros_ec_debugfs *debug_info = file->private_data;
__poll_t mask = 0;
- poll_wait(file, &debug_info->log_wq, wait);
+ poll_wait(file, &cros_ec_debugfs_log_wq, wait);
mutex_lock(&debug_info->log_mutex);
if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@
debug_info->log_buffer.tail = 0;
mutex_init(&debug_info->log_mutex);
- init_waitqueue_head(&debug_info->log_wq);
debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
debug_info, &cros_ec_console_log_fops);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 9f698a7..8ffbf92 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -507,13 +507,13 @@
ret = cros_ec_get_host_command_version_mask(ec_dev,
EC_CMD_GET_NEXT_EVENT,
&ver_mask);
- if (ret < 0 || ver_mask == 0)
+ if (ret < 0 || ver_mask == 0) {
ec_dev->mkbp_event_supported = 0;
- else
+ } else {
ec_dev->mkbp_event_supported = fls(ver_mask);
- dev_dbg(ec_dev->dev, "MKBP support version %u\n",
- ec_dev->mkbp_event_supported - 1);
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
+ }
/* Probe if host sleep v1 is supported for S0ix failure detection. */
ret = cros_ec_get_host_command_version_mask(ec_dev,
@@ -560,22 +560,28 @@
EXPORT_SYMBOL(cros_ec_query_all);
/**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
* @ec_dev: EC device.
* @msg: Message to write.
*
- * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
- * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
- * successfully and the EC replied with success status.
+ * Call this to send a command to the ChromeOS EC. This should be used instead
+ * of calling the EC's cmd_xfer() callback directly. This function does not
+ * convert EC command execution error codes to Linux error codes. Most
+ * in-kernel users will want to use cros_ec_cmd_xfer_status() instead since
+ * that function implements the conversion.
*
* Return:
- * >=0 - The number of bytes transferred
- * <0 - Linux error code
+ * >0 - EC command was executed successfully. The return value is the number
+ * of bytes returned by the EC (excluding the header).
+ * =0 - EC communication was successful. EC command execution results are
+ * reported in msg->result. The result will be EC_RES_SUCCESS if the
+ * command was executed successfully or report an EC command execution
+ * error.
+ * <0 - EC communication error. Return value is the Linux error code.
*/
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
- int ret, mapped;
+ int ret;
mutex_lock(&ec_dev->lock);
if (ec_dev->proto_version == EC_PROTO_VERSION_UNKNOWN) {
@@ -616,6 +622,32 @@
ret = send_command(ec_dev, msg);
mutex_unlock(&ec_dev->lock);
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
+ * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
+ * successfully and the EC replied with success status.
+ *
+ * Return:
+ * >=0 - The number of bytes transferred.
+ * <0 - Linux error code
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int ret, mapped;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret < 0)
+ return ret;
+
mapped = cros_ec_map_error(msg->result);
if (mapped) {
dev_dbg(ec_dev->dev, "Command result (err: %d [%d])\n",
@@ -716,6 +748,7 @@
u8 event_type;
u32 host_event;
int ret;
+ u32 ver_mask;
/*
* Default value for wake_event.
@@ -737,6 +770,37 @@
return get_keyboard_state_event(ec_dev);
ret = get_next_event(ec_dev);
+ /*
+ * -ENOPROTOOPT is returned when EC returns EC_RES_INVALID_VERSION.
+ * This can occur when EC based device (e.g. Fingerprint MCU) jumps to
+ * the RO image which doesn't support newer version of the command. In
+ * this case we will attempt to update maximum supported version of the
+ * EC_CMD_GET_NEXT_EVENT.
+ */
+ if (ret == -ENOPROTOOPT) {
+ dev_dbg(ec_dev->dev,
+ "GET_NEXT_EVENT returned invalid version error.\n");
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
+ if (ret < 0 || ver_mask == 0)
+ /*
+ * Do not change the MKBP supported version if we can't
+ * obtain supported version correctly. Please note that
+ * calling EC_CMD_GET_NEXT_EVENT returned
+ * EC_RES_INVALID_VERSION which means that the command
+ * is present.
+ */
+ return -ENOPROTOOPT;
+
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+ dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n",
+ ec_dev->mkbp_event_supported - 1);
+
+ /* Try to get next event with new MKBP support version set. */
+ ret = get_next_event(ec_dev);
+ }
+
if (ret <= 0)
return ret;
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 98e3708..71948da 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -17,7 +17,8 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#include "cros_ec_trace.h"
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensorhub_trace.h"
/* Precision of fixed point for the m values from the filter */
#define M_PRECISION BIT(23)
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
new file mode 100644
index 0000000..57d9b47
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Sensorhub kernel module
+ *
+ * Copyright 2021 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORHUB_TRACE_H_
+
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+ current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sample_timestamp)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sample_timestamp = ec_sample_timestamp;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sample_timestamp,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sensor_num)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sensor_num = ec_sensor_num;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sensor_num,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+ TP_ARGS(state, dx, dy),
+ TP_STRUCT__entry(
+ __field(s64, dx)
+ __field(s64, dy)
+ __field(s64, median_m)
+ __field(s64, median_error)
+ __field(s64, history_len)
+ __field(s64, x)
+ __field(s64, y)
+ ),
+ TP_fast_assign(
+ __entry->dx = dx;
+ __entry->dy = dy;
+ __entry->median_m = state->median_m;
+ __entry->median_error = state->median_error;
+ __entry->history_len = state->history_len;
+ __entry->x = state->x_offset;
+ __entry->y = state->y_offset;
+ ),
+ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+ __entry->dx,
+ __entry->dy,
+ __entry->median_m,
+ __entry->median_error,
+ __entry->history_len,
+ __entry->x,
+ __entry->y
+ )
+);
+
+
+#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 7e7cfc9..9bb5cd2 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
-#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/tracepoint.h>
@@ -71,100 +70,6 @@
__entry->retval)
);
-TRACE_EVENT(cros_ec_sensorhub_timestamp,
- TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
- s64 current_timestamp, s64 current_time),
- TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
- current_time),
- TP_STRUCT__entry(
- __field(u32, ec_sample_timestamp)
- __field(u32, ec_fifo_timestamp)
- __field(s64, fifo_timestamp)
- __field(s64, current_timestamp)
- __field(s64, current_time)
- __field(s64, delta)
- ),
- TP_fast_assign(
- __entry->ec_sample_timestamp = ec_sample_timestamp;
- __entry->ec_fifo_timestamp = ec_fifo_timestamp;
- __entry->fifo_timestamp = fifo_timestamp;
- __entry->current_timestamp = current_timestamp;
- __entry->current_time = current_time;
- __entry->delta = current_timestamp - current_time;
- ),
- TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
- __entry->ec_sample_timestamp,
- __entry->ec_fifo_timestamp,
- __entry->fifo_timestamp,
- __entry->current_timestamp,
- __entry->current_time,
- __entry->delta
- )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_data,
- TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
- s64 current_timestamp, s64 current_time),
- TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
- TP_STRUCT__entry(
- __field(u32, ec_sensor_num)
- __field(u32, ec_fifo_timestamp)
- __field(s64, fifo_timestamp)
- __field(s64, current_timestamp)
- __field(s64, current_time)
- __field(s64, delta)
- ),
- TP_fast_assign(
- __entry->ec_sensor_num = ec_sensor_num;
- __entry->ec_fifo_timestamp = ec_fifo_timestamp;
- __entry->fifo_timestamp = fifo_timestamp;
- __entry->current_timestamp = current_timestamp;
- __entry->current_time = current_time;
- __entry->delta = current_timestamp - current_time;
- ),
- TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
- __entry->ec_sensor_num,
- __entry->ec_fifo_timestamp,
- __entry->fifo_timestamp,
- __entry->current_timestamp,
- __entry->current_time,
- __entry->delta
- )
-);
-
-TRACE_EVENT(cros_ec_sensorhub_filter,
- TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
- TP_ARGS(state, dx, dy),
- TP_STRUCT__entry(
- __field(s64, dx)
- __field(s64, dy)
- __field(s64, median_m)
- __field(s64, median_error)
- __field(s64, history_len)
- __field(s64, x)
- __field(s64, y)
- ),
- TP_fast_assign(
- __entry->dx = dx;
- __entry->dy = dy;
- __entry->median_m = state->median_m;
- __entry->median_error = state->median_error;
- __entry->history_len = state->history_len;
- __entry->x = state->x_offset;
- __entry->y = state->y_offset;
- ),
- TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
- __entry->dx,
- __entry->dy,
- __entry->median_m,
- __entry->median_error,
- __entry->history_len,
- __entry->x,
- __entry->y
- )
-);
-
-
#endif /* _CROS_EC_TRACE_H_ */
/* this part must be outside header guard */
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 036d54d..cc33645 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -712,7 +712,13 @@
return -ENOMEM;
typec->dev = dev;
+
typec->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!typec->ec) {
+ dev_err(dev, "couldn't find parent EC device\n");
+ return -ENODEV;
+ }
+
platform_set_drvdata(pdev, typec);
ret = cros_typec_get_cmd_version(typec);
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 8ac1491..495da33 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -17,7 +17,7 @@
if MIPS_PLATFORM_DEVICES
config CPU_HWMON
- tristate "Loongson-3 CPU HWMon Driver"
+ bool "Loongson-3 CPU HWMon Driver"
depends on MACH_LOONGSON64
select HWMON
default y
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 386389f..d8c5f91 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -55,55 +55,6 @@
static int nr_packages;
static struct device *cpu_hwmon_dev;
-static SENSOR_DEVICE_ATTR(name, 0444, NULL, NULL, 0);
-
-static struct attribute *cpu_hwmon_attributes[] = {
- &sensor_dev_attr_name.dev_attr.attr,
- NULL
-};
-
-/* Hwmon device attribute group */
-static struct attribute_group cpu_hwmon_attribute_group = {
- .attrs = cpu_hwmon_attributes,
-};
-
-static ssize_t get_cpu_temp(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t cpu_temp_label(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
-
-static const struct attribute *hwmon_cputemp[4][3] = {
- {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- NULL
- }
-};
-
static ssize_t cpu_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -121,23 +72,46 @@
return sprintf(buf, "%d\n", value);
}
-static int create_sysfs_cputemp_files(struct kobject *kobj)
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
+
+static struct attribute *cpu_hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ NULL
+};
+
+static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
- int i, ret = 0;
+ int id = i / 2;
- for (i = 0; i < nr_packages; i++)
- ret = sysfs_create_files(kobj, hwmon_cputemp[i]);
-
- return ret;
+ if (id < nr_packages)
+ return attr->mode;
+ return 0;
}
-static void remove_sysfs_cputemp_files(struct kobject *kobj)
-{
- int i;
+static struct attribute_group cpu_hwmon_group = {
+ .attrs = cpu_hwmon_attributes,
+ .is_visible = cpu_hwmon_is_visible,
+};
- for (i = 0; i < nr_packages; i++)
- sysfs_remove_files(kobj, hwmon_cputemp[i]);
-}
+static const struct attribute_group *cpu_hwmon_groups[] = {
+ &cpu_hwmon_group,
+ NULL
+};
#define CPU_THERMAL_THRESHOLD 90000
static struct delayed_work thermal_work;
@@ -159,50 +133,31 @@
static int __init loongson_hwmon_init(void)
{
- int ret;
-
pr_info("Loongson Hwmon Enter...\n");
if (cpu_has_csr())
csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
LOONGSON_CSRF_TEMP;
- cpu_hwmon_dev = hwmon_device_register_with_info(NULL, "cpu_hwmon", NULL, NULL, NULL);
- if (IS_ERR(cpu_hwmon_dev)) {
- ret = PTR_ERR(cpu_hwmon_dev);
- pr_err("hwmon_device_register fail!\n");
- goto fail_hwmon_device_register;
- }
-
nr_packages = loongson_sysconf.nr_cpus /
loongson_sysconf.cores_per_package;
- ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
- if (ret) {
- pr_err("fail to create cpu temperature interface!\n");
- goto fail_create_sysfs_cputemp_files;
+ cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
+ NULL, cpu_hwmon_groups);
+ if (IS_ERR(cpu_hwmon_dev)) {
+ pr_err("hwmon_device_register fail!\n");
+ return PTR_ERR(cpu_hwmon_dev);
}
INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
- return ret;
-
-fail_create_sysfs_cputemp_files:
- sysfs_remove_group(&cpu_hwmon_dev->kobj,
- &cpu_hwmon_attribute_group);
- hwmon_device_unregister(cpu_hwmon_dev);
-
-fail_hwmon_device_register:
- return ret;
+ return 0;
}
static void __exit loongson_hwmon_exit(void)
{
cancel_delayed_work_sync(&thermal_work);
- remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
- sysfs_remove_group(&cpu_hwmon_dev->kobj,
- &cpu_hwmon_attribute_group);
hwmon_device_unregister(cpu_hwmon_dev);
}
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 2db7113..89d9fca 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -265,7 +265,7 @@
int i, m;
unsigned char ec_cmd[EC_MAX_CMD_ARGS];
unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
- char cmdbuf[64];
+ char cmdbuf[64] = "";
int ec_cmd_bytes;
mutex_lock(&ec_dbgfs_lock);
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 80983f9..ebec499 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -93,6 +93,7 @@
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
{KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
{KE_IGNORE, 0x41, {KEY_MUTE} },
{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -106,7 +107,13 @@
{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ /*
+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+ * with the "Video Bus" input device events. But sometimes it is not
+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
@@ -531,6 +538,15 @@
},
{
.callback = set_force_caps,
+ .ident = "Acer Aspire Switch V 10 SW5-017",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+ },
+ .driver_data = (void *)ACER_CAP_KBD_DOCK,
+ },
+ {
+ .callback = set_force_caps,
.ident = "Acer One 10 (S1003)",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 39e1a63..db369cf 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1212,6 +1212,8 @@
pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
cpu_to_le32(ports_available));
+ pci_dev_put(xhci_pdev);
+
pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
orig_ports_available, ports_available);
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e94e592..6642d09 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -62,6 +62,8 @@
HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
+ HPWMI_SANITIZATION_MODE = 0x17,
+ HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
struct bios_args {
@@ -629,6 +631,10 @@
break;
case HPWMI_BATTERY_CHARGE_PERIOD:
break;
+ case HPWMI_SANITIZATION_MODE:
+ break;
+ case HPWMI_SMART_EXPERIENCE_APP:
+ break;
default:
pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
@@ -897,8 +903,16 @@
wwan_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_rfkill_setup(device))
- hp_wmi_rfkill2_setup(device);
+ /*
+ * In pre-2009 BIOS, command 1Bh return 0x4 to indicate that
+ * BIOS no longer controls the power for the wireless
+ * devices. All features supported by this command will no
+ * longer be supported.
+ */
+ if (!hp_wmi_bios_2009_later()) {
+ if (hp_wmi_rfkill_setup(device))
+ hp_wmi_rfkill2_setup(device);
+ }
thermal_policy_setup(device);
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index a2d846c..eac3e6b 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -470,10 +470,17 @@
static int huawei_wmi_battery_add(struct power_supply *battery)
{
- device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
- device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
+ int err = 0;
- return 0;
+ err = device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ if (err)
+ return err;
+
+ err = device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
+ if (err)
+ device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+
+ return err;
}
static int huawei_wmi_battery_remove(struct power_supply *battery)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 8a0cd5b..cebddef 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -93,6 +93,13 @@
DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
},
},
+ {
+ .ident = "Microsoft Surface Go 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index 15ca8af..ddfba38 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -18,6 +18,8 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <xen/xen.h>
+
static void intel_pmc_core_release(struct device *dev)
{
kfree(dev);
@@ -53,6 +55,13 @@
if (acpi_dev_present("INT33A1", NULL, -1))
return -ENODEV;
+ /*
+ * Skip forcefully attaching the device for VMs. Make an exception for
+ * Xen dom0, which does have full hardware access.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+ return -ENODEV;
+
if (!x86_match_cpu(intel_pmc_core_platform_ids))
return -ENODEV;
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 24ffc8e..0e804b6 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -596,11 +596,10 @@
{
.ident = "MSI S270",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -633,8 +632,7 @@
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -1048,8 +1046,7 @@
return -EINVAL;
/* Register backlight stuff */
-
- if (quirks->old_ec_model ||
+ if (quirks->old_ec_model &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -1117,6 +1114,8 @@
fail_create_group:
if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
+ input_unregister_device(msi_laptop_input_dev);
cancel_delayed_work_sync(&msi_rfkill_dwork);
cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
@@ -1137,6 +1136,7 @@
{
if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
input_unregister_device(msi_laptop_input_dev);
cancel_delayed_work_sync(&msi_rfkill_dwork);
cancel_work_sync(&msi_rfkill_work);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index a9d2a4b..4b0739f 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -244,7 +244,7 @@
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
- pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d5cec6e..0e456c3 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1121,8 +1121,6 @@
if (value > samsung->kbd_led.max_brightness)
value = samsung->kbd_led.max_brightness;
- else if (value < 0)
- value = 0;
samsung->kbd_led_wk = value;
queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index ab6a936..110ff1e 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -756,6 +756,22 @@
.properties = predia_basic_props,
};
+static const struct property_entry rca_cambio_w101_v2_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1644),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rca_cambio_w101_v2_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rca_cambio_w101_v2_props,
+};
+
static const struct property_entry rwc_nanote_p8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
@@ -1342,6 +1358,15 @@
},
},
{
+ /* RCA Cambio W101 v2 */
+ /* https://github.com/onitake/gsl-firmware/discussions/193 */
+ .driver_data = (void *)&rca_cambio_w101_v2_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "RCA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"),
+ },
+ },
+ {
/* RWC NANOTE P8 */
.driver_data = (void *)&rwc_nanote_p8_data,
.matches = {
diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
index 08d0a07..c7624d7 100644
--- a/drivers/power/reset/arm-versatile-reboot.c
+++ b/drivers/power/reset/arm-versatile-reboot.c
@@ -146,6 +146,7 @@
versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
syscon_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
index 90e35c0..b7f7a82 100644
--- a/drivers/power/reset/gemini-poweroff.c
+++ b/drivers/power/reset/gemini-poweroff.c
@@ -107,8 +107,8 @@
return PTR_ERR(gpw->base);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
gpw->dev = dev;
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index f1da757..a6b4a94 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2541,8 +2541,10 @@
ret = kobject_init_and_add(&di->fg_kobject,
&ab8500_fg_ktype,
NULL, "battery");
- if (ret < 0)
+ if (ret < 0) {
+ kobject_put(&di->fg_kobject);
dev_err(di->dev, "failed to create sysfs entry\n");
+ }
return ret;
}
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 0035570..daee116 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -427,11 +427,11 @@
if (ret < 0)
return ret;
- chg_type = adp5061_chg_type[ADP5061_CHG_STATUS_1_CHG_STATUS(status1)];
- if (chg_type > ADP5061_CHG_FAST_CV)
+ chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1);
+ if (chg_type >= ARRAY_SIZE(adp5061_chg_type))
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else
- val->intval = chg_type;
+ val->intval = adp5061_chg_type[chg_type];
return ret;
}
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index e84b6e4..9fda98b 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -185,7 +185,6 @@
union power_supply_propval *val)
{
struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
- struct iio_channel *chan;
int ret = 0, reg, val1;
switch (psp) {
@@ -265,12 +264,12 @@
if (ret)
return ret;
- if (reg & AXP20X_PWR_STATUS_BAT_CHARGING)
- chan = axp20x_batt->batt_chrg_i;
- else
- chan = axp20x_batt->batt_dischrg_i;
-
- ret = iio_read_channel_processed(chan, &val->intval);
+ if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) {
+ ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval);
+ } else {
+ ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1);
+ val->intval = -val1;
+ }
if (ret)
return ret;
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index a4df1ea..f65bf7b 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -41,11 +41,11 @@
#define VBUS_ISPOUT_CUR_LIM_1500MA 0x1 /* 1500mA */
#define VBUS_ISPOUT_CUR_LIM_2000MA 0x2 /* 2000mA */
#define VBUS_ISPOUT_CUR_NO_LIM 0x3 /* 2500mA */
-#define VBUS_ISPOUT_VHOLD_SET_MASK 0x31
+#define VBUS_ISPOUT_VHOLD_SET_MASK 0x38
#define VBUS_ISPOUT_VHOLD_SET_BIT_POS 0x3
#define VBUS_ISPOUT_VHOLD_SET_OFFSET 4000 /* 4000mV */
#define VBUS_ISPOUT_VHOLD_SET_LSB_RES 100 /* 100mV */
-#define VBUS_ISPOUT_VHOLD_SET_4300MV 0x3 /* 4300mV */
+#define VBUS_ISPOUT_VHOLD_SET_4400MV 0x4 /* 4400mV */
#define VBUS_ISPOUT_VBUS_PATH_DIS BIT(7)
#define CHRG_CCCV_CC_MASK 0xf /* 4 bits */
@@ -744,6 +744,16 @@
ret = axp288_charger_vbus_path_select(info, true);
if (ret < 0)
return ret;
+ } else {
+ /* Set Vhold to the factory default / recommended 4.4V */
+ val = VBUS_ISPOUT_VHOLD_SET_4400MV << VBUS_ISPOUT_VHOLD_SET_BIT_POS;
+ ret = regmap_update_bits(info->regmap, AXP20X_VBUS_IPSOUT_MGMT,
+ VBUS_ISPOUT_VHOLD_SET_MASK, val);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
+ AXP20X_VBUS_IPSOUT_MGMT, ret);
+ return ret;
+ }
}
/* Read current charge voltage and current limit */
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 845af0f..8c3c378 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -41,6 +41,7 @@
#define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
#define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
#define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
+#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
#define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
#define BQ24190_REG_POC_SYS_MIN_MIN 3000
@@ -552,7 +553,11 @@
pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
- return ret ? ret : val == BQ24190_REG_POC_CHG_CONFIG_OTG;
+ if (ret)
+ return ret;
+
+ return (val == BQ24190_REG_POC_CHG_CONFIG_OTG ||
+ val == BQ24190_REG_POC_CHG_CONFIG_OTG_ALT);
}
static const struct regulator_ops bq24190_vbus_ops = {
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index e05cee4..908cfd4 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -408,44 +408,112 @@
* Initialisation
*********************************************************************/
-static void wm8350_init_charger(struct wm8350 *wm8350)
+static int wm8350_init_charger(struct wm8350 *wm8350)
{
+ int ret;
+
/* register our interest in charger events */
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT,
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT,
wm8350_charger_handler, 0, "Battery hot", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD,
+ if (ret)
+ goto err;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD,
wm8350_charger_handler, 0, "Battery cold", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL,
+ if (ret)
+ goto free_chg_bat_hot;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL,
wm8350_charger_handler, 0, "Battery fail", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO,
+ if (ret)
+ goto free_chg_bat_cold;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO,
wm8350_charger_handler, 0,
"Charger timeout", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
+ if (ret)
+ goto free_chg_bat_fail;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
wm8350_charger_handler, 0,
"Charge end", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START,
+ if (ret)
+ goto free_chg_to;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START,
wm8350_charger_handler, 0,
"Charge start", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY,
+ if (ret)
+ goto free_chg_end;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY,
wm8350_charger_handler, 0,
"Fast charge ready", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9,
+ if (ret)
+ goto free_chg_start;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9,
wm8350_charger_handler, 0,
"Battery <3.9V", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1,
+ if (ret)
+ goto free_chg_fast_rdy;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1,
wm8350_charger_handler, 0,
"Battery <3.1V", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85,
+ if (ret)
+ goto free_chg_vbatt_lt_3p9;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85,
wm8350_charger_handler, 0,
"Battery <2.85V", wm8350);
+ if (ret)
+ goto free_chg_vbatt_lt_3p1;
/* and supply change events */
- wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB,
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB,
wm8350_charger_handler, 0, "USB", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
+ if (ret)
+ goto free_chg_vbatt_lt_2p85;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
wm8350_charger_handler, 0, "Wall", wm8350);
- wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB,
+ if (ret)
+ goto free_ext_usb_fb;
+
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB,
wm8350_charger_handler, 0, "Battery", wm8350);
+ if (ret)
+ goto free_ext_wall_fb;
+
+ return 0;
+
+free_ext_wall_fb:
+ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350);
+free_ext_usb_fb:
+ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350);
+free_chg_vbatt_lt_2p85:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350);
+free_chg_vbatt_lt_3p1:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350);
+free_chg_vbatt_lt_3p9:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350);
+free_chg_fast_rdy:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350);
+free_chg_start:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350);
+free_chg_end:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350);
+free_chg_to:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350);
+free_chg_bat_fail:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350);
+free_chg_bat_cold:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350);
+free_chg_bat_hot:
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350);
+err:
+ return ret;
}
static void free_charger_irq(struct wm8350 *wm8350)
@@ -456,6 +524,7 @@
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 70d6d52..285420c 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -938,6 +938,9 @@
y = value & 0x1f;
value = (1 << y) * (4 + f) * rp->time_unit / 4;
} else {
+ if (value < rp->time_unit)
+ return 0;
+
do_div(value, rp->time_unit);
y = ilog2(value);
f = div64_u64(4 * (value - (1 << y)), 1 << y);
@@ -979,7 +982,6 @@
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
- .dram_domain_energy_unit = 15300,
.psys_domain_energy_unit = 1000000000,
};
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index be076a9..8cd59e8 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -13,7 +13,7 @@
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
- return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
+ return sysfs_emit(page, "%s\n", ptp->info->name);
}
static DEVICE_ATTR_RO(clock_name);
@@ -227,7 +227,7 @@
mutex_unlock(&ptp->pincfg_mux);
- return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
+ return sysfs_emit(page, "%u %u\n", func, chan);
}
static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index bf3f14f..05e4120 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -125,6 +125,7 @@
if (err)
return err;
+ duty_ns = min(duty_ns, period_ns);
val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
return lp3943_write_byte(lp3943, reg_duty, val);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 5ff1114..f32a9e0 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -325,7 +325,6 @@
{
struct lpc18xx_pwm_chip *lpc18xx_pwm;
struct pwm_device *pwm;
- struct resource *res;
int ret, i;
u64 val;
@@ -336,8 +335,7 @@
lpc18xx_pwm->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_pwm->base))
return PTR_ERR(lpc18xx_pwm->base);
@@ -400,12 +398,6 @@
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
BIT(lpc18xx_pwm->period_event));
- ret = pwmchip_add(&lpc18xx_pwm->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
- goto disable_pwmclk;
- }
-
for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
struct lpc18xx_pwm_data *data;
@@ -415,14 +407,12 @@
GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
- goto remove_pwmchip;
+ goto disable_pwmclk;
}
pwm_set_chip_data(pwm, data);
}
- platform_set_drvdata(pdev, lpc18xx_pwm);
-
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
val &= ~LPC18XX_PWM_BIDIR;
val &= ~LPC18XX_PWM_CTRL_HALT;
@@ -430,10 +420,16 @@
val |= LPC18XX_PWM_PRE(0);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
+ ret = pwmchip_add(&lpc18xx_pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+ goto disable_pwmclk;
+ }
+
+ platform_set_drvdata(pdev, lpc18xx_pwm);
+
return 0;
-remove_pwmchip:
- pwmchip_remove(&lpc18xx_pwm->chip);
disable_pwmclk:
clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
return ret;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 2485fba..9cc0612 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -23,7 +23,7 @@
#define PWM_SIFIVE_PWMCFG 0x0
#define PWM_SIFIVE_PWMCOUNT 0x8
#define PWM_SIFIVE_PWMS 0x10
-#define PWM_SIFIVE_PWMCMP0 0x20
+#define PWM_SIFIVE_PWMCMP(i) (0x20 + 4 * (i))
/* PWMCFG fields */
#define PWM_SIFIVE_PWMCFG_SCALE GENMASK(3, 0)
@@ -36,8 +36,6 @@
#define PWM_SIFIVE_PWMCFG_GANG BIT(24)
#define PWM_SIFIVE_PWMCFG_IP BIT(28)
-/* PWM_SIFIVE_SIZE_PWMCMP is used to calculate offset for pwmcmpX registers */
-#define PWM_SIFIVE_SIZE_PWMCMP 4
#define PWM_SIFIVE_CMPWIDTH 16
#define PWM_SIFIVE_DEFAULT_PERIOD 10000000
@@ -112,8 +110,7 @@
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
u32 duty, val;
- duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP0 +
- pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+ duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
state->enabled = duty > 0;
@@ -194,8 +191,7 @@
pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk));
}
- writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP0 +
- pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+ writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
if (state->enabled != enabled)
pwm_sifive_enable(chip, state->enabled);
@@ -234,6 +230,8 @@
struct pwm_chip *chip;
struct resource *res;
int ret;
+ u32 val;
+ unsigned int enabled_pwms = 0, enabled_clks = 1;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -264,6 +262,33 @@
return ret;
}
+ val = readl(ddata->regs + PWM_SIFIVE_PWMCFG);
+ if (val & PWM_SIFIVE_PWMCFG_EN_ALWAYS) {
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; ++i) {
+ val = readl(ddata->regs + PWM_SIFIVE_PWMCMP(i));
+ if (val > 0)
+ ++enabled_pwms;
+ }
+ }
+
+ /* The clk should be on once for each running PWM. */
+ if (enabled_pwms) {
+ while (enabled_clks < enabled_pwms) {
+ /* This is not expected to fail as the clk is already on */
+ ret = clk_enable(ddata->clk);
+ if (unlikely(ret)) {
+ dev_err_probe(dev, ret, "Failed to enable clk\n");
+ goto disable_clk;
+ }
+ ++enabled_clks;
+ }
+ } else {
+ clk_disable(ddata->clk);
+ enabled_clks = 0;
+ }
+
/* Watch for changes to underlying clock frequency */
ddata->notifier.notifier_call = pwm_sifive_clock_notifier;
ret = clk_notifier_register(ddata->clk, &ddata->notifier);
@@ -286,7 +311,11 @@
unregister_clk:
clk_notifier_unregister(ddata->clk, &ddata->notifier);
disable_clk:
- clk_disable_unprepare(ddata->clk);
+ while (enabled_clks) {
+ clk_disable(ddata->clk);
+ --enabled_clks;
+ }
+ clk_unprepare(ddata->clk);
return ret;
}
@@ -294,25 +323,21 @@
static int pwm_sifive_remove(struct platform_device *dev)
{
struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
- bool is_enabled = false;
struct pwm_device *pwm;
- int ret, ch;
+ int ch;
+
+ pwmchip_remove(&ddata->chip);
+ clk_notifier_unregister(ddata->clk, &ddata->notifier);
for (ch = 0; ch < ddata->chip.npwm; ch++) {
pwm = &ddata->chip.pwms[ch];
- if (pwm->state.enabled) {
- is_enabled = true;
- break;
- }
+ if (pwm->state.enabled)
+ clk_disable(ddata->clk);
}
- if (is_enabled)
- clk_disable(ddata->clk);
- clk_disable_unprepare(ddata->clk);
- ret = pwmchip_remove(&ddata->chip);
- clk_notifier_unregister(ddata->clk, &ddata->notifier);
+ clk_unprepare(ddata->clk);
- return ret;
+ return 0;
}
static const struct of_device_id pwm_sifive_of_match[] = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2c48e55..eb083b2 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2027,10 +2027,13 @@
rdev->exclusive = 1;
ret = _regulator_is_enabled(rdev);
- if (ret > 0)
+ if (ret > 0) {
rdev->use_count = 1;
- else
+ regulator->enable_count = 1;
+ } else {
rdev->use_count = 0;
+ regulator->enable_count = 0;
+ }
}
link = device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
@@ -2541,7 +2544,7 @@
* expired, return -ETIMEDOUT.
*/
if (rdev->desc->poll_enabled_time) {
- unsigned int time_remaining = delay;
+ int time_remaining = delay;
while (time_remaining > 0) {
_regulator_enable_delay(rdev->desc->poll_enabled_time);
@@ -2593,13 +2596,18 @@
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;
lockdep_assert_held_once(&rdev->mutex.base);
regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }
return 0;
}
@@ -4920,6 +4928,7 @@
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
+ debugfs_remove_recursive(rdev->debugfs);
kfree(rdev->constraints);
of_node_put(rdev->dev.of_node);
kfree(rdev);
@@ -5393,11 +5402,15 @@
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
+ put_device(&rdev->dev);
+ rdev = NULL;
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
+ if (rdev && rdev->dev.of_node)
+ of_node_put(rdev->dev.of_node);
+ kfree(rdev);
kfree(config);
- put_device(&rdev->dev);
rinse:
if (dangling_cfg_gpiod)
gpiod_put(cfg->ena_gpiod);
@@ -5426,7 +5439,6 @@
mutex_lock(&regulator_list_mutex);
- debugfs_remove_recursive(rdev->debugfs);
WARN_ON(rdev->open_count);
regulator_remove_coupling(rdev);
unset_regulator_supplies(rdev);
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 06c0b15..5d84469 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -206,8 +206,12 @@
}
suspend_np = of_get_child_by_name(np, regulator_states[i]);
- if (!suspend_np || !suspend_state)
+ if (!suspend_np)
continue;
+ if (!suspend_state) {
+ of_node_put(suspend_np);
+ continue;
+ }
if (!of_property_read_u32(suspend_np, "regulator-mode",
&pval)) {
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 01a12cf..44a8e50 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -531,6 +531,7 @@
parent = of_get_child_by_name(np, "regulators");
if (!parent) {
dev_err(dev, "regulators node not found\n");
+ of_node_put(np);
return -EINVAL;
}
@@ -560,6 +561,7 @@
}
of_node_put(parent);
+ of_node_put(np);
if (ret < 0) {
dev_err(dev, "Error parsing regulator init data: %d\n",
ret);
@@ -789,7 +791,7 @@
((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ regulator_num * sizeof(struct pfuze_regulator));
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7f9d66a..3c41b71 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -802,6 +802,12 @@
};
static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
+ { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l2", QCOM_RPM_PM8058_LDO2, &pm8058_pldo, "vdd_l2_l11_l12" },
@@ -829,12 +835,6 @@
{ "l24", QCOM_RPM_PM8058_LDO24, &pm8058_nldo, "vdd_l23_l24_l25" },
{ "l25", QCOM_RPM_PM8058_LDO25, &pm8058_nldo, "vdd_l23_l24_l25" },
- { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8058_LVS0, &pm8058_switch, "vdd_l0_l1_lvs" },
{ "lvs1", QCOM_RPM_PM8058_LVS1, &pm8058_switch, "vdd_l0_l1_lvs" },
@@ -843,6 +843,12 @@
};
static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
+ { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8901_LDO0, &pm8901_nldo, "vdd_l0" },
{ "l1", QCOM_RPM_PM8901_LDO1, &pm8901_pldo, "vdd_l1" },
{ "l2", QCOM_RPM_PM8901_LDO2, &pm8901_pldo, "vdd_l2" },
@@ -851,12 +857,6 @@
{ "l5", QCOM_RPM_PM8901_LDO5, &pm8901_pldo, "vdd_l5" },
{ "l6", QCOM_RPM_PM8901_LDO6, &pm8901_pldo, "vdd_l6" },
- { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8901_LVS0, &pm8901_switch, "lvs0_in" },
{ "lvs1", QCOM_RPM_PM8901_LVS1, &pm8901_switch, "lvs1_in" },
{ "lvs2", QCOM_RPM_PM8901_LVS2, &pm8901_switch, "lvs2_in" },
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 03e146e..0295d7b 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -313,10 +313,10 @@
static const struct regulator_desc pm8916_pldo = {
.linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
+ REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
},
.n_linear_ranges = 1,
- .n_voltages = 209,
+ .n_voltages = 128,
.ops = &rpm_smps_ldo_ops,
};
@@ -844,32 +844,31 @@
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8950_hfsmps, "vdd_s3" },
{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8950_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
+ /* S5 is managed via SPMI. */
{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_hfsmps, "vdd_s6" },
{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8950_ult_nldo, "vdd_l1_l19" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8950_ult_nldo, "vdd_l2_l23" },
{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8950_ult_nldo, "vdd_l3" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+ /* L4 seems not to exist. */
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_nldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16"},
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
- { "l19", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l1_l19"},
- { "l20", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l20"},
- { "l21", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l21"},
- { "l22", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22"},
- { "l23", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l2_l23"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l5_l6_l7_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ /* L18 seems not to exist. */
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8950_pldo, "vdd_l1_l19" },
+ /* L20 & L21 seem not to exist. */
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8950_pldo, "vdd_l2_l23" },
{}
};
@@ -1185,8 +1184,10 @@
for_each_available_child_of_node(dev->of_node, node) {
vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
+ if (!vreg) {
+ of_node_put(node);
return -ENOMEM;
+ }
ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index ee46bfb..991b473 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -37,11 +37,24 @@
static int attiny_lcd_power_enable(struct regulator_dev *rdev)
{
unsigned int data;
+ int ret, i;
regmap_write(rdev->regmap, REG_POWERON, 1);
+ msleep(80);
+
/* Wait for nPWRDWN to go low to indicate poweron is done. */
- regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data,
- data & BIT(0), 10, 1000000);
+ for (i = 0; i < 20; i++) {
+ ret = regmap_read(rdev->regmap, REG_PORTB, &data);
+ if (!ret) {
+ if (data & BIT(0))
+ break;
+ }
+ usleep_range(10000, 12000);
+ }
+ usleep_range(10000, 12000);
+
+ if (ret)
+ pr_err("%s: regmap_read_poll_timeout failed %d\n", __func__, ret);
/* Default to the same orientation as the closed source
* firmware used for the panel. Runtime rotation
@@ -57,23 +70,34 @@
{
regmap_write(rdev->regmap, REG_PWM, 0);
regmap_write(rdev->regmap, REG_POWERON, 0);
- udelay(1);
+ msleep(30);
return 0;
}
static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev)
{
unsigned int data;
- int ret;
+ int ret, i;
- ret = regmap_read(rdev->regmap, REG_POWERON, &data);
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(rdev->regmap, REG_POWERON, &data);
+ if (!ret)
+ break;
+ usleep_range(10000, 12000);
+ }
if (ret < 0)
return ret;
if (!(data & BIT(0)))
return 0;
- ret = regmap_read(rdev->regmap, REG_PORTB, &data);
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(rdev->regmap, REG_PORTB, &data);
+ if (!ret)
+ break;
+ usleep_range(10000, 12000);
+ }
+
if (ret < 0)
return ret;
@@ -103,20 +127,32 @@
{
struct regmap *regmap = bl_get_data(bl);
int brightness = bl->props.brightness;
+ int ret, i;
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK)
brightness = 0;
- return regmap_write(regmap, REG_PWM, brightness);
+ for (i = 0; i < 10; i++) {
+ ret = regmap_write(regmap, REG_PWM, brightness);
+ if (!ret)
+ break;
+ }
+
+ return ret;
}
static int attiny_get_brightness(struct backlight_device *bl)
{
struct regmap *regmap = bl_get_data(bl);
- int ret, brightness;
+ int ret, brightness, i;
- ret = regmap_read(regmap, REG_PWM, &brightness);
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(regmap, REG_PWM, &brightness);
+ if (!ret)
+ break;
+ }
+
if (ret)
return ret;
@@ -166,7 +202,7 @@
}
regmap_write(regmap, REG_POWERON, 0);
- mdelay(1);
+ msleep(30);
config.dev = &i2c->dev;
config.regmap = regmap;
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 430265c..7c7e364 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -530,6 +530,7 @@
#define TWL6032_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6032_INFO_##label = { \
.base = offset, \
+ .features = TWL6032_SUBCLASS, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
@@ -562,6 +563,7 @@
#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
.base = offset, \
+ .features = TWL6032_SUBCLASS, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index cadea03..40befdd 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -82,6 +82,35 @@
.min_uV = 2400000,
.uV_step = 100000,
.enable_time = 3000,
+ .off_on_delay = 36000,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = 2,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+ .vsel_reg = WM8994_LDO_2,
+ .vsel_mask = WM8994_LDO2_VSEL_MASK,
+ .ops = &wm8994_ldo2_ops,
+ .enable_time = 3000,
+ .off_on_delay = 36000,
+ .owner = THIS_MODULE,
+ },
+};
+
+static const struct regulator_desc wm8958_ldo_desc[] = {
+ {
+ .name = "LDO1",
+ .id = 1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+ .vsel_reg = WM8994_LDO_1,
+ .vsel_mask = WM8994_LDO1_VSEL_MASK,
+ .ops = &wm8994_ldo1_ops,
+ .min_uV = 2400000,
+ .uV_step = 100000,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
{
@@ -172,9 +201,16 @@
* regulator core and we need not worry about it on the
* error path.
*/
- ldo->regulator = devm_regulator_register(&pdev->dev,
- &wm8994_ldo_desc[id],
- &config);
+ if (ldo->wm8994->type == WM8994) {
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8994_ldo_desc[id],
+ &config);
+ } else {
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8958_ldo_desc[id],
+ &config);
+ }
+
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 9eb5997..c39138d 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -406,6 +406,7 @@
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index ebc3e75..1b3aa84 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1594,18 +1594,20 @@
* reserved memory regions from device's memory-region property.
*/
child = of_get_child_by_name(qproc->dev->of_node, "mba");
- if (!child)
+ if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
"memory-region", 0);
- else
+ } else {
node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(qproc->dev, "unable to resolve mba region\n");
return ret;
}
- of_node_put(node);
qproc->mba_phys = r.start;
qproc->mba_size = resource_size(&r);
@@ -1622,14 +1624,15 @@
} else {
child = of_get_child_by_name(qproc->dev->of_node, "mpss");
node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
return ret;
}
- of_node_put(node);
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index b37b111..a26221a 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -41,6 +41,7 @@
struct completion comp;
struct completion ind_comp;
struct completion shutdown_comp;
+ struct completion ssctl_comp;
struct mutex lock;
bool ssr_ack;
@@ -422,6 +423,8 @@
svc->priv = sysmon;
+ complete(&sysmon->ssctl_comp);
+
return 0;
}
@@ -478,6 +481,7 @@
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
+ reinit_completion(&sysmon->ssctl_comp);
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
@@ -520,6 +524,11 @@
if (crashed)
return;
+ if (sysmon->ssctl_instance) {
+ if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+ dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+ }
+
if (sysmon->ssctl_version)
ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
@@ -606,6 +615,7 @@
init_completion(&sysmon->comp);
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
+ init_completion(&sysmon->ssctl_comp);
mutex_init(&sysmon->lock);
mutex_init(&sysmon->state_lock);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index e2573f7..572f7b8 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -415,6 +415,7 @@
irq_handler_t thread_fn)
{
int ret;
+ int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
@@ -425,14 +426,19 @@
return ret;
}
+ irq_number = ret;
+
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
- return ret;
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
@@ -448,6 +454,7 @@
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 7e58453..e8bb0ee 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -76,7 +76,7 @@
int ret, err = 0;
char buf[20];
- if (count > sizeof(buf))
+ if (count < 1 || count > sizeof(buf))
return -EINVAL;
ret = copy_from_user(buf, user_buf, count);
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index afeb9d6..f92a18c 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1283,6 +1283,7 @@
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
+ of_node_put(child);
goto fail;
}
@@ -1291,6 +1292,7 @@
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
+ of_node_put(child);
goto fail;
}
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 185a333..d240872 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -329,6 +329,7 @@
break;
case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MP_RESET_PCIEPHY_PERST:
value = assert ? 0 : bit;
break;
}
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395..4c5bba5 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 96a17ec..2d8cb59 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -234,7 +234,9 @@
if (info->registered)
continue;
+ mutex_unlock(&subdev->channels_lock);
ret = mtk_rpmsg_register_device(subdev, &info->info);
+ mutex_lock(&subdev->channels_lock);
if (ret) {
dev_err(&pdev->dev, "Can't create rpmsg_device\n");
continue;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 4840886..7cbed03 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1472,7 +1472,7 @@
cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 19903de..b5167ef 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1073,7 +1073,7 @@
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1304,7 +1304,7 @@
spin_unlock_irqrestore(&edge->channels_lock, flags);
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
rpmsg_unregister_device(&edge->dev, &chinfo);
@@ -1364,6 +1364,7 @@
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
goto put_node;
@@ -1388,9 +1389,9 @@
edge->name = node->name;
irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
+ if (!irq) {
dev_err(dev, "required smd interrupt missing\n");
- ret = irq;
+ ret = -EINVAL;
goto put_node;
}
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 7c88d19..625effe 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -26,6 +26,15 @@
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
+ struct timerqueue_head *head = &rtc->timerqueue;
+ struct timerqueue_node *node;
+
+ mutex_lock(&rtc->ops_lock);
+ while ((node = timerqueue_getnext(head)))
+ timerqueue_del(head, node);
+ mutex_unlock(&rtc->ops_lock);
+
+ cancel_work_sync(&rtc->irqwork);
ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 794a4f0..1460568 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -807,9 +807,13 @@
struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
struct rtc_time tm;
ktime_t now;
+ int err;
+
+ err = __rtc_read_time(rtc, &tm);
+ if (err)
+ return err;
timer->enabled = 1;
- __rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
/* Skip over expired timers */
@@ -823,7 +827,6 @@
trace_rtc_timer_enqueue(timer);
if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
- int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
alarm.enabled = 1;
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 5add637..b036ff3 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -99,6 +99,17 @@
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ return true;
+#endif
+ return false;
+}
+
/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
@@ -172,7 +183,10 @@
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+ if (apply_amd_register_a_behavior())
+ CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+ else
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 1894ade..acfcb37 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -269,6 +269,8 @@
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
rtc->addr_base = res->start;
rtc->data = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index f0a6861..7155133 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -366,7 +366,8 @@
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned int buf[5], ctrl2;
+ u8 buf[5];
+ unsigned int ctrl2;
int ret;
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index ebe03eb..87c9384 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -137,7 +137,7 @@
return ret;
}
-static int pl030_remove(struct amba_device *dev)
+static void pl030_remove(struct amba_device *dev)
{
struct pl030_rtc *rtc = amba_get_drvdata(dev);
@@ -146,8 +146,6 @@
free_irq(dev->irq[0], rtc);
iounmap(rtc->base);
amba_release_regions(dev);
-
- return 0;
}
static struct amba_id pl030_ids[] = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index d4b2ab7..2f5581e 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -280,7 +280,7 @@
return 0;
}
-static int pl031_remove(struct amba_device *adev)
+static void pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
@@ -289,8 +289,6 @@
if (adev->irq[0])
free_irq(adev->irq[0], ldata);
amba_release_regions(adev);
-
- return 0;
}
static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index f2818cd..52b36b7 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -138,7 +138,7 @@
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
- unsigned long alarm;
+ time64_t alarm;
struct clk_hw hw;
struct clk_hw *int_osc;
@@ -510,10 +510,8 @@
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
- unsigned long time_now = 0;
- unsigned long time_set = 0;
- unsigned long time_gap = 0;
- int ret = 0;
+ time64_t time_now, time_set;
+ int ret;
ret = sun6i_rtc_gettime(dev, &tm_now);
if (ret < 0) {
@@ -528,9 +526,7 @@
return -EINVAL;
}
- time_gap = time_set - time_now;
-
- if (time_gap > U32_MAX) {
+ if ((time_set - time_now) > U32_MAX) {
dev_err(dev, "Date too far in the future\n");
return -EINVAL;
}
@@ -539,7 +535,7 @@
writel(0, chip->base + SUN6I_ALRM_COUNTER);
usleep_range(100, 300);
- writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+ writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER);
chip->alarm = time_set;
sun6i_rtc_setaie(wkalrm->enabled, chip);
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 2018614..6eaa932 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -432,14 +432,21 @@
return ret;
}
- wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
wm8350_rtc_update_handler, 0,
"RTC Seconds", wm8350);
+ if (ret)
+ return ret;
+
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
- wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
+ ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
wm8350_rtc_alarm_handler, 0,
"RTC Alarm", wm8350);
+ if (ret) {
+ wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 2adfab5..f4edfe3 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1462,6 +1462,13 @@
if (!cqr->lpm)
cqr->lpm = dasd_path_get_opm(device);
}
+ /*
+ * remember the amount of formatted tracks to prevent double format on
+ * ESE devices
+ */
+ if (cqr->block)
+ cqr->trkcount = atomic_read(&cqr->block->trkcount);
+
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm);
@@ -1680,6 +1687,7 @@
unsigned long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
+ struct request *req;
u8 *sense = NULL;
int expires;
@@ -1780,7 +1788,12 @@
}
if (dasd_ese_needs_format(cqr->block, irb)) {
- if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
+ req = dasd_get_callback_data(cqr);
+ if (!req) {
+ cqr->status = DASD_CQR_ERROR;
+ return;
+ }
+ if (rq_data_dir(req) == READ) {
device->discipline->ese_read(cqr, irb);
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
@@ -2799,8 +2812,7 @@
* complete a request partially.
*/
if (proc_bytes) {
- blk_update_request(req, BLK_STS_OK,
- blk_rq_bytes(req) - proc_bytes);
+ blk_update_request(req, BLK_STS_OK, proc_bytes);
blk_mq_requeue_request(req, true);
} else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index dc78a52..b6b938a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -675,12 +675,12 @@
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
- struct alias_pav_group *group = private->pavgroup;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
+ struct alias_pav_group *group;
unsigned long flags;
- if (!group || !lcu)
+ if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@
}
spin_lock_irqsave(&lcu->lock, flags);
+ group = private->pavgroup;
+ if (!group) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ }
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ad44d22..53d2297 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3026,13 +3026,24 @@
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
- struct dasd_block *block)
+ struct dasd_ccw_req *cqr)
{
+ struct dasd_block *block = cqr->block;
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
+ if (cqr->trkcount != atomic_read(&block->trkcount)) {
+ /*
+ * The number of formatted tracks has changed after request
+ * start and we can not tell if the current track was involved.
+ * To avoid data corruption treat it as if the current track is
+ * involved
+ */
+ rc = true;
+ goto out;
+ }
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
@@ -3052,6 +3063,7 @@
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
+ atomic_inc(&block->trkcount);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
@@ -3088,7 +3100,7 @@
sector_t curr_trk;
int rc;
- req = cqr->callback_data;
+ req = dasd_get_callback_data(cqr);
block = cqr->block;
base = block->base;
private = base->private;
@@ -3113,8 +3125,11 @@
}
format->track = curr_trk;
/* test if track is already in formatting by another thread */
- if (test_and_set_format_track(format, block))
+ if (test_and_set_format_track(format, cqr)) {
+ /* this is no real error so do not count down retries */
+ cqr->retries++;
return ERR_PTR(-EEXIST);
+ }
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
@@ -3213,12 +3228,11 @@
cqr->proc_bytes = blk_count * blksize;
return 0;
}
- if (dst && !skip_block) {
- dst += off;
+ if (dst && !skip_block)
memset(dst, 0, blksize);
- } else {
+ else
skip_block--;
- }
+ dst += blksize;
blk_count++;
}
}
@@ -4613,7 +4627,6 @@
struct dasd_device *basedev;
struct req_iterator iter;
struct dasd_ccw_req *cqr;
- unsigned int first_offs;
unsigned int trkcount;
unsigned long *idaws;
unsigned int size;
@@ -4647,7 +4660,6 @@
last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1;
- first_offs = 0;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -4691,13 +4703,13 @@
if (use_prefix) {
prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
- startdev, 1, first_offs + 1, trkcount, 0, 0);
+ startdev, 1, 0, trkcount, 0, 0);
} else {
define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
ccw[-1].flags |= CCW_FLAG_CC;
data += sizeof(struct DE_eckd_data);
- locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+ locate_record_ext(ccw++, data, first_trk, 0,
trkcount, cmd, basedev, 0, 0);
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fa552f9..9d9685c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -188,6 +188,7 @@
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
unsigned int proc_bytes; /* bytes for partial completion */
+ unsigned int trkcount; /* count formatted tracks */
};
/*
@@ -575,6 +576,7 @@
struct list_head format_list;
spinlock_t format_lock;
+ atomic_t trkcount;
};
struct dasd_attention_data {
@@ -723,6 +725,18 @@
return 0;
}
+/*
+ * return the callback data of the original request in case there are
+ * ERP requests build on top of it
+ */
+static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
+{
+ while (cqr->refers)
+ cqr = cqr->refers;
+
+ return cqr->callback_data;
+}
+
/* externals in dasd.c */
#define DASD_PROFILE_OFF 0
#define DASD_PROFILE_ON 1
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index c467589..c06d399 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -56,7 +56,7 @@
kbd_put_queue(struct tty_port *port, int ch)
{
tty_insert_flip_char(port, ch, 0);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
static inline void
@@ -64,5 +64,5 @@
{
while (*cp)
tty_insert_flip_char(port, *cp++, 0);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1515fdc..3841c0e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -48,6 +48,7 @@
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
+static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
@@ -64,19 +65,24 @@
if (!hsa_available)
return -ENODATA;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
+ mutex_unlock(&hsa_buf_mutex);
return -EIO;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
- if (copy_to_user(dest, hsa_buf + offset, bytes))
+ if (copy_to_user(dest, hsa_buf + offset, bytes)) {
+ mutex_unlock(&hsa_buf_mutex);
return -EFAULT;
+ }
src += bytes;
dest += bytes;
count -= bytes;
}
+ mutex_unlock(&hsa_buf_mutex);
return 0;
}
@@ -94,9 +100,11 @@
if (!hsa_available)
return -ENODATA;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
+ mutex_unlock(&hsa_buf_mutex);
return -EIO;
}
offset = src % PAGE_SIZE;
@@ -106,6 +114,7 @@
dest += bytes;
count -= bytes;
}
+ mutex_unlock(&hsa_buf_mutex);
return 0;
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 9b61e9b..e3c1060 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -288,19 +288,11 @@
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 85a1a45..20a6097 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -626,8 +626,6 @@
ctcm_clear_busy_do(dev);
}
- kfree(mpcginfo);
-
return;
}
@@ -1206,10 +1204,10 @@
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
/* mpcginfo only used for non-data transfers */
- kfree(mpcginfo);
if (do_debug_data)
ctcmpc_dump_skb(pskb, -8);
}
+ kfree(mpcginfo);
}
done:
@@ -1991,7 +1989,6 @@
}
break;
}
- kfree(mpcginfo);
CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
__func__, ch->id, grp->outstanding_xid2,
@@ -2052,7 +2049,6 @@
mpc_validate_xid(mpcginfo);
break;
}
- kfree(mpcginfo);
return;
}
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index ded1930..e3813a7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -39,11 +39,12 @@
struct ctcm_priv *priv = dev_get_drvdata(dev);
int rc;
- ndev = priv->channel[CTCM_READ]->netdev;
- if (!(priv && priv->channel[CTCM_READ] && ndev)) {
+ if (!(priv && priv->channel[CTCM_READ] &&
+ priv->channel[CTCM_READ]->netdev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
+ ndev = priv->channel[CTCM_READ]->netdev;
rc = kstrtouint(buf, 0, &bs1);
if (rc)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 440219b..06a322b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1735,10 +1735,11 @@
lcs_schedule_recovery(card);
break;
case LCS_CMD_STOPLAN:
- pr_warn("Stoplan for %s initiated by LGW\n",
- card->dev->name);
- if (card->dev)
+ if (card->dev) {
+ pr_warn("Stoplan for %s initiated by LGW\n",
+ card->dev->name);
netif_carrier_off(card->dev);
+ }
break;
default:
LCS_DBF_TEXT(5, trace, "noLGWcmd");
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 511bf8e..b61acbb 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -145,27 +145,33 @@
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
+ int ret = -EIO;
+
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
- if (zfcp_fsf_open_wka_port(wka_port))
+ if (zfcp_fsf_open_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+ goto out;
+ }
}
- mutex_unlock(&wka_port->mutex);
-
- wait_event(wka_port->completion_wq,
+ wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
- return 0;
+ ret = 0;
+ goto out;
}
- return -EIO;
+out:
+ mutex_unlock(&wka_port->mutex);
+ return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ goto out;
}
+ wait_event(wka_port->closed,
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
@@ -193,13 +202,15 @@
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
- schedule_delayed_work(&wka_port->work, HZ / 100);
+ queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
+ msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
- init_waitqueue_head(&wka_port->completion_wq);
+ init_waitqueue_head(&wka_port->opened);
+ init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 6902ae1..25bebfa 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -185,7 +185,8 @@
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
- * @completion_wq: Wait for completion of open/close command
+ * @opened: Wait for completion of open command
+ * @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
+ wait_queue_head_t opened;
+ wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6cb963a..524947b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -866,7 +866,7 @@
const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- int req_id = req->req_id;
+ unsigned long req_id = req->req_id;
zfcp_reqlist_add(adapter->req_list, req);
@@ -1889,7 +1889,7 @@
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->opened);
}
/**
@@ -1948,7 +1948,7 @@
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->closed);
}
/**
@@ -2359,8 +2359,7 @@
}
}
- blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
- sizeof(blktrc));
+ blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
}
/**
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3337b1e..f6f9203 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2014,7 +2014,7 @@
retval = pci_enable_device(pdev);
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
- goto out_disable_device;
+ return -ENODEV;
}
pci_set_master(pdev);
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index d8e19af..c6607c4 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -3367,13 +3367,11 @@
setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
- if (ints[0] > 8) { /*}*/
+ if (ints[0] > 8)
printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
"[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
- } else {
+ else
setup_count++;
- return 0;
- }
return 1;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a13c203..c488165 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -182,6 +182,7 @@
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
uint16_t cri_index;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -189,15 +190,17 @@
beiscsi_ep = ep->dd_data;
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
if (beiscsi_ep->phba != phba) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
-
- return -EEXIST;
+ rc = -EEXIST;
+ goto put_ep;
}
cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
if (phba->conn_table[cri_index]) {
@@ -209,7 +212,8 @@
beiscsi_ep->ep_cid,
beiscsi_conn,
phba->conn_table[cri_index]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto put_ep;
}
}
@@ -226,7 +230,10 @@
"BS_%d : cid %d phba->conn_table[%u]=%p\n",
beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
phba->conn_table[cri_index] = beiscsi_conn;
- return 0;
+
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 987dc81..b977e03 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5810,6 +5810,7 @@
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = beiscsi_attr_is_visible,
.set_iface_param = beiscsi_iface_set_param,
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 5ae1e3f..e049cdb 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -711,7 +711,7 @@
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
- return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+ return sysfs_emit(buf, "%s\n", serial_num);
}
static ssize_t
@@ -725,7 +725,7 @@
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
- return snprintf(buf, PAGE_SIZE, "%s\n", model);
+ return sysfs_emit(buf, "%s\n", model);
}
static ssize_t
@@ -805,7 +805,7 @@
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
- return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+ return sysfs_emit(buf, "%s\n", model_descr);
}
static ssize_t
@@ -819,7 +819,7 @@
u64 nwwn;
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+ return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn));
}
static ssize_t
@@ -836,7 +836,7 @@
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
- return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+ return sysfs_emit(buf, "%s\n", symname);
}
static ssize_t
@@ -850,14 +850,14 @@
char hw_ver[BFA_VERSION_LEN];
bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+ return sysfs_emit(buf, "%s\n", hw_ver);
}
static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION);
}
static ssize_t
@@ -871,7 +871,7 @@
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+ return sysfs_emit(buf, "%s\n", optrom_ver);
}
static ssize_t
@@ -885,7 +885,7 @@
char fw_ver[BFA_VERSION_LEN];
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+ return sysfs_emit(buf, "%s\n", fw_ver);
}
static ssize_t
@@ -897,7 +897,7 @@
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
bfa_get_nports(&bfad->bfa));
}
@@ -905,7 +905,7 @@
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME);
}
static ssize_t
@@ -924,14 +924,14 @@
rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
GFP_ATOMIC);
if (rports == NULL)
- return snprintf(buf, PAGE_SIZE, "Failed\n");
+ return sysfs_emit(buf, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
- return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+ return sysfs_emit(buf, "%d\n", nrports);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 21efc73..649664d 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1422,17 +1422,23 @@
* Forcefully terminate all in progress connection recovery at the
* earliest, either in bind(), send_pdu(LOGIN), or conn_start()
*/
- if (bnx2i_adapter_ready(hba))
- return -EIO;
+ if (bnx2i_adapter_ready(hba)) {
+ ret_code = -EIO;
+ goto put_ep;
+ }
bnx2i_ep = ep->dd_data;
if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
/* Peer disconnect via' FIN or RST */
- return -EINVAL;
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
if (bnx2i_ep->hba != hba) {
/* Error - TCP connection does not belong to this device
@@ -1443,7 +1449,8 @@
iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
"belong to hba (%s)\n",
hba->netdev->name);
- return -EEXIST;
+ ret_code = -EEXIST;
+ goto put_ep;
}
bnx2i_ep->conn = bnx2i_conn;
bnx2i_conn->ep = bnx2i_ep;
@@ -1460,6 +1467,8 @@
bnx2i_put_rq_buf(bnx2i_conn, 0);
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+put_ep:
+ iscsi_put_endpoint(ep);
return ret_code;
}
@@ -2278,6 +2287,7 @@
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = bnx2i_conn_destroy,
.attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 37d9935..edcd3fa 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -117,6 +117,7 @@
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 2c34915..efb3e2b 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -134,6 +134,7 @@
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ecb134b..506b561 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2690,11 +2690,13 @@
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
- return err;
+ goto put_ep;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
- if (err)
- return -EINVAL;
+ if (err) {
+ err = -EINVAL;
+ goto put_ep;
+ }
/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
@@ -2715,7 +2717,9 @@
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 6cb48ae..d8967bf 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3631,10 +3631,19 @@
#endif
if (dcb->target_lun != 0) {
/* Copy settings */
- struct DeviceCtlBlk *p;
- list_for_each_entry(p, &acb->dcb_list, list)
- if (p->target_id == dcb->target_id)
+ struct DeviceCtlBlk *p = NULL, *iter;
+
+ list_for_each_entry(iter, &acb->dcb_list, list)
+ if (iter->target_id == dcb->target_id) {
+ p = iter;
break;
+ }
+
+ if (!p) {
+ kfree(dcb);
+ return NULL;
+ }
+
dprintkdbg(DBG_1,
"device_alloc: <%02i-%i> copy from <%02i-%i>\n",
dcb->target_id, dcb->target_lun,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5ea426e..bbc5d6b 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1969,7 +1969,7 @@
*
* Returns: u64 fc world wide name
*/
-u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN],
unsigned int scheme, unsigned int port)
{
u64 wwn;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 50a1c34..cd41dc0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -514,7 +514,7 @@
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
static bool auto_affine_msi_experimental;
@@ -2372,17 +2372,25 @@
return IRQ_WAKE_THREAD;
}
+static void hisi_sas_v3_free_vectors(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ pci_free_irq_vectors(pdev);
+}
+
static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
int vectors;
int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
struct Scsi_Host *shost = hisi_hba->shost;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
struct irq_affinity desc = {
.pre_vectors = BASE_VECTORS_V3_HW,
};
min_msi = MIN_AFFINE_VECTORS_V3_HW;
- vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
+ vectors = pci_alloc_irq_vectors_affinity(pdev,
min_msi, max_msi,
PCI_IRQ_MSI |
PCI_IRQ_AFFINITY,
@@ -2394,6 +2402,7 @@
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
return 0;
}
@@ -3313,7 +3322,7 @@
dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
rc = scsi_add_host(shost, dev);
if (rc)
- goto err_out_free_irq_vectors;
+ goto err_out_debugfs;
rc = sas_register_ha(sha);
if (rc)
@@ -3340,8 +3349,6 @@
err_out_register_ha:
scsi_remove_host(shost);
-err_out_free_irq_vectors:
- pci_free_irq_vectors(pdev);
err_out_debugfs:
hisi_sas_debugfs_exit(hisi_hba);
err_out_ha:
@@ -3369,7 +3376,6 @@
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
- pci_free_irq_vectors(pdev);
}
static void hisi_sas_v3_remove(struct pci_dev *pdev)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index f6d6539..b793e34 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -635,8 +635,13 @@
memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
- list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_del_tgt(tgt);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (vhost->client_migrated)
+ tgt->need_login = 1;
+ else
+ ibmvfc_del_tgt(tgt);
+ }
+
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -2822,9 +2827,12 @@
/* We need to re-setup the interpartition connection */
dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
+
+ scsi_block_requests(vhost->host);
ibmvfc_purge_requests(vhost, DID_REQUEUE);
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
+ wake_up(&vhost->work_wait_q);
} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index cc3908c..a343148 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -35,7 +35,7 @@
#define IBMVSCSIS_VERSION "v0.2"
-#define INITIAL_SRP_LIMIT 800
+#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b0aa58d..90e8a53 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9792,7 +9792,7 @@
GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) {
- while (--i > 0)
+ while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@@ -10065,7 +10065,7 @@
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
- while (--i >= 0)
+ while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index df47557..6485c1a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -558,6 +558,8 @@
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
+ mutex_init(&tcp_sw_conn->sock_lock);
+
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto free_conn;
@@ -592,11 +594,15 @@
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct socket *sock = tcp_sw_conn->sock;
+ /*
+ * The iscsi transport class will make sure we are not called in
+ * parallel with start, stop, bind and destroys. However, this can be
+ * called twice if userspace does a stop then a destroy.
+ */
if (!sock)
return;
@@ -604,9 +610,9 @@
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
sockfd_put(sock);
}
@@ -658,7 +664,6 @@
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -678,10 +683,10 @@
if (err)
goto free_socket;
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -717,8 +722,15 @@
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ if (!tcp_sw_conn->sock) {
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ return -ENOTCONN;
+ }
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ mutex_unlock(&tcp_sw_conn->sock_lock);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -733,8 +745,8 @@
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
struct sockaddr_in6 addr;
struct socket *sock;
int rc;
@@ -744,21 +756,36 @@
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_LOCAL_PORT:
spin_lock_bh(&conn->session->frwd_lock);
- if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ if (!conn->session->leadconn) {
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
- sock = tcp_sw_conn->sock;
- sock_hold(sock->sk);
+ /*
+ * The conn has been setup and bound, so just grab a ref
+ * incase a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&conn->session->frwd_lock);
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock) {
+ rc = -ENOTCONN;
+ goto sock_unlock;
+ }
+
if (param == ISCSI_PARAM_LOCAL_PORT)
rc = kernel_getsockname(sock,
(struct sockaddr *)&addr);
else
rc = kernel_getpeername(sock,
(struct sockaddr *)&addr);
- sock_put(sock->sk);
+sock_unlock:
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -796,17 +823,21 @@
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
- sock = tcp_sw_conn->sock;
- if (!sock) {
- spin_unlock_bh(&session->frwd_lock);
- return -ENOTCONN;
- }
- sock_hold(sock->sk);
+ /*
+ * The conn has been setup and bound, so just grab a ref
+ * incase a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- rc = kernel_getsockname(sock,
- (struct sockaddr *)&addr);
- sock_put(sock->sk);
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock)
+ rc = -ENOTCONN;
+ else
+ rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 7914531..1731956 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,8 @@
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ /* Taken when accessing the sock from the netlink/sysfs interface */
+ struct mutex sock_lock;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index a50f1ee..4261380 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1702,6 +1702,7 @@
if (cancel_delayed_work_sync(&ep->timeout_work)) {
FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
fc_exch_release(ep); /* release from pending timer hold */
+ return;
}
spin_lock_bh(&ep->ex_lock);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d4e66c5..05799b4 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1367,23 +1367,32 @@
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);
-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
- spin_lock_bh(&session->frwd_lock);
- if (session->state == ISCSI_STATE_FAILED) {
- spin_unlock_bh(&session->frwd_lock);
- return;
- }
+ if (session->state == ISCSI_STATE_FAILED)
+ return false;
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- spin_unlock_bh(&session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- iscsi_conn_error_event(conn->cls_conn, err);
+ return true;
+}
+
+void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+{
+ struct iscsi_session *session = conn->session;
+ bool needs_evt;
+
+ spin_lock_bh(&session->frwd_lock);
+ needs_evt = iscsi_set_conn_failed(conn);
+ spin_unlock_bh(&session->frwd_lock);
+
+ if (needs_evt)
+ iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -2117,6 +2126,51 @@
spin_unlock(&session->frwd_lock);
}
+/**
+ * iscsi_conn_unbind - prevent queueing to conn.
+ * @cls_conn: iscsi conn ep is bound to.
+ * @is_active: is the conn in use for boot or is this for EH/termination
+ *
+ * This must be called by drivers implementing the ep_disconnect callout.
+ * It disables queueing to the connection from libiscsi in preparation for
+ * an ep_disconnect call.
+ */
+void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
+{
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+
+ if (!cls_conn)
+ return;
+
+ conn = cls_conn->dd_data;
+ session = conn->session;
+ /*
+ * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
+ * complete or timeout. The caller just wants to know what's running
+ * is everything that needs to be cleaned up, and no cmds will be
+ * queued.
+ */
+ mutex_lock(&session->eh_mutex);
+
+ iscsi_suspend_queue(conn);
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ if (!is_active) {
+ /*
+ * if logout timed out before userspace could even send a PDU
+ * the state might still be in ISCSI_STATE_LOGGED_IN and
+ * allowing new cmds and TMFs.
+ */
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+ iscsi_set_conn_failed(conn);
+ }
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
+
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
struct iscsi_tm *hdr)
{
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 8b9a390..a1a06a8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -202,7 +202,7 @@
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
task->data_dir = qc->dma_dir;
- } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+ } else if (!ata_is_data(qc->tf.protocol)) {
task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 8d6bcc1..51485d0 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -85,7 +85,7 @@
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index beaf3a8..fbc76d6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2609,8 +2609,8 @@
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2690,8 +2690,8 @@
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2828,8 +2828,8 @@
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2956,8 +2956,8 @@
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -3062,8 +3062,8 @@
char *pbuf;
int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 47e832b..bfbc1c4 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4281,6 +4281,9 @@
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 1149bfc..17200b4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6670,7 +6670,7 @@
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -7076,6 +7076,9 @@
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
@@ -13614,6 +13617,8 @@
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e33f752..1e22364 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -857,7 +857,8 @@
lpfc_nvmet_invalidate_host(phba, ndlp);
if (ndlp->nlp_DID == Fabric_DID) {
- if (vport->port_state <= LPFC_FDISC)
+ if (vport->port_state <= LPFC_FDISC ||
+ vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 03c81ce..ef92e0b 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1315,7 +1315,8 @@
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
- struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ struct nvme_common_command *sqe;
+ struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
@@ -1371,8 +1372,14 @@
cstat->control_requests++;
}
- if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+ sqe = &((struct nvme_fc_cmd_iu *)
+ nCmd->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_async_event)
+ bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+ }
+
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a50f870..755d68b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -17445,7 +17445,6 @@
case FC_RCTL_ELS_REP: /* extended link services reply */
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
- case FC_RCTL_BA_NOP: /* basic link service NOP */
case FC_RCTL_BA_ABTS: /* basic link service abort */
case FC_RCTL_BA_RMC: /* remove connection */
case FC_RCTL_BA_ACC: /* basic accept */
@@ -17466,6 +17465,7 @@
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
return lpfc_fc_frame_check(phba, fc_hdr);
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
default:
goto drop;
}
@@ -18284,12 +18284,14 @@
if (!lpfc_complete_unsol_iocb(phba,
phba->sli4_hba.els_wq->pring,
iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
+ fc_hdr->fh_type)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
+ }
/* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 80f5469..daffa36 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4634,7 +4634,7 @@
* major number allocation.
*/
major = register_chrdev(0, "megadev_legacy", &megadev_fops);
- if (!major) {
+ if (major < 0) {
printk(KERN_WARNING
"megaraid: failed to register char device\n");
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 6b8ec57..c088a84 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2554,6 +2554,9 @@
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LUN_VALID(sdev) \
+ (((sdev)->lun == 0) ? 1 : 0)
+
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 1a70cc9..84a2e92 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2111,6 +2111,9 @@
goto scan_target;
}
return -ENXIO;
+ } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return -ENXIO;
}
scan_target:
@@ -2141,6 +2144,10 @@
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
+ if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return;
+ }
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 13022a4..7838c79 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -5198,7 +5198,6 @@
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 3153f16..c1b76cd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2822,23 +2822,22 @@
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
- int dma_mask;
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= 32)
- dma_mask = 32;
+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
+ ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
- dma_mask = 63;
+ ioc->dma_mask = 63;
else
- dma_mask = 64;
+ ioc->dma_mask = 64;
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
return -ENODEV;
- if (dma_mask > 32) {
+ if (ioc->dma_mask > 32) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
} else {
@@ -2848,7 +2847,7 @@
si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- dma_mask, convert_to_kb(s.totalram));
+ ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
@@ -4902,10 +4901,10 @@
dma_pool_free(ioc->pcie_sgl_dma_pool,
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
}
dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
-
if (ioc->config_page) {
dexitprintk(ioc,
ioc_info(ioc, "config_page(0x%p): free\n",
@@ -4961,6 +4960,89 @@
}
/**
+ * _base_reduce_hba_queue_depth- Retry with reduced queue depth
+ * @ioc: Adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ **/
+static inline int
+_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
+{
+ int reduce_sz = 64;
+
+ if ((ioc->hba_queue_depth - reduce_sz) >
+ (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+ ioc->hba_queue_depth -= reduce_sz;
+ return 0;
+ } else
+ return -ENOMEM;
+}
+
+/**
+ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
+ * for pcie sgl pools.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * @ct: Chain tracker
+ * Return: 0 for success, non-zero for failure.
+ */
+
+static int
+_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ int i = 0, j = 0;
+ struct chain_tracker *ct;
+
+ ioc->pcie_sgl_dma_pool =
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
+ ioc->page_size, 0);
+ if (!ioc->pcie_sgl_dma_pool) {
+ ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+ return -ENOMEM;
+ }
+
+ ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+ ioc->chains_per_prp_buffer =
+ min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ ioc->pcie_sg_lookup[i].pcie_sgl =
+ dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+ ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+ return -EAGAIN;
+ }
+
+ if (!mpt3sas_check_same_4gb_region(
+ (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
+ ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ (unsigned long long)
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+
+ for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+ ct = &ioc->chain_lookup[i].chains_per_smid[j];
+ ct->chain_buffer =
+ ioc->pcie_sg_lookup[i].pcie_sgl +
+ (j * ioc->chain_segment_sz);
+ ct->chain_buffer_dma =
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+ (j * ioc->chain_segment_sz);
+ }
+ }
+ dinitprintk(ioc, ioc_info(ioc,
+ "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+ dinitprintk(ioc, ioc_info(ioc,
+ "Number of chains can fit in a PRP page(%d)\n",
+ ioc->chains_per_prp_buffer));
+ return 0;
+}
+
+/**
* base_alloc_rdpq_dma_pool - Allocating DMA'able memory
* for reply queues.
* @ioc: per adapter object
@@ -5058,7 +5140,7 @@
unsigned short sg_tablesize;
u16 sge_size;
int i, j;
- int ret = 0;
+ int ret = 0, rc = 0;
struct chain_tracker *ct;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5357,6 +5439,7 @@
* be required for NVMe PRP's, only each set of NVMe blocks will be
* contiguous, so a new set is allocated for each possible I/O.
*/
+
ioc->chains_per_prp_buffer = 0;
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
nvme_blocks_needed =
@@ -5371,43 +5454,11 @@
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
- ioc->pcie_sgl_dma_pool =
- dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->pcie_sgl_dma_pool) {
- ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
- goto out;
- }
-
- ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
- ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
- ioc->chains_needed_per_io);
-
- for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
- ioc->pcie_sgl_dma_pool, GFP_KERNEL,
- &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
- if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
- ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
- goto out;
- }
- for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
- ct = &ioc->chain_lookup[i].chains_per_smid[j];
- ct->chain_buffer =
- ioc->pcie_sg_lookup[i].pcie_sgl +
- (j * ioc->chain_segment_sz);
- ct->chain_buffer_dma =
- ioc->pcie_sg_lookup[i].pcie_sgl_dma +
- (j * ioc->chain_segment_sz);
- }
- }
-
- dinitprintk(ioc,
- ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->scsiio_depth, sz,
- (sz * ioc->scsiio_depth) / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
- ioc->chains_per_prp_buffer));
+ rc = _base_allocate_pcie_sgl_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
total_sz += sz * ioc->scsiio_depth;
}
@@ -5577,6 +5628,19 @@
ioc->shost->sg_tablesize);
return 0;
+try_32bit_dma:
+ _base_release_memory_pools(ioc);
+ if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
+ /* Change dma coherent mask to 32 bit and reallocate */
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
+ pci_name(ioc->pdev));
+ return -ENODEV;
+ }
+ } else if (_base_reduce_hba_queue_depth(ioc) != 0)
+ return -ENOMEM;
+ goto retry_allocation;
+
out:
return -ENOMEM;
}
@@ -7239,6 +7303,7 @@
ioc->rdpq_array_enable_assigned = 0;
ioc->use_32bit_dma = false;
+ ioc->dma_mask = 64;
if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero;
else
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index bc8beb1..823bbe6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1257,6 +1257,7 @@
u16 thresh_hold;
u8 high_iops_queues;
u32 drv_support_bitmap;
+ u32 dma_mask;
bool enable_sdev_max_qd;
bool use_32bit_dma;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8418b59..c3a5978 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3501,6 +3501,7 @@
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3559,7 +3560,6 @@
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index b03c0f3..85ca842 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -646,6 +646,7 @@
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
@@ -697,7 +698,7 @@
mvs_show_driver_version(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+ return sysfs_emit(buffer, "%s\n", DRV_VERSION);
}
static DEVICE_ATTR(driver_version,
@@ -749,7 +750,7 @@
static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+ return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
}
static DEVICE_ATTR(interrupt_coalescing,
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 5fa0f4e..ad17c2b 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1241,7 +1241,8 @@
myrb_unmap(cb);
if (cb->mmio_base) {
- cb->disable_intr(cb->io_base);
+ if (cb->disable_intr)
+ cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)
@@ -3515,9 +3516,13 @@
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
+ cb->host = shost;
- if (pci_enable_device(pdev))
- goto failure;
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ scsi_host_put(shost);
+ return NULL;
+ }
if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 9b31895..da9fbe6 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1711,7 +1711,6 @@
}
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
@@ -1720,13 +1719,16 @@
task->task_done = pm8001_task_done;
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
+ if (res) {
+ sas_free_task(task);
return;
+ }
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
+ ccb->n_elem = 0;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -1737,8 +1739,10 @@
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
sizeof(task_abort), 0);
- if (ret)
+ if (ret) {
+ sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
+ }
}
@@ -1788,6 +1792,7 @@
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
+ ccb->n_elem = 0;
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
@@ -1804,7 +1809,7 @@
sata_cmd.tag = cpu_to_le32(ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
@@ -2365,7 +2370,8 @@
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
- } else if (t->ata_task.use_ncq) {
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
@@ -3666,12 +3672,11 @@
mb();
if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
- pm8001_tag_free(pm8001_ha, tag);
sas_free_task(t);
- /* clear the flag */
- pm8001_dev->id &= 0xBFFFFFFF;
- } else
+ pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG;
+ } else {
t->task_done(t);
+ }
return 0;
}
@@ -4220,22 +4225,22 @@
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == DMA_NONE) {
+
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
- }
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
@@ -4425,6 +4430,9 @@
SAS_ADDR_SIZE);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
return rc;
}
@@ -4575,7 +4583,7 @@
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
if (pm8001_ha->chip_id != chip_8001)
- sspTMCmd.ds_ads_m = 0x08;
+ sspTMCmd.ds_ads_m = cpu_to_le32(0x08);
circularQ = &pm8001_ha->inbnd_q_tbl[0];
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
sizeof(sspTMCmd), 0);
@@ -4837,6 +4845,11 @@
ccb->ccb_tag = tag;
rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
tag);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_tag_free(pm8001_ha, tag);
+ }
+
return rc;
}
@@ -4941,6 +4954,9 @@
payload.nds = cpu_to_le32(state);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
return rc;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 75ac4d8..ba58525 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -831,10 +831,10 @@
res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
pm8001_dev, flag, task_tag, ccb_tag);
-
if (res) {
del_timer(&task->slow_task->timer);
pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
+ pm8001_tag_free(pm8001_ha, ccb_tag);
goto ex_err;
}
wait_for_completion(&task->slow_task->completion);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 2a3ce46..0305c89 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -66,18 +66,16 @@
}
static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
- const void *destination,
+ __le32 *destination,
u32 dw_count, u32 bus_base_number)
{
u32 index, value, offset;
- u32 *destination1;
- destination1 = (u32 *)destination;
- for (index = 0; index < dw_count; index += 4, destination1++) {
+ for (index = 0; index < dw_count; index += 4, destination++) {
offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
- *destination1 = cpu_to_le32(value);
+ *destination = cpu_to_le32(value);
}
}
return;
@@ -767,6 +765,10 @@
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1026,6 +1028,13 @@
if (0x0000 != gst_len_mpistate)
return -EBUSY;
+ /*
+ * As per controller datasheet, after successful MPI
+ * initialization minimum 500ms delay is required before
+ * issuing commands.
+ */
+ msleep(500);
+
return 0;
}
@@ -1199,9 +1208,11 @@
else
page_code = THERMAL_PAGE_CODE_8H;
- payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
- (THERMAL_ENABLE << 8) | page_code;
- payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+ payload.cfg_pg[0] =
+ cpu_to_le32((THERMAL_LOG_ENABLE << 9) |
+ (THERMAL_ENABLE << 8) | page_code);
+ payload.cfg_pg[1] =
+ cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8));
pm8001_dbg(pm8001_ha, DEV,
"Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
@@ -1241,43 +1252,41 @@
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
- SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
- SASConfigPage.MST_MSI = 3 << 15;
- SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
- SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
- (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
- SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
+ SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE);
+ SASConfigPage.MST_MSI = cpu_to_le32(3 << 15);
+ SASConfigPage.STP_SSP_MCT_TMO =
+ cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO);
+ SASConfigPage.STP_FRM_TMO =
+ cpu_to_le32((SAS_MAX_OPEN_TIME << 24) |
+ (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER);
+ SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME);
- if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
- SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
-
-
- SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
- SAS_OPNRJT_RTRY_INTVL;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
- | SAS_COPNRJT_RTRY_TMO;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
- | SAS_COPNRJT_RTRY_THR;
- SASConfigPage.MAX_AIP = SAS_MAX_AIP;
+ SASConfigPage.OPNRJT_RTRY_INTVL =
+ cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR);
+ SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
- SASConfigPage.pageCode);
+ le32_to_cpu(SASConfigPage.pageCode));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
- SASConfigPage.MST_MSI);
+ le32_to_cpu(SASConfigPage.MST_MSI));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
- SASConfigPage.STP_SSP_MCT_TMO);
+ le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
- SASConfigPage.STP_FRM_TMO);
+ le32_to_cpu(SASConfigPage.STP_FRM_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
- SASConfigPage.STP_IDLE_TMO);
+ le32_to_cpu(SASConfigPage.STP_IDLE_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
- SASConfigPage.OPNRJT_RTRY_INTVL);
+ le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
- SASConfigPage.MAX_AIP);
+ le32_to_cpu(SASConfigPage.MAX_AIP));
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
@@ -1403,12 +1412,13 @@
/* Currently only one key is used. New KEK index is 1.
* Current KEK index is 1. Store KEK to NVRAM is 1.
*/
- payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
- KEK_MGMT_SUBOP_KEYCARDUPDATE);
+ payload.new_curidx_ksop =
+ cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) |
+ KEK_MGMT_SUBOP_KEYCARDUPDATE));
pm8001_dbg(pm8001_ha, DEV,
"Saving Encryption info to flash. payload 0x%x\n",
- payload.new_curidx_ksop);
+ le32_to_cpu(payload.new_curidx_ksop));
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
sizeof(payload), 0);
@@ -1683,10 +1693,11 @@
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1702,12 +1713,15 @@
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
@@ -1749,6 +1763,7 @@
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
+ ccb->n_elem = 0;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -1830,7 +1845,7 @@
sata_cmd.tag = cpu_to_le32(ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9)));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
@@ -2464,7 +2479,8 @@
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
- } else if (t->ata_task.use_ncq) {
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
@@ -4307,13 +4323,15 @@
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
int ret;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
struct inbound_queue_table *circularQ;
u32 q_index, cpu_id;
u32 opc = OPC_INB_SSPINIIOSTART;
+
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+
/* data address domain added for spcv; set to 0 by host,
* used internally by controller
* 0 for SAS 1.1 and SAS 2.0 compatible TLR
@@ -4324,7 +4342,7 @@
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+ ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
@@ -4356,21 +4374,24 @@
ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.enc_addr_high) {
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+
+ if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.enc_len,
+ dma_addr,
+ le32_to_cpu(ssp_cmd.enc_len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
@@ -4379,7 +4400,7 @@
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(phys_addr));
- ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ ssp_cmd.enc_esgl = cpu_to_le32(1U<<31);
}
} else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0;
@@ -4387,8 +4408,10 @@
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
}
+
/* XTS mode. All other fields are 0 */
- ssp_cmd.key_cmode = 0x6 << 4;
+ ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
(task->ssp_task.cmd->cmnd[3] << 16) |
@@ -4410,20 +4433,22 @@
ssp_cmd.esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.addr_high) {
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.len,
+ dma_addr,
+ le32_to_cpu(ssp_cmd.len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
@@ -4457,7 +4482,7 @@
u32 q_index, cpu_id;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
u32 dir;
@@ -4469,22 +4494,21 @@
q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == DMA_NONE) {
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
- }
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
@@ -4518,32 +4542,38 @@
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
- sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
- sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
- sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
- sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(dma_addr));
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != sata_cmd.enc_addr_high) {
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.enc_len,
+ dma_addr,
+ le32_to_cpu(sata_cmd.enc_len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
sata_cmd.enc_addr_low =
- lower_32_bits(phys_addr);
+ cpu_to_le32(lower_32_bits(phys_addr));
sata_cmd.enc_addr_high =
- upper_32_bits(phys_addr);
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl =
cpu_to_le32(1 << 31);
}
@@ -4554,7 +4584,8 @@
sata_cmd.enc_esgl = 0;
}
/* XTS mode. All other fields are 0 */
- sata_cmd.key_index_mode = 0x6 << 4;
+ sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
sata_cmd.twk_val0 =
cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
@@ -4580,31 +4611,31 @@
phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
sata_cmd.addr_low = lower_32_bits(dma_addr);
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
if (end_addr_high != sata_cmd.addr_high) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.len,
+ dma_addr,
+ le32_to_cpu(sata_cmd.len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
- sata_cmd.addr_low =
- lower_32_bits(phys_addr);
- sata_cmd.addr_high =
- upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.addr_low = lower_32_bits(phys_addr);
+ sata_cmd.addr_high = upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
}
} else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0;
@@ -4612,27 +4643,28 @@
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
}
+
/* scsi cdb */
sata_cmd.atapi_scsi_cdb[0] =
cpu_to_le32(((task->ata_task.atapi_packet[0]) |
- (task->ata_task.atapi_packet[1] << 8) |
- (task->ata_task.atapi_packet[2] << 16) |
- (task->ata_task.atapi_packet[3] << 24)));
+ (task->ata_task.atapi_packet[1] << 8) |
+ (task->ata_task.atapi_packet[2] << 16) |
+ (task->ata_task.atapi_packet[3] << 24)));
sata_cmd.atapi_scsi_cdb[1] =
cpu_to_le32(((task->ata_task.atapi_packet[4]) |
- (task->ata_task.atapi_packet[5] << 8) |
- (task->ata_task.atapi_packet[6] << 16) |
- (task->ata_task.atapi_packet[7] << 24)));
+ (task->ata_task.atapi_packet[5] << 8) |
+ (task->ata_task.atapi_packet[6] << 16) |
+ (task->ata_task.atapi_packet[7] << 24)));
sata_cmd.atapi_scsi_cdb[2] =
cpu_to_le32(((task->ata_task.atapi_packet[8]) |
- (task->ata_task.atapi_packet[9] << 8) |
- (task->ata_task.atapi_packet[10] << 16) |
- (task->ata_task.atapi_packet[11] << 24)));
+ (task->ata_task.atapi_packet[9] << 8) |
+ (task->ata_task.atapi_packet[10] << 16) |
+ (task->ata_task.atapi_packet[11] << 24)));
sata_cmd.atapi_scsi_cdb[3] =
cpu_to_le32(((task->ata_task.atapi_packet[12]) |
- (task->ata_task.atapi_packet[13] << 8) |
- (task->ata_task.atapi_packet[14] << 16) |
- (task->ata_task.atapi_packet[15] << 24)));
+ (task->ata_task.atapi_packet[13] << 8) |
+ (task->ata_task.atapi_packet[14] << 16) |
+ (task->ata_task.atapi_packet[15] << 24)));
}
/* Check for read log for failed drive and return */
@@ -4830,8 +4862,13 @@
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index cbe5fab..ce10d68 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4528,7 +4528,7 @@
return 0;
out_unwind:
- while (--i > 0)
+ while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index e64457f..f48ef47 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1917,6 +1917,27 @@
fc_vport_setlink(vn_port);
}
+ /* Set symbolic node name */
+ if (base_qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (base_qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+ /* Set supported speed */
+ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+ /* Set speed */
+ vn_port->link_speed = n_port->link_speed;
+
+ /* Set port type */
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+ /* Set maxframe size */
+ fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
vn_port);
@@ -3671,11 +3692,6 @@
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index f51723e..3bcadb3 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -387,6 +387,7 @@
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct qedi_endpoint *qedi_ep;
struct iscsi_endpoint *ep;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -394,11 +395,16 @@
qedi_ep = ep->dd_data;
if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
- return -EINVAL;
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
@@ -408,13 +414,18 @@
qedi_conn->cmd_cleanup_req = 0;
qedi_conn->cmd_cleanup_cmpl = 0;
- if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
- return -EINVAL;
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
spin_lock_init(&qedi_conn->tmf_work_lock);
INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
init_waitqueue_head(&qedi_conn->wait_queue);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
@@ -817,6 +828,37 @@
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -865,6 +907,7 @@
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -1015,12 +1058,11 @@
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
conn = qedi_conn->cls_conn->dd_data;
@@ -1185,37 +1227,6 @@
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1331,7 +1342,6 @@
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
@@ -1428,6 +1438,7 @@
.destroy_session = qedi_session_destroy,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.start_conn = qedi_conn_start,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qedi_conn_destroy,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index e40a372..61b9dc5 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -555,7 +555,7 @@
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha))
+ if (!IS_NOCACHE_VPD_TYPE(ha))
goto skip;
faddr = ha->flt_region_vpd << 2;
@@ -739,7 +739,7 @@
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
@@ -1050,9 +1050,6 @@
continue;
if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
continue;
- if (iter->type == 0x27 &&
- (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
- continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -3265,11 +3262,34 @@
.bsg_timeout = qla24xx_bsg_timeout,
};
+static uint
+qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
+{
+ uint supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+ if (speeds & FDMI_PORT_SPEED_64GB)
+ supported_speeds |= FC_PORTSPEED_64GBIT;
+ if (speeds & FDMI_PORT_SPEED_32GB)
+ supported_speeds |= FC_PORTSPEED_32GBIT;
+ if (speeds & FDMI_PORT_SPEED_16GB)
+ supported_speeds |= FC_PORTSPEED_16GBIT;
+ if (speeds & FDMI_PORT_SPEED_8GB)
+ supported_speeds |= FC_PORTSPEED_8GBIT;
+ if (speeds & FDMI_PORT_SPEED_4GB)
+ supported_speeds |= FC_PORTSPEED_4GBIT;
+ if (speeds & FDMI_PORT_SPEED_2GB)
+ supported_speeds |= FC_PORTSPEED_2GBIT;
+ if (speeds & FDMI_PORT_SPEED_1GB)
+ supported_speeds |= FC_PORTSPEED_1GBIT;
+
+ return supported_speeds;
+}
+
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- u32 speeds = FC_PORTSPEED_UNKNOWN;
+ u32 speeds = 0, fdmi_speed = 0;
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -3279,7 +3299,8 @@
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- speeds = qla25xx_fdmi_port_speed_capability(ha);
+ fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
+ speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);
fc_host_supported_speeds(vha->host) = speeds;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1fd91a..6afce45 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2796,7 +2796,11 @@
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
#define FDMI_PORT_SPEED_32GB 0x40
-#define FDMI_PORT_SPEED_64GB 0x80
+#define FDMI_PORT_SPEED_20GB 0x80
+#define FDMI_PORT_SPEED_40GB 0x100
+#define FDMI_PORT_SPEED_128GB 0x200
+#define FDMI_PORT_SPEED_64GB 0x400
+#define FDMI_PORT_SPEED_256GB 0x800
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
#define FC_CLASS_2 0x04
@@ -3853,6 +3857,7 @@
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
+ u8 port_name[WWN_SIZE];
volatile struct {
uint32_t mbox_int :1;
@@ -4130,8 +4135,8 @@
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
@@ -5171,4 +5176,8 @@
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
+
+#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
+ _fcport->disc_state == DSC_DELETED)
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3bc1850..7e5ee31 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -405,7 +405,8 @@
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e28c4b7..20bbd69 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -676,8 +676,7 @@
return (QLA_SUCCESS);
}
- return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
- FC4_TYPE_FCP_SCSI);
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
}
static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
@@ -727,7 +726,7 @@
/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
ct_req->req.rff_id.fc4_feature = fc4feature;
- ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
@@ -1595,7 +1594,6 @@
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
struct ct_fdmi_hba_attr *eiter;
uint16_t alen;
@@ -1757,8 +1755,8 @@
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
+
alen = sizeof(eiter->a.max_ct_len);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1850,7 +1848,6 @@
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
char *hostname = p_sysid ?
p_sysid->nodename : fc_host_system_hostname(vha->host);
@@ -1902,8 +1899,7 @@
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
alen = sizeof(eiter->a.max_frame_size);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -3556,7 +3552,7 @@
do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
- fcport->logout_on_delete = 0;
+ continue;
ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index fdae25e..422ff67 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -570,6 +570,14 @@
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
+ if (IS_SESSION_DELETED(fcport)) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
+ }
+
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
@@ -953,6 +961,9 @@
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
break;
+ case ISP_CFG_NL:
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
default:
break;
}
@@ -1313,14 +1324,21 @@
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
- fcport->loop_id == FC_NO_LOOP_ID) {
+ if (IS_SESSION_DELETED(fcport)) {
ql_log(ql_log_warn, vha, 0xffff,
- "%s: %8phC - not sending command.\n",
- __func__, fcport->port_name);
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
+ if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC online %d flags %x - not sending command.\n",
+ __func__, fcport->port_name, vha->flags.online, fcport->flags);
+ goto done;
+ }
+
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -1480,6 +1498,11 @@
u8 login = 0;
int rc;
+ ql_dbg(ql_dbg_disc, vha, 0x307b,
+ "%s %8phC DS %d LS %d lid %d retries=%d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
+
if (qla_tgt_mode_enabled(vha))
return;
@@ -1537,7 +1560,8 @@
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
fcport->login_gen, fcport->loop_id, fcport->scan_state);
- if (fcport->scan_state != QLA_FCPORT_FOUND)
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_DELETE_PEND)
return 0;
if ((fcport->loop_id != FC_NO_LOOP_ID) &&
@@ -1558,7 +1582,7 @@
if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
return 0;
- if (fcport->flags & FCF_ASYNC_SENT) {
+ if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
}
@@ -1710,7 +1734,8 @@
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
fcport->d_id.b24, fcport->port_name);
@@ -1722,7 +1747,8 @@
break;
case RSCN_AREA_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
@@ -1733,7 +1759,8 @@
break;
case RSCN_DOM_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
@@ -1745,7 +1772,8 @@
case RSCN_FAB_ADDR:
default:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
fcport->scan_needed = 1;
@@ -2114,12 +2142,7 @@
ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
- if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, ea->fcport, 1);
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
case MBS_LOOP_ID_USED:
/* data[1] = IO PARAM 1 = nport ID */
@@ -3309,6 +3332,14 @@
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
+ if (ha->fw_dump) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
+ return;
+ }
+
+ ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -3319,7 +3350,7 @@
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA83XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -3331,8 +3362,7 @@
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
- !IS_QLA28XX(ha))
+ if (!IS_QLA83XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
* Allocate maximum buffer size for all queues - Q0.
@@ -3893,8 +3923,7 @@
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
@@ -4303,6 +4332,8 @@
BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
+ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
}
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
@@ -5243,6 +5274,22 @@
return QLA_FUNCTION_FAILED;
}
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5294,6 +5341,19 @@
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5382,6 +5442,13 @@
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
fcport->scan_state = QLA_FCPORT_FOUND;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x2135,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
found++;
break;
}
@@ -9127,7 +9194,7 @@
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(qpair, smp_processor_id());
+ qla_cpu_update(qpair, raw_smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c532c74..e54cc2a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2910,6 +2910,7 @@
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ break;
}
fallthrough;
default:
@@ -2919,9 +2920,7 @@
fw_status[0], fw_status[1], fw_status[2]);
fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport,
- DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
break;
@@ -2933,8 +2932,7 @@
fw_status[0], fw_status[1], fw_status[2]);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5e040b6..7ea73ad 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1202,9 +1202,7 @@
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
- void *wwpn = ha->init_cb->port_name;
-
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ memcpy(vha->port_name, ha->port_name, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
@@ -2248,6 +2246,7 @@
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
+ host_to_fcp_swap(sts->data, sizeof(sts->data));
if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
@@ -4055,16 +4054,12 @@
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 734745f..6ff720d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -9,6 +9,12 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+#ifdef CONFIG_PPC
+#define IS_PPCARCH true
+#else
+#define IS_PPCARCH false
+#endif
+
static struct mb_cmd_name {
uint16_t cmd;
const char *str;
@@ -227,6 +233,8 @@
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
wrt_reg_word(optr, *iptr);
+ } else {
+ wrt_reg_word(optr, 0);
}
mboxes >>= 1;
@@ -263,6 +271,12 @@
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
@@ -273,12 +287,6 @@
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -698,6 +706,9 @@
vha->min_supported_speed =
nv->min_supported_speed;
}
+
+ if (IS_PPCARCH)
+ mcp->mb[11] |= BIT_4;
}
if (ha->flags.exlogins_enabled)
@@ -2984,8 +2995,7 @@
ha->orig_fw_iocb_count = mcp->mb[10];
if (ha->flags.npiv_supported)
ha->max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
ha->fw_max_fcf_count = mcp->mb[12];
}
@@ -3007,7 +3017,8 @@
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -3047,6 +3058,8 @@
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -5546,7 +5559,7 @@
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mcp->in_mb |= MBX_3;
+ mcp->in_mb |= MBX_4|MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 5acee3c..d63ccdf 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -165,6 +165,18 @@
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
+static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
+{
+ if (sp->flags & SRB_DMA_VALID) {
+ struct srb_iocb *nvme = &sp->u.iocb_cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+}
+
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
@@ -181,6 +193,8 @@
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd = priv->fd;
+
+ qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
@@ -327,6 +341,8 @@
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags |= SRB_DMA_VALID;
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
@@ -334,6 +350,7 @@
wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
+ qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e7f73a1..4191561 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3673,8 +3673,7 @@
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- ha->msixbase)
+ if (ha->msixbase)
iounmap(ha->msixbase);
}
}
@@ -5391,6 +5390,11 @@
ea.fcport = fcport;
qla24xx_handle_relogin_event(vha, &ea);
} else if (vha->hw->current_topology ==
+ ISP_CFG_NL &&
+ IS_QLA2XXX_MIDTYPE(vha->hw)) {
+ (void)qla24xx_fcport_handle_login(vha,
+ fcport);
+ } else if (vha->hw->current_topology ==
ISP_CFG_NL) {
fcport->login_retry--;
status =
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 0f92e9a..0fa9c52 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -844,7 +844,7 @@
ha->flt_region_nvram = start;
break;
case FLT_REG_IMG_PRI_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
@@ -1356,7 +1356,7 @@
flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
- "Failed slopw write %x (%x)\n", faddr, *dwptr);
+ "Failed slow write %x (%x)\n", faddr, *dwptr);
break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ebed14b..ecb30c2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3256,6 +3256,7 @@
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
+ res = 0;
goto out_unmap_unlock;
}
@@ -3772,6 +3773,9 @@
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
@@ -6813,14 +6817,8 @@
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
@@ -7076,8 +7074,7 @@
if (!QLA_TGT_MODE_ENABLED())
return;
- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2c23b69..8d82d2a 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -259,6 +259,7 @@
.start_conn = qla4xxx_conn_start,
.create_conn = qla4xxx_conn_create,
.bind_conn = qla4xxx_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
@@ -3237,6 +3238,7 @@
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
+ iscsi_put_endpoint(ep);
return 0;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6b00de6..cc20621 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1878,6 +1878,13 @@
arr[14] |= 0x40;
}
+ /*
+ * Since the scsi_debug READ CAPACITY implementation always reports the
+ * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
+ */
+ if (devip->zmodel == BLK_ZONED_HM)
+ arr[12] |= 1 << 4;
+
arr[15] = sdebug_lowest_aligned & 0xff;
if (have_dif_prot) {
@@ -2746,6 +2753,24 @@
}
}
+static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ switch (zsp->z_cond) {
+ case ZC2_IMPLICIT_OPEN:
+ devip->nr_imp_open--;
+ break;
+ case ZC3_EXPLICIT_OPEN:
+ devip->nr_exp_open--;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
+ zsp->z_start, zsp->z_cond);
+ break;
+ }
+ zsp->z_cond = ZC5_FULL;
+}
+
static void zbc_inc_wp(struct sdebug_dev_info *devip,
unsigned long long lba, unsigned int num)
{
@@ -2758,7 +2783,7 @@
if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
zsp->z_wp += num;
if (zsp->z_wp >= zend)
- zsp->z_cond = ZC5_FULL;
+ zbc_set_zone_full(devip, zsp);
return;
}
@@ -2777,7 +2802,7 @@
n = num;
}
if (zsp->z_wp >= zend)
- zsp->z_cond = ZC5_FULL;
+ zbc_set_zone_full(devip, zsp);
num -= n;
lba += n;
@@ -7061,8 +7086,12 @@
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
- if (error)
+ if (error) {
+ spin_lock(&sdebug_host_list_lock);
+ list_del(&sdbg_host->host_list);
+ spin_unlock(&sdebug_host_list_lock);
goto clean;
+ }
++sdebug_num_hosts;
return 0;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 42db9c5..6cc4d07 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -815,6 +815,14 @@
}
mutex_lock(&sdev->state_mutex);
+ switch (sdev->sdev_state) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ break;
+ default:
+ mutex_unlock(&sdev->state_mutex);
+ return -EINVAL;
+ }
if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
ret = 0;
} else {
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index a5759d0..ef7cd75 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,16 +86,10 @@
struct transport_container session_cont;
};
-/* Worker to perform connection failure on unresponsive connections
- * completely in kernel space.
- */
-static void stop_conn_work_fn(struct work_struct *work);
-static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
-
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
-static struct workqueue_struct *iscsi_destroy_workq;
+static struct workqueue_struct *iscsi_conn_cleanup_workq;
static DEFINE_IDA(iscsi_sess_ida);
/*
@@ -268,9 +262,20 @@
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+void iscsi_put_endpoint(struct iscsi_endpoint *ep)
+{
+ put_device(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
+
+/**
+ * iscsi_lookup_endpoint - get ep from handle
+ * @handle: endpoint handle
+ *
+ * Caller must do an iscsi_put_endpoint.
+ */
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct iscsi_endpoint *ep;
struct device *dev;
dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
@@ -278,13 +283,7 @@
if (!dev)
return NULL;
- ep = iscsi_dev_to_endpoint(dev);
- /*
- * we can drop this now because the interface will prevent
- * removals and lookups from racing.
- */
- put_device(dev);
- return ep;
+ return iscsi_dev_to_endpoint(dev);
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
@@ -1598,12 +1597,6 @@
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
-/*
- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
- * against the kernel stop_connection recovery mechanism
- */
-static DEFINE_MUTEX(conn_mutex);
-
static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
@@ -2225,6 +2218,155 @@
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);
+static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
+
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+ break;
+ case STOP_CONN_TERM:
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
+ flag);
+ return;
+ }
+
+ conn->transport->stop_conn(conn, flag);
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
+}
+
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
+static int iscsi_if_stop_conn(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ int flag = ev->u.stop_conn.flag;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
+ /*
+ * If this is a termination we have to call stop_conn with that flag
+ * so the correct states get set. If we haven't run the work yet try to
+ * avoid the extra run.
+ */
+ if (flag == STOP_CONN_TERM) {
+ cancel_work_sync(&conn->cleanup_work);
+ iscsi_stop_conn(conn, flag);
+ } else {
+ /*
+ * For offload, when iscsid is restarted it won't know about
+ * existing endpoints so it can't do a ep_disconnect. We clean
+ * it up here for userspace.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
+ * Figure out if it was the kernel or userspace initiating this.
+ */
+ spin_lock_irq(&conn->lock);
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_stop_conn(conn, flag);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn,
+ "flush kernel conn cleanup.\n");
+ flush_work(&conn->cleanup_work);
+ }
+ /*
+ * Only clear for recovery to avoid extra cleanup runs during
+ * termination.
+ */
+ spin_lock_irq(&conn->lock);
+ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
+ }
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
+ return 0;
+}
+
+static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
+{
+ struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
+ cleanup_work);
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+
+ mutex_lock(&conn->ep_mutex);
+ /*
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
+ */
+ if (conn->ep)
+ get_device(&conn->ep->dev);
+ iscsi_ep_disconnect(conn, false);
+
+ if (system_state != SYSTEM_RUNNING) {
+ /*
+ * If the user has set up for the session to never timeout
+ * then hang like they wanted. For all other cases fail right
+ * away since userspace is not going to relogin.
+ */
+ if (session->recovery_tmo > 0)
+ session->recovery_tmo = 0;
+ }
+
+ iscsi_stop_conn(conn, STOP_CONN_RECOVER);
+ mutex_unlock(&conn->ep_mutex);
+ ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
+}
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -2263,11 +2405,12 @@
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
- INIT_LIST_HEAD(&conn->conn_list_err);
+ INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2321,7 +2464,6 @@
spin_lock_irqsave(&connlock, flags);
list_del(&conn->conn_list);
- list_del(&conn->conn_list_err);
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
@@ -2448,77 +2590,6 @@
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
-/*
- * This can be called without the rx_queue_mutex, if invoked by the kernel
- * stop work. But, in that case, it is guaranteed not to race with
- * iscsi_destroy by conn_mutex.
- */
-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
-{
- /*
- * It is important that this path doesn't rely on
- * rx_queue_mutex, otherwise, a thread doing allocation on a
- * start_session/start_connection could sleep waiting on a
- * writeback to a failed iscsi device, that cannot be recovered
- * because the lock is held. If we don't hold it here, the
- * kernel stop_conn_work_fn has a chance to stop the broken
- * session and resolve the allocation.
- *
- * Still, the user invoked .stop_conn() needs to be serialized
- * with stop_conn_work_fn by a private mutex. Not pretty, but
- * it works.
- */
- mutex_lock(&conn_mutex);
- switch (flag) {
- case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
- break;
- case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
- break;
- default:
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- goto unlock;
- }
-
- conn->transport->stop_conn(conn, flag);
-unlock:
- mutex_unlock(&conn_mutex);
-}
-
-static void stop_conn_work_fn(struct work_struct *work)
-{
- struct iscsi_cls_conn *conn, *tmp;
- unsigned long flags;
- LIST_HEAD(recovery_list);
-
- spin_lock_irqsave(&connlock, flags);
- if (list_empty(&connlist_err)) {
- spin_unlock_irqrestore(&connlock, flags);
- return;
- }
- list_splice_init(&connlist_err, &recovery_list);
- spin_unlock_irqrestore(&connlock, flags);
-
- list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
- uint32_t sid = iscsi_conn_get_sid(conn);
- struct iscsi_cls_session *session;
-
- session = iscsi_session_lookup(sid);
- if (session) {
- if (system_state != SYSTEM_RUNNING) {
- session->recovery_tmo = 0;
- iscsi_if_stop_conn(conn, STOP_CONN_TERM);
- } else {
- iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
- }
- }
-
- list_del_init(&conn->conn_list_err);
- }
-}
-
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -2527,11 +2598,31 @@
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
unsigned long flags;
+ int state;
- spin_lock_irqsave(&connlock, flags);
- list_add(&conn->conn_list_err, &connlist_err);
- spin_unlock_irqrestore(&connlock, flags);
- queue_work(system_unbound_wq, &stop_conn_work);
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+ * Userspace will only do a stop call if we are at least bound. And, we
+ * only need to do the in kernel cleanup if in the UP state so cmds can
+ * be released to upper layers. If in other states just wait for
+ * userspace to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2861,26 +2952,17 @@
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
- unsigned long flags;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
- spin_lock_irqsave(&connlock, flags);
- if (!list_empty(&conn->conn_list_err)) {
- spin_unlock_irqrestore(&connlock, flags);
- return -EAGAIN;
- }
- spin_unlock_irqrestore(&connlock, flags);
-
+ ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
+ flush_work(&conn->cleanup_work);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
- mutex_lock(&conn_mutex);
if (transport->destroy_conn)
transport->destroy_conn(conn);
- mutex_unlock(&conn_mutex);
-
return 0;
}
@@ -2890,7 +2972,7 @@
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2907,8 +2989,8 @@
session->recovery_tmo = value;
break;
default:
- if ((conn->state == ISCSI_CONN_BOUND) ||
- (conn->state == ISCSI_CONN_UP)) {
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -2968,15 +3050,22 @@
ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
return -EINVAL;
+
conn = ep->conn;
- if (conn) {
- mutex_lock(&conn->ep_mutex);
- conn->ep = NULL;
- mutex_unlock(&conn->ep_mutex);
- conn->state = ISCSI_CONN_FAILED;
+ if (!conn) {
+ /*
+ * conn was not even bound yet, so we can't get iscsi conn
+ * failures yet.
+ */
+ transport->ep_disconnect(ep);
+ goto put_ep;
}
- transport->ep_disconnect(ep);
+ mutex_lock(&conn->ep_mutex);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
+ mutex_unlock(&conn->ep_mutex);
+put_ep:
+ iscsi_put_endpoint(ep);
return 0;
}
@@ -3002,6 +3091,7 @@
ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
rc = iscsi_if_ep_disconnect(transport,
@@ -3632,18 +3722,123 @@
return err;
}
+static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn = NULL;
+ struct iscsi_endpoint *ep;
+ uint32_t pdu_len;
+ int err = 0;
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_CONN:
+ return iscsi_if_create_conn(transport, ev);
+ case ISCSI_UEVENT_DESTROY_CONN:
+ return iscsi_if_destroy_conn(transport, ev);
+ case ISCSI_UEVENT_STOP_CONN:
+ return iscsi_if_stop_conn(transport, ev);
+ }
+
+ /*
+ * The following cmds need to be run under the ep_mutex so in kernel
+ * conn cleanup (ep_disconnect + unbind and conn) is not done while
+ * these are running. They also must not run if we have just run a conn
+ * cleanup because they would set the state in a way that might allow
+ * IO or send IO themselves.
+ */
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_START_CONN:
+ conn = iscsi_conn_lookup(ev->u.start_conn.sid,
+ ev->u.start_conn.cid);
+ break;
+ case ISCSI_UEVENT_BIND_CONN:
+ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+ break;
+ }
+
+ if (!conn)
+ return -EINVAL;
+
+ mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
+ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ mutex_unlock(&conn->ep_mutex);
+ ev->r.retcode = -ENOTCONN;
+ return 0;
+ }
+ spin_unlock_irq(&conn->lock);
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_BIND_CONN:
+ session = iscsi_session_lookup(ev->u.b_conn.sid);
+ if (!session) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (!ev->r.retcode)
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
+
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+ conn->ep = ep;
+ iscsi_put_endpoint(ep);
+ } else {
+ err = -ENOTCONN;
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn binding\n");
+ }
+ break;
+ case ISCSI_UEVENT_START_CONN:
+ ev->r.retcode = transport->start_conn(conn);
+ if (!ev->r.retcode)
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+
+ if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->send_pdu(conn,
+ (struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
+ (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+ ev->u.send_pdu.data_size);
+ break;
+ default:
+ err = -ENOSYS;
+ }
+
+ mutex_unlock(&conn->ep_mutex);
+ return err;
+}
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
u32 portid;
- u32 pdu_len;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
- struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -3684,6 +3879,7 @@
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3708,7 +3904,7 @@
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- queue_work(iscsi_destroy_workq, &session->destroy_work);
+ queue_work(system_unbound_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
@@ -3719,89 +3915,16 @@
else
err = -EINVAL;
break;
- case ISCSI_UEVENT_CREATE_CONN:
- err = iscsi_if_create_conn(transport, ev);
- break;
- case ISCSI_UEVENT_DESTROY_CONN:
- err = iscsi_if_destroy_conn(transport, ev);
- break;
- case ISCSI_UEVENT_BIND_CONN:
- session = iscsi_session_lookup(ev->u.b_conn.sid);
- conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
-
- if (conn && conn->ep)
- iscsi_if_ep_disconnect(transport, conn->ep->id);
-
- if (!session || !conn) {
- err = -EINVAL;
- break;
- }
-
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
- mutex_unlock(&conn_mutex);
-
- if (ev->r.retcode || !transport->ep_connect)
- break;
-
- ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
- if (ep) {
- ep->conn = conn;
-
- mutex_lock(&conn->ep_mutex);
- conn->ep = ep;
- mutex_unlock(&conn->ep_mutex);
- } else
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "Could not set ep conn "
- "binding\n");
- break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
break;
- case ISCSI_UEVENT_START_CONN:
- conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
- if (conn) {
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->start_conn(conn);
- if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
- mutex_unlock(&conn_mutex);
- }
- else
- err = -EINVAL;
- break;
+ case ISCSI_UEVENT_CREATE_CONN:
+ case ISCSI_UEVENT_DESTROY_CONN:
case ISCSI_UEVENT_STOP_CONN:
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (conn)
- iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
- else
- err = -EINVAL;
- break;
+ case ISCSI_UEVENT_START_CONN:
+ case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-
- if ((ev->u.send_pdu.hdr_size > pdu_len) ||
- (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
- err = -EINVAL;
- break;
- }
-
- conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn) {
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->send_pdu(conn,
- (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
- (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
- ev->u.send_pdu.data_size);
- mutex_unlock(&conn_mutex);
- }
- else
- err = -EINVAL;
+ err = iscsi_if_transport_conn(transport, nlh);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -3991,10 +4114,11 @@
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
- if (conn->state >= 0 &&
- conn->state < ARRAY_SIZE(connection_state_names))
- state = connection_state_names[conn->state];
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
@@ -4649,6 +4773,7 @@
int err;
BUG_ON(!tt);
+ WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
priv = iscsi_if_transport_lookup(tt);
if (priv)
@@ -4803,10 +4928,10 @@
goto release_nls;
}
- iscsi_destroy_workq = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, "iscsi_destroy");
- if (!iscsi_destroy_workq) {
+ iscsi_conn_cleanup_workq = alloc_workqueue("%s",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ "iscsi_conn_cleanup");
+ if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
goto destroy_wq;
}
@@ -4836,7 +4961,7 @@
static void __exit iscsi_transport_exit(void)
{
- destroy_workqueue(iscsi_destroy_workq);
+ destroy_workqueue(iscsi_conn_cleanup_workq);
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 4a96fb0..c6256fd 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -716,12 +716,17 @@
int error;
error = device_add(&phy->dev);
- if (!error) {
- transport_add_device(&phy->dev);
- transport_configure_device(&phy->dev);
- }
+ if (error)
+ return error;
- return error;
+ error = transport_add_device(&phy->dev);
+ if (error) {
+ device_del(&phy->dev);
+ return error;
+ }
+ transport_configure_device(&phy->dev);
+
+ return 0;
}
EXPORT_SYMBOL(sas_phy_add);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 56e2917..58f6617 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1074,6 +1074,7 @@
struct bio *bio = rq->bio;
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
+ unsigned int nr_bytes = blk_rq_bytes(rq);
blk_status_t ret;
if (sdkp->device->no_write_same)
@@ -1110,7 +1111,7 @@
*/
rq->__data_len = sdp->sector_size;
ret = scsi_alloc_sgtables(cmd);
- rq->__data_len = blk_rq_bytes(rq);
+ rq->__data_len = nr_bytes;
return ret;
}
@@ -3511,7 +3512,6 @@
out_put:
put_disk(gd);
out_free:
- sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index bfa8d77..e1c086a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -190,7 +190,7 @@
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -444,6 +444,7 @@
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ -466,20 +467,16 @@
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
@@ -938,9 +935,7 @@
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -2093,19 +2088,28 @@
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2159,6 +2163,15 @@
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index de73ade..fcff35e 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4997,10 +4997,10 @@
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index d4f10c0..a3bce11 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -668,16 +668,17 @@
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
- struct st_drvver ver;
+ const struct st_drvver ver = {
+ .major = ST_VER_MAJOR,
+ .minor = ST_VER_MINOR,
+ .oem = ST_OEM,
+ .build = ST_BUILD_VER,
+ .signature[0] = PASSTHRU_SIGNATURE,
+ .console_id = host->max_id - 1,
+ .host_no = hba->host->host_no,
+ };
size_t cp_len = sizeof(ver);
- ver.major = ST_VER_MAJOR;
- ver.minor = ST_VER_MINOR;
- ver.oem = ST_OEM;
- ver.build = ST_BUILD_VER;
- ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = host->max_id - 1;
- ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
cmd->result = sizeof(ver) == cp_len ?
DID_OK << 16 | COMMAND_COMPLETE << 8 :
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 0ee0b80..3fa8a0c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -356,16 +356,21 @@
};
/*
- * SRB status codes and masks; a subset of the codes used here.
+ * SRB status codes and masks. In the 8-bit field, the two high order bits
+ * are flags, while the remaining 6 bits are an integer status code. The
+ * definitions here include only the subset of the integer status codes that
+ * are tested for in this driver.
*/
-
#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_QUEUE_FROZEN 0x40
-#define SRB_STATUS_INVALID_LUN 0x20
-#define SRB_STATUS_SUCCESS 0x01
-#define SRB_STATUS_ABORTED 0x02
-#define SRB_STATUS_ERROR 0x04
-#define SRB_STATUS_DATA_OVERRUN 0x12
+
+/* SRB status integer codes */
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
+#define SRB_STATUS_ERROR 0x04
+#define SRB_STATUS_INVALID_REQUEST 0x06
+#define SRB_STATUS_DATA_OVERRUN 0x12
+#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS(status) \
(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
@@ -995,38 +1000,25 @@
void (*process_err_fn)(struct work_struct *work);
struct hv_host_device *host_dev = shost_priv(host);
- /*
- * In some situations, Hyper-V sets multiple bits in the
- * srb_status, such as ABORTED and ERROR. So process them
- * individually, with the most specific bits first.
- */
+ switch (SRB_STATUS(vm_srb->srb_status)) {
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_ABORTED:
+ case SRB_STATUS_INVALID_REQUEST:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
+ /* Check for capacity change */
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ process_err_fn = storvsc_device_scan;
+ /* Retry the I/O that triggered this. */
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
- if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
- set_host_byte(scmnd, DID_NO_CONNECT);
- process_err_fn = storvsc_remove_lun;
- goto do_work;
- }
-
- if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
- /* Capacity data has changed */
- (asc == 0x2a) && (ascq == 0x9)) {
- process_err_fn = storvsc_device_scan;
/*
- * Retry the I/O that triggered this.
+ * Otherwise, let upper layer deal with the
+ * error when sense message is present
*/
- set_host_byte(scmnd, DID_REQUEUE);
- goto do_work;
- }
- }
-
- if (vm_srb->srb_status & SRB_STATUS_ERROR) {
- /*
- * Let upper layer deal with error when
- * sense message is present.
- */
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
return;
+ }
/*
* If there is an error; offline the device since all
@@ -1049,6 +1041,13 @@
default:
set_host_byte(scmnd, DID_ERROR);
}
+ return;
+
+ case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
+ process_err_fn = storvsc_remove_lun;
+ goto do_work;
+
}
return;
@@ -1997,7 +1996,7 @@
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index eafe0db..122d650 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -29,11 +29,9 @@
return PTR_ERR(regbase);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto disable_pm;
- }
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 20182e3..08331ec 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -623,12 +623,7 @@
return err;
}
- err = ufs_qcom_ice_resume(host);
- if (err)
- return err;
-
- hba->is_sys_suspended = false;
- return 0;
+ return ufs_qcom_ice_resume(host);
}
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
@@ -669,8 +664,11 @@
writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
+ /*
+ * Make sure the write to ref_clk reaches the destination and
+ * not stored in a Write Buffer (WB).
+ */
+ readl(host->dev_ref_clk_ctrl_mmio);
/*
* If we call hibern8 exit after this, we need to make sure that
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 0f2430f..576cc39 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -107,9 +107,20 @@
return ret;
}
+static bool phandle_exists(const struct device_node *np,
+ const char *phandle_name, int index)
+{
+ struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
+
+ if (parse_np)
+ of_node_put(parse_np);
+
+ return parse_np != NULL;
+}
+
#define MAX_PROP_SIZE 32
static int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
+ struct ufs_vreg **out_vreg)
{
int ret = 0;
char prop_name[MAX_PROP_SIZE];
@@ -122,7 +133,7 @@
}
snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
- if (!of_parse_phandle(np, prop_name, 0)) {
+ if (!phandle_exists(np, prop_name, 0)) {
dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
__func__, prop_name);
goto out;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index bf30277..ea6ceab 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -107,8 +107,13 @@
if (!regs)
return -ENOMEM;
- for (pos = 0; pos < len; pos += 4)
+ for (pos = 0; pos < len; pos += 4) {
+ if (offset == 0 &&
+ pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
+ pos <= REG_UIC_ERROR_CODE_DME)
+ continue;
regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+ }
ufshcd_hex_dump(prefix, regs, len);
kfree(regs);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 1d99922..e380941 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -129,11 +129,7 @@
#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
- DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
+#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 75966d3..d87c123 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -333,8 +333,8 @@
u8 tag;
u8 bus;
u8 target;
- u8 vcpuHint;
- u8 unused[59];
+ u16 vcpuHint;
+ u8 unused[58];
} __packed;
/*
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 27b9e2b..7acf919 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -159,6 +159,8 @@
scsi_remove_host(host);
NCR_700_release(host);
+ if (host->base > 0x01000000)
+ iounmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
zorro_release_device(z);
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index f8c08fb..e0ffef6 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -835,6 +835,8 @@
err_device_register:
/* don't care to make the buffer smaller again */
+ put_device(&sdevice->dev);
+ sdevice = NULL;
err_buf_alloc:
siox_master_unlock(smaster);
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index f04b961..ec58091 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -510,9 +510,9 @@
}
ctrl->irq = platform_get_irq(pdev, 0);
- if (!ctrl->irq) {
+ if (ctrl->irq < 0) {
dev_err(&pdev->dev, "no slimbus IRQ\n");
- return -ENODEV;
+ return ctrl->irq;
}
sctrl = &ctrl->ctrl;
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
index 75f87b3..73a2aa3 100644
--- a/drivers/slimbus/stream.c
+++ b/drivers/slimbus/stream.c
@@ -67,10 +67,10 @@
384000,
768000,
0, /* Reserved */
- 110250,
- 220500,
- 441000,
- 882000,
+ 11025,
+ 22050,
+ 44100,
+ 88200,
176400,
352800,
705600,
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
index 78f0f1a..92125dd 100644
--- a/drivers/soc/amlogic/meson-mx-socinfo.c
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -126,6 +126,7 @@
np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
if (np) {
analog_top_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(analog_top_regmap))
return PTR_ERR(analog_top_regmap);
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index 5fb29a4..fff92e2 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -138,8 +138,10 @@
}
pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
- if (!pwrc)
+ if (!pwrc) {
+ of_node_put(sm_np);
return -ENOMEM;
+ }
pwrc->fw = meson_sm_get(sm_np);
of_node_put(sm_np);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index b106233..722fd54 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -681,13 +681,14 @@
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;
/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;
@@ -697,8 +698,10 @@
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}
writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -708,7 +711,8 @@
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -728,17 +732,20 @@
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}
base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;
pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -749,14 +756,18 @@
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}
of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ddr_seq_data = of_id->data;
@@ -776,20 +787,24 @@
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ret = brcmstb_init_sram(dn);
+ of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}
ctrl.pdev = pdev;
ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -809,7 +824,21 @@
out:
kfree(ctrl.s3_params);
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);
return ret;
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 4df32bc..c5d4615 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 091e94c..6b0c433 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -141,7 +141,7 @@
struct device *dev = &pdev->dev;
struct resource *res;
const struct fsl_soc_die_attr *soc_die;
- const char *machine;
+ const char *machine = NULL;
u32 svr;
/* Initialize guts */
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index 6065aaa..8482a48 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -735,7 +735,7 @@
static struct platform_driver ixp4xx_npe_driver = {
.driver = {
.name = "ixp4xx-npe",
- .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+ .of_match_table = ixp4xx_npe_of_match,
},
.probe = ixp4xx_npe_probe,
.remove = ixp4xx_npe_remove,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6a3b69b..d0cf969 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -128,6 +128,7 @@
config QCOM_RPMPD
tristate "Qualcomm RPM Power domain driver"
+ depends on PM
depends on QCOM_SMD_RPM
help
QCOM RPM Power domain driver to support power-domains with
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 70fbe70..2e06f48 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -496,6 +496,7 @@
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
static struct platform_driver qcom_llcc_driver = {
.driver = {
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index f1875dc..1dfdd0b 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -194,18 +194,22 @@
devnode = of_parse_phandle(dev->of_node, "sram", 0);
if (!devnode || !devnode->parent) {
dev_err(dev, "Cannot look up sram phandle\n");
+ of_node_put(devnode);
return ERR_PTR(-ENODEV);
}
pdev = of_find_device_by_node(devnode->parent);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ of_node_put(devnode);
return ERR_PTR(-EPROBE_DEFER);
}
+ of_node_put(devnode);
ocmem = platform_get_drvdata(pdev);
if (!ocmem) {
dev_err(dev, "Cannot get ocmem\n");
+ put_device(&pdev->dev);
return ERR_PTR(-ENODEV);
}
return ocmem;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 4fe88d4..401a0be 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -493,8 +493,10 @@
continue;
ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
goto unroll;
+ }
}
if (!count)
@@ -548,7 +550,7 @@
}
irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
+ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
"aoss-qmp", qmp);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request interrupt\n");
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index f2168e4..c6084c0 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -387,6 +387,9 @@
data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
data->num_domains = num;
for (i = 0; i < num; i++) {
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index d2b5584..41e9294 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -136,6 +136,7 @@
struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
list_del(&state->list);
+ of_node_put(state->of_node);
kfree(state);
}
@@ -169,7 +170,7 @@
kref_init(&state->refcount);
- state->of_node = of_node;
+ state->of_node = of_node_get(of_node);
state->ops = *ops;
state->priv = priv;
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index a9709aa..fb76c8b 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -420,6 +420,7 @@
}
smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(smp2p->ipc_regmap))
return PTR_ERR(smp2p->ipc_regmap);
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index c428d0f..acba67d 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -359,6 +359,7 @@
return 0;
host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(host->ipc_regmap))
return PTR_ERR(host->ipc_regmap);
@@ -510,7 +511,7 @@
for (id = 0; id < smsm->num_hosts; id++) {
ret = smsm_parse_ipc(smsm, id);
if (ret < 0)
- return ret;
+ goto out_put;
}
/* Acquire the main SMSM state vector */
@@ -518,13 +519,14 @@
smsm->num_entries * sizeof(u32));
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate shared state entry\n");
- return ret;
+ goto out_put;
}
states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
if (IS_ERR(states)) {
dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
- return PTR_ERR(states);
+ ret = PTR_ERR(states);
+ goto out_put;
}
/* Acquire the list of interrupt mask vectors */
@@ -532,13 +534,14 @@
ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
- return ret;
+ goto out_put;
}
intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
if (IS_ERR(intr_mask)) {
dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
- return PTR_ERR(intr_mask);
+ ret = PTR_ERR(intr_mask);
+ goto out_put;
}
/* Setup the reference to the local state bits */
@@ -549,7 +552,8 @@
smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
if (IS_ERR(smsm->state)) {
dev_err(smsm->dev, "failed to register qcom_smem_state\n");
- return PTR_ERR(smsm->state);
+ ret = PTR_ERR(smsm->state);
+ goto out_put;
}
/* Register handlers for remote processor entries of interest. */
@@ -579,16 +583,19 @@
}
platform_set_drvdata(pdev, smsm);
+ of_node_put(local_node);
return 0;
unwind_interfaces:
+ of_node_put(node);
for (id = 0; id < smsm->num_entries; id++)
if (smsm->entries[id].domain)
irq_domain_remove(smsm->entries[id].domain);
qcom_smem_state_unregister(smsm->state);
-
+out_put:
+ of_node_put(local_node);
return ret;
}
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
index d464ffa..d0a5434 100644
--- a/drivers/soc/renesas/r8a779a0-sysc.c
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -83,11 +83,11 @@
{ "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
{ "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
{ "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
- { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
- { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
- { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
- { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
- { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR },
+ { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR },
+ { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR },
+ { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR },
+ { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR },
{ "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR },
{ "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 },
{ "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 },
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 494cf2b..343ff61 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -148,12 +148,14 @@
return -ENODEV;
if (!match || !match->data) {
pr_err("%s: missing grf data\n", __func__);
+ of_node_put(np);
return -EINVAL;
}
grf_info = match->data;
grf = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
return PTR_ERR(grf);
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index d4c7bd5..443e38e 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -78,8 +78,8 @@
static struct sunxi_sram_desc sun50i_a64_sram_c = {
.data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
- SUNXI_SRAM_MAP(0, 1, "cpu"),
- SUNXI_SRAM_MAP(1, 0, "de2")),
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
};
static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,6 +254,7 @@
writel(val | ((device << sram_data->offset) & mask),
base + sram_data->reg);
+ sram_desc->claimed = true;
spin_unlock(&sram_lock);
return 0;
@@ -318,12 +319,11 @@
.writeable_reg = sunxi_sram_regmap_accessible_reg,
};
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
sram_dev = &pdev->dev;
@@ -331,18 +331,10 @@
if (!variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
- &sunxi_sram_fops);
- if (!d)
- return -ENOMEM;
-
if (variant->has_emac_clock) {
emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
&sunxi_sram_emac_clock_regmap);
@@ -351,6 +343,10 @@
return PTR_ERR(emac_clock);
}
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
+
return 0;
}
@@ -396,9 +392,8 @@
.name = "sunxi-sram",
.of_match_table = sunxi_sram_dt_match,
},
- .probe = sunxi_sram_probe,
};
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 976dee0..676807c 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -136,7 +136,6 @@
def_bool y
depends on ARCH_TEGRA
select SOC_BUS
- select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC
config SOC_TEGRA_FLOWCTRL
bool
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8afb3f4..a33ec7e 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -183,6 +183,8 @@
devm_kcalloc(dev, max_id + 1,
sizeof(*pd_provider->data.domains),
GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
pd_provider->data.num_domains = max_id + 1;
pd_provider->data.xlate = ti_sci_pd_xlate;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index e9ece45..ef3f95f 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -447,9 +447,9 @@
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq < 0) {
dev_err(&pdev->dev, "no irq resource\n");
- return -ENXIO;
+ return irq;
}
ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 575b9ba..2e8986c 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -184,12 +184,8 @@
drv->driver.owner = owner;
drv->driver.probe = sdw_drv_probe;
-
- if (drv->remove)
- drv->driver.remove = sdw_drv_remove;
-
- if (drv->shutdown)
- drv->driver.shutdown = sdw_drv_shutdown;
+ drv->driver.remove = sdw_drv_remove;
+ drv->driver.shutdown = sdw_drv_shutdown;
return driver_register(&drv->driver);
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index c6d421a..a324769 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -501,9 +501,12 @@
return SDW_CMD_IGNORED;
}
- /* fill response */
- for (i = 0; i < count; i++)
- msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]);
+ if (msg->flags == SDW_MSG_FLAG_READ) {
+ /* fill response */
+ for (i = 0; i < count; i++)
+ msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA,
+ cdns->response_buf[i]);
+ }
return SDW_CMD_OK;
}
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index dad4326..942d2fe 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -521,8 +521,8 @@
/* Clear wake status */
wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
- wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
- intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts);
+ wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
+ intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
}
mutex_unlock(sdw->link_res->shim_lock);
}
@@ -1470,7 +1470,6 @@
ret = intel_register_dai(sdw);
if (ret) {
dev_err(dev, "DAI registration failed: %d\n", ret);
- snd_soc_unregister_component(dev);
goto err_interrupt;
}
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 1e63fd4..8aa89d9 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -277,6 +277,9 @@
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
if (atmel_qspi_find_mode(op) < 0)
return false;
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 7f62954..a027cfd 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -28,6 +28,7 @@
#define AMD_SPI_RX_COUNT_REG 0x4B
#define AMD_SPI_STATUS_REG 0x4C
+#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
/* M_CMD OP codes for SPI */
@@ -245,6 +246,11 @@
return 0;
}
+static size_t amd_spi_max_transfer_size(struct spi_device *spi)
+{
+ return AMD_SPI_FIFO_SIZE;
+}
+
static int amd_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -278,6 +284,8 @@
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = amd_spi_master_setup;
master->transfer_one_message = amd_spi_master_transfer;
+ master->max_transfer_size = amd_spi_max_transfer_size;
+ master->max_message_size = amd_spi_max_transfer_size;
/* Register the controller with SPI framework */
err = devm_spi_register_master(dev, master);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 4a80f04..766b003 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1032,7 +1032,7 @@
addr = op->addr.val;
len = op->data.nbytes;
- if (bcm_qspi_bspi_ver_three(qspi) == true) {
+ if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
/*
* The address coming into this function is a raw flash offset.
* But for BSPI <= V3, we need to convert it to a remapped BSPI
@@ -1051,7 +1051,7 @@
len < 4)
mspi_read = true;
- if (mspi_read)
+ if (!has_bspi(qspi) || mspi_read)
return bcm_qspi_mspi_exec_mem_op(spi, op);
ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 33c32e9..bb9d838 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1174,10 +1174,14 @@
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
- dmaengine_terminate_sync(ctlr->dma_tx);
- bs->tx_dma_active = false;
- dmaengine_terminate_sync(ctlr->dma_rx);
- bs->rx_dma_active = false;
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ }
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ bs->rx_dma_active = false;
+ }
bcm2835_spi_undo_prologue(bs);
/* and reset */
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
index bc9d5ea..8f6a1af 100644
--- a/drivers/spi/spi-dw-bt1.c
+++ b/drivers/spi/spi-dw-bt1.c
@@ -293,8 +293,10 @@
pm_runtime_enable(&pdev->dev);
ret = dw_spi_add_host(&pdev->dev, dws);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
goto err_disable_clk;
+ }
platform_set_drvdata(pdev, dwsbt1);
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index a09831c..32ac8f9 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -127,12 +127,15 @@
dw_spi_dma_sg_burst_init(dws);
+ pci_dev_put(dma_dev);
+
return 0;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
err_exit:
+ pci_dev_put(dma_dev);
return -EBUSY;
}
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 9851551..46ae46a 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -876,6 +876,10 @@
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_put_ctrl;
+ }
q->memmap_phy = res->start;
/* Since there are 4 cs, map size required is 4 times ahb_buf_size */
q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 5f05d51..71376b6 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -731,7 +731,7 @@
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa..6974a1c 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index b4b9b73..351b0ef 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -340,11 +340,9 @@
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n",
- status);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get SYS clock\n");
status = clk_prepare_enable(clk);
if (status)
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 288f6c2..106e3ca 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -895,7 +895,17 @@
static int __maybe_unused mtk_nor_resume(struct device *dev)
{
- return pm_runtime_force_resume(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ mtk_nor_init(sp);
+
+ return 0;
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 96b4182..4fb19e6 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -304,25 +304,21 @@
writel(data, mxic->regs + TXD(nbytes % 4));
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_RX_NOT_EMPTY, 0,
+ USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ data = readl(mxic->regs + RXD);
if (rxbuf) {
- ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
- sts & INT_TX_EMPTY, 0,
- USEC_PER_SEC);
- if (ret)
- return ret;
-
- ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
- sts & INT_RX_NOT_EMPTY, 0,
- USEC_PER_SEC);
- if (ret)
- return ret;
-
- data = readl(mxic->regs + RXD);
data >>= (8 * (4 - nbytes));
memcpy(rxbuf + pos, &data, nbytes);
- WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
- } else {
- readl(mxic->regs + RXD);
}
WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 0d0cd06..7c992d1 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -414,6 +414,7 @@
return status;
err_fck:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi100k->fck);
err_ick:
clk_disable_unprepare(spi100k->ick);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index e4ee8b0..f7603c2 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2315,13 +2315,13 @@
return status;
}
-static int
+static void
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
if (!pl022)
- return 0;
+ return;
/*
* undo pm_runtime_put() in probe. I assume that we're not
@@ -2336,7 +2336,6 @@
clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index aafac12..4eb979a 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -74,14 +74,23 @@
return true;
}
+static void lpss_dma_put_device(void *dma_dev)
+{
+ pci_dev_put(dma_dev);
+}
+
static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
{
struct pci_dev *dma_dev;
+ int ret;
c->num_chipselect = 1;
c->max_clk_rate = 50000000;
dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
if (c->tx_param) {
struct dw_dma_slave *slave = c->tx_param;
@@ -105,8 +114,9 @@
static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
{
- struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
struct dw_dma_slave *tx, *rx;
+ struct pci_dev *dma_dev;
+ int ret;
switch (PCI_FUNC(dev->devfn)) {
case 0:
@@ -131,6 +141,11 @@
return -ENODEV;
}
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0));
+ ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev);
+ if (ret)
+ return ret;
+
tx = c->tx_param;
tx->dma_dev = &dma_dev->dev;
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index d39dec6..f3877ee 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1199,8 +1199,10 @@
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
/* Disable clocks auto gaiting */
config = readl_relaxed(controller->base + QUP_CONFIG);
@@ -1246,14 +1248,25 @@
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
- return ret;
+ goto disable_clk;
- return spi_master_resume(master);
+ ret = spi_master_resume(master);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index e39fd38..4600e3c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -612,6 +612,10 @@
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
+ if (tx)
+ dmaengine_synchronize(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_synchronize(rspi->ctlr->dma_rx);
} else {
if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
@@ -1107,14 +1111,11 @@
}
memset(&cfg, 0, sizeof(cfg));
+ cfg.dst_addr = port_addr + RSPI_SPDR;
+ cfg.src_addr = port_addr + RSPI_SPDR;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV) {
- cfg.dst_addr = port_addr;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else {
- cfg.src_addr = port_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- }
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
@@ -1145,12 +1146,12 @@
}
ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
- res->start + RSPI_SPDR);
+ res->start);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
- res->start + RSPI_SPDR);
+ res->start);
if (!ctlr->dma_rx) {
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index dfa7c91..d435df1 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -84,6 +84,7 @@
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
@@ -660,6 +661,13 @@
return 0;
}
+static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
+}
+
static int s3c64xx_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -1135,6 +1143,7 @@
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
+ master->max_transfer_size = s3c64xx_spi_max_transfer_size;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 4f24f63..9c58dcd 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -295,7 +295,8 @@
if (!op->data.nbytes)
goto wait_nobusy;
- if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
+ if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) ||
+ qspi->fmode == CCR_FMODE_APM)
goto out;
reinit_completion(&qspi->data_completion);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index a6dfc8f..9ec37cf 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -443,7 +443,7 @@
u32 div, mbrdiv;
/* Ensure spi->clk_rate is even */
- div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
+ div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
/*
* SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -941,6 +941,7 @@
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
1);
+ ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&rs))
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index ea706d9..47cbe73 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -783,6 +783,7 @@
ret = synquacer_spi_enable(master);
if (ret) {
+ clk_disable_unprepare(sspi->clk);
dev_err(dev, "failed to enable spi (%d)\n", ret);
return ret;
}
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index a2e5907..ed42665 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1353,6 +1353,10 @@
tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
+ if (spi_irq < 0) {
+ ret = spi_irq;
+ goto exit_free_master;
+ }
tspi->irq = spi_irq;
tspi->clk = devm_clk_get(&pdev->dev, "spi");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 669fc42..9e2b812 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1006,14 +1006,8 @@
struct resource *r;
int ret, spi_irq;
const struct tegra_slink_chip_data *cdata = NULL;
- const struct of_device_id *match;
- match = of_match_device(tegra_slink_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- cdata = match->data;
+ cdata = of_device_get_match_data(&pdev->dev);
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index e06aafe..081da1f 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -448,6 +448,7 @@
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
struct dma_async_tx_descriptor *tx;
int ret;
+ unsigned long time_left;
tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
if (!tx) {
@@ -467,9 +468,9 @@
}
dma_async_issue_pending(chan);
- ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ time_left = wait_for_completion_timeout(&qspi->transfer_complete,
msecs_to_jiffies(len));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_sync(chan);
dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
return -ETIMEDOUT;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 1dd2af9..3d3ac48 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1165,7 +1165,10 @@
goto clk_dis_all;
}
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret)
+ goto clk_dis_all;
+
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
ctlr->mem_ops = &zynqmp_qspi_mem_ops;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8c261ea..857a139 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -881,10 +881,10 @@
int i, ret;
if (vmalloced_buf || kmap_buf) {
- desc_len = min_t(int, max_seg_size, PAGE_SIZE);
+ desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else if (virt_addr_valid(buf)) {
- desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
+ desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
} else {
return -EINVAL;
@@ -946,6 +946,8 @@
if (sgt->orig_nents) {
dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
sg_free_table(sgt);
+ sgt->orig_nents = 0;
+ sgt->nents = 0;
}
}
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bbbd311..e6de2ae 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -887,7 +887,8 @@
* version 5, there is more than one APID mapped to each PPID.
* The owner field for each of these mappings specifies the EE which is
* allowed to write to the APID. The owner of the last (highest) APID
- * for a given PPID will receive interrupts from the PPID.
+ * which has the IRQ owner bit set for a given PPID will receive
+ * interrupts from the PPID.
*/
for (i = 0; ; i++, apidd++) {
offset = pmic_arb->ver_ops->apid_map_offset(i);
@@ -910,16 +911,16 @@
apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
prev_apidd = &pmic_arb->apid_data[apid];
- if (valid && is_irq_ee &&
- prev_apidd->write_ee == pmic_arb->ee) {
+ if (!valid || apidd->write_ee == pmic_arb->ee) {
+ /* First PPID mapping or one for this EE */
+ pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
+ } else if (valid && is_irq_ee &&
+ prev_apidd->write_ee == pmic_arb->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
* override the irq owner
*/
prev_apidd->irq_ee = apidd->irq_ee;
- } else if (!valid || is_irq_ee) {
- /* First PPID mapping or duplicate for another EE */
- pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
}
apidd->ppid = ppid;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e1fe03c..e6d4a3e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -114,6 +114,9 @@
void *vaddr;
if (buffer->kmap_cnt) {
+ if (buffer->kmap_cnt == INT_MAX)
+ return ERR_PTR(-EOVERFLOW);
+
buffer->kmap_cnt++;
return buffer->vaddr;
}
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 7769ead..ccc65cf 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -685,7 +685,7 @@
if (!devpriv->usb_rx_buf)
return -ENOMEM;
- size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
+ size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index 549cb7d..2a20a17 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1384,7 +1384,7 @@
goto err_device;
return cd;
err_device:
- device_unregister(&cd->client->dev);
+ put_device(&cd->client->dev);
err_kthread:
kthread_stop(cd->qthread);
err_reset:
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 42ce6c8..4ed29f8 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -621,8 +621,8 @@
break;
}
if (!data) {
- dev_err(dai->dev, "%s:%s DATA connection missing\n",
- dai->name, module->name);
+ dev_err(dai->dev, "%s DATA connection missing\n",
+ dai->name);
mutex_unlock(&codec->lock);
return -ENODEV;
}
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index a9576f9..08443f4 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -3,7 +3,6 @@
* Greybus Audio Sound SoC helper APIs
*/
-#include <linux/debugfs.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -116,10 +115,6 @@
{
int i;
struct snd_soc_dapm_widget *w, *next_w;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *parent = dapm->debugfs_dapm;
- struct dentry *debugfs_w = NULL;
-#endif
mutex_lock(&dapm->card->dapm_mutex);
for (i = 0; i < num; i++) {
@@ -139,12 +134,6 @@
continue;
}
widget++;
-#ifdef CONFIG_DEBUG_FS
- if (!parent)
- debugfs_w = debugfs_lookup(w->name, parent);
- debugfs_remove(debugfs_w);
- debugfs_w = NULL;
-#endif
gbaudio_dapm_free_widget(w);
}
mutex_unlock(&dapm->card->dapm_mutex);
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index fef0055..20183b2 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -107,9 +107,9 @@
static unsigned int ad7280a_devaddr(unsigned int addr)
{
return ((addr & 0x1) << 4) |
- ((addr & 0x2) << 3) |
+ ((addr & 0x2) << 2) |
(addr & 0x4) |
- ((addr & 0x8) >> 3) |
+ ((addr & 0x8) >> 2) |
((addr & 0x10) >> 4);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
index f638d0b..b1614cc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c
@@ -439,6 +439,18 @@
return 0;
}
+static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd,
+ int i,
+ struct atomisp_acc_fw *acc_fw)
+{
+ while (--i >= 0) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd, acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+}
+
/*
* Appends the loaded acceleration binary extensions to the
* current ISP mode. Must be called just before sh_css_start().
@@ -477,16 +489,20 @@
acc_fw->fw,
acc_flag_to_pipe[i].pipe_id,
acc_fw->type);
- if (ret)
+ if (ret) {
+ atomisp_acc_unload_some_extensions(asd, i, acc_fw);
goto error;
+ }
ext_loaded = true;
}
}
ret = atomisp_css_set_acc_parameters(acc_fw);
- if (ret < 0)
+ if (ret < 0) {
+ atomisp_acc_unload_some_extensions(asd, i, acc_fw);
goto error;
+ }
}
if (!ext_loaded)
@@ -495,6 +511,7 @@
ret = atomisp_css_update_stream(asd);
if (ret) {
dev_err(isp->dev, "%s: update stream failed.\n", __func__);
+ atomisp_acc_unload_extensions(asd);
goto error;
}
@@ -502,13 +519,6 @@
return 0;
error:
- while (--i >= 0) {
- if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
- atomisp_css_unload_acc_extension(asd, acc_fw->fw,
- acc_flag_to_pipe[i].pipe_id);
- }
- }
-
list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 90d50a6..20c19e0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -899,9 +899,9 @@
int err;
unsigned long irqflags;
struct ia_css_frame *frame = NULL;
- struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp;
- struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp;
- struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp, *s3a_iter;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp, *dis_iter;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp, *md_iter;
enum atomisp_metadata_type md_type;
struct atomisp_device *isp = asd->isp;
struct v4l2_control ctrl;
@@ -940,60 +940,75 @@
switch (buf_type) {
case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
- list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp,
+ list_for_each_entry_safe(s3a_iter, _s3a_buf_tmp,
&asd->s3a_stats_in_css, list) {
- if (s3a_buf->s3a_data ==
+ if (s3a_iter->s3a_data ==
buffer.css_buffer.data.stats_3a) {
- list_del_init(&s3a_buf->list);
- list_add_tail(&s3a_buf->list,
+ list_del_init(&s3a_iter->list);
+ list_add_tail(&s3a_iter->list,
&asd->s3a_stats_ready);
+ s3a_buf = s3a_iter;
break;
}
}
asd->s3a_bufs_in_css[css_pipe_id]--;
atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id);
- dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
- __func__, s3a_buf->s3a_data->exp_id);
+ if (s3a_buf)
+ dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
+ __func__, s3a_buf->s3a_data->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: s3a stat is ready with no exp_id found\n",
+ __func__);
break;
case IA_CSS_BUFFER_TYPE_METADATA:
if (error)
break;
md_type = atomisp_get_metadata_type(asd, css_pipe_id);
- list_for_each_entry_safe(md_buf, _md_buf_tmp,
+ list_for_each_entry_safe(md_iter, _md_buf_tmp,
&asd->metadata_in_css[md_type], list) {
- if (md_buf->metadata ==
+ if (md_iter->metadata ==
buffer.css_buffer.data.metadata) {
- list_del_init(&md_buf->list);
- list_add_tail(&md_buf->list,
+ list_del_init(&md_iter->list);
+ list_add_tail(&md_iter->list,
&asd->metadata_ready[md_type]);
+ md_buf = md_iter;
break;
}
}
asd->metadata_bufs_in_css[stream_id][css_pipe_id]--;
atomisp_metadata_ready_event(asd, md_type);
- dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
- __func__, md_buf->metadata->exp_id);
+ if (md_buf)
+ dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
+ __func__, md_buf->metadata->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: metadata is ready with no exp_id found\n",
+ __func__);
break;
case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
- list_for_each_entry_safe(dis_buf, _dis_buf_tmp,
+ list_for_each_entry_safe(dis_iter, _dis_buf_tmp,
&asd->dis_stats_in_css, list) {
- if (dis_buf->dis_data ==
+ if (dis_iter->dis_data ==
buffer.css_buffer.data.stats_dvs) {
spin_lock_irqsave(&asd->dis_stats_lock,
irqflags);
- list_del_init(&dis_buf->list);
- list_add(&dis_buf->list, &asd->dis_stats);
+ list_del_init(&dis_iter->list);
+ list_add(&dis_iter->list, &asd->dis_stats);
asd->params.dis_proj_data_valid = true;
spin_unlock_irqrestore(&asd->dis_stats_lock,
irqflags);
+ dis_buf = dis_iter;
break;
}
}
asd->dis_bufs_in_css--;
- dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
- __func__, dis_buf->dis_data->exp_id);
+ if (dis_buf)
+ dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
+ __func__, dis_buf->dis_data->exp_id);
+ else
+ dev_dbg(isp->dev, "%s: dis stat is ready with no exp_id found\n",
+ __func__);
break;
case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index 34480ca..c9ee850 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -729,6 +729,21 @@
return 0;
}
+/*
+ * Some boards contain a hw-bug where turning eldo2 back on after having turned
+ * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus
+ * to crash, hanging the bus. Do not turn eldo2 off on these systems.
+ */
+static const struct dmi_system_id axp_leave_eldo2_on_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+ },
+ },
+ { }
+};
+
static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
{
int ret;
@@ -763,6 +778,9 @@
if (ret)
return ret;
+ if (dmi_check_system(axp_leave_eldo2_on_ids))
+ return 0;
+
ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
return ret;
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index 6a5ee46..c1cda16 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -39,7 +39,7 @@
struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
-static ia_css_ptr dummy_ptr;
+static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;
@@ -209,7 +209,7 @@
void hmm_cleanup(void)
{
- if (!dummy_ptr)
+ if (dummy_ptr == mmgr_EXCEPTION)
return;
sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
@@ -288,7 +288,8 @@
dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);
- WARN_ON(!virt);
+ if (WARN_ON(virt == mmgr_EXCEPTION))
+ return;
bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index b88dc4e..ed244ae 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -23,7 +23,7 @@
reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
| H1_REG_IN_IMG_CTRL_OVRFLR_D4(0)
- | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0)
+ | H1_REG_IN_IMG_CTRL_OVRFLB(0)
| H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
}
diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h
index d6e9825..30e7e7b 100644
--- a/drivers/staging/media/hantro/hantro_h1_regs.h
+++ b/drivers/staging/media/hantro/hantro_h1_regs.h
@@ -47,7 +47,7 @@
#define H1_REG_IN_IMG_CTRL 0x03c
#define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
#define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
-#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
+#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6)
#define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
#define H1_REG_ENC_CTRL0 0x040
#define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 5c2ca61..b65f2f3 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -644,8 +644,12 @@
* (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
* it to buffer length).
*/
- if (V4L2_TYPE_IS_CAPTURE(vq->type))
- vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ if (ctx->is_encoder)
+ vb2_set_plane_payload(vb, 0, 0);
+ else
+ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ }
return 0;
}
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
index db70227..86ccc89 100644
--- a/drivers/staging/media/meson/vdec/esparser.c
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -328,7 +328,12 @@
offset = esparser_get_offset(sess);
- amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
+ ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags);
+ if (ret) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return ret;
+ }
+
dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n",
vb->timestamp, payload_size, offset, vbuf->flags);
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 5ccb384..7a818ca 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -1109,6 +1109,7 @@
err_vdev_release:
video_device_release(vdev);
+ v4l2_device_unregister(&core->v4l2_dev);
return ret;
}
@@ -1117,6 +1118,7 @@
struct amvdec_core *core = platform_get_drvdata(pdev);
video_unregister_device(core->vdev_dec);
+ v4l2_device_unregister(&core->v4l2_dev);
return 0;
}
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
index 7f07a91..db4a854 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.c
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -227,13 +227,16 @@
}
EXPORT_SYMBOL_GPL(amvdec_set_canvases);
-void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
- struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
+int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 vbuf_flags)
{
struct amvdec_timestamp *new_ts;
unsigned long flags;
new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL);
+ if (!new_ts)
+ return -ENOMEM;
+
new_ts->ts = ts;
new_ts->tc = tc;
new_ts->offset = offset;
@@ -242,6 +245,7 @@
spin_lock_irqsave(&sess->ts_spinlock, flags);
list_add_tail(&new_ts->list, &sess->timestamps);
spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(amvdec_add_ts);
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
index cfaed52..798e5a8 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -55,8 +55,8 @@
* @offset: offset in the VIFIFO where the associated packet was written
* @flags the vb2_v4l2_buffer flags
*/
-void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
- struct v4l2_timecode tc, u32 offset, u32 flags);
+int amvdec_add_ts(struct amvdec_session *sess, u64 ts,
+ struct v4l2_timecode tc, u32 offset, u32 flags);
void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
/**
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c
index 9530e58..afced43 100644
--- a/drivers/staging/media/meson/vdec/vdec_hevc.c
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.c
@@ -167,8 +167,12 @@
clk_set_rate(core->vdec_hevc_clk, 666666666);
ret = clk_prepare_enable(core->vdec_hevc_clk);
- if (ret)
+ if (ret) {
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
return ret;
+ }
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
index 0c934ca..8936f5a 100644
--- a/drivers/staging/media/rkisp1/rkisp1-capture.c
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -1258,11 +1258,12 @@
struct rkisp1_capture *cap = video_get_drvdata(vdev);
const struct rkisp1_capture_fmt_cfg *fmt =
rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat);
- struct v4l2_subdev_format sd_fmt;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = link->source->index,
+ };
int ret;
- sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sd_fmt.pad = link->source->index;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
if (ret)
return ret;
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
index 4dcc342..76f17dd 100644
--- a/drivers/staging/media/rkisp1/rkisp1-resizer.c
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -500,6 +500,10 @@
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_DEF_FMT;
+ sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 5487f6d..ddccd97 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -112,6 +112,7 @@
const struct v4l2_ctrl_h264_sps *sps;
const struct v4l2_ctrl_h264_pps *pps;
const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+ int ref_buf_idx[V4L2_H264_NUM_DPB_ENTRIES];
};
struct rkvdec_h264_ctx {
@@ -661,8 +662,8 @@
WRITE_PPS(0xff, PROFILE_IDC);
WRITE_PPS(1, CONSTRAINT_SET3_FLAG);
WRITE_PPS(sps->chroma_format_idc, CHROMA_FORMAT_IDC);
- WRITE_PPS(sps->bit_depth_luma_minus8 + 8, BIT_DEPTH_LUMA);
- WRITE_PPS(sps->bit_depth_chroma_minus8 + 8, BIT_DEPTH_CHROMA);
+ WRITE_PPS(sps->bit_depth_luma_minus8, BIT_DEPTH_LUMA);
+ WRITE_PPS(sps->bit_depth_chroma_minus8, BIT_DEPTH_CHROMA);
WRITE_PPS(0, QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG);
WRITE_PPS(sps->log2_max_frame_num_minus4, LOG2_MAX_FRAME_NUM_MINUS4);
WRITE_PPS(sps->max_num_ref_frames, MAX_NUM_REF_FRAMES);
@@ -725,6 +726,26 @@
}
}
+static void lookup_ref_buf_idx(struct rkvdec_ctx *ctx,
+ struct rkvdec_h264_run *run)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ int buf_idx = -1;
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ buf_idx = vb2_find_timestamp(cap_q,
+ dpb[i].reference_ts, 0);
+
+ run->ref_buf_idx[i] = buf_idx;
+ }
+}
+
static void assemble_hw_rps(struct rkvdec_ctx *ctx,
struct rkvdec_h264_run *run)
{
@@ -762,7 +783,7 @@
for (j = 0; j < RKVDEC_NUM_REFLIST; j++) {
for (i = 0; i < h264_ctx->reflists.num_valid; i++) {
- u8 dpb_valid = 0;
+ bool dpb_valid = run->ref_buf_idx[i] >= 0;
u8 idx = 0;
switch (j) {
@@ -779,8 +800,6 @@
if (idx >= ARRAY_SIZE(dec_params->dpb))
continue;
- dpb_valid = !!(dpb[idx].flags &
- V4L2_H264_DPB_ENTRY_FLAG_ACTIVE);
set_ps_field(hw_rps, DPB_INFO(i, j),
idx | dpb_valid << 4);
@@ -859,13 +878,8 @@
unsigned int dpb_idx)
{
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
- const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb;
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
- int buf_idx = -1;
-
- if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ int buf_idx = run->ref_buf_idx[dpb_idx];
/*
* If a DPB entry is unused or invalid, address of current destination
@@ -1102,6 +1116,7 @@
assemble_hw_scaling_list(ctx, &run);
assemble_hw_pps(ctx, &run);
+ lookup_ref_buf_idx(ctx, &run);
assemble_hw_rps(ctx, &run);
config_registers(ctx, &run);
@@ -1109,8 +1124,8 @@
schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
+ writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index a7788e7..e384ea8 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -1000,7 +1000,6 @@
static int rkvdec_probe(struct platform_device *pdev)
{
struct rkvdec_dev *rkvdec;
- struct resource *res;
unsigned int i;
int ret, irq;
@@ -1032,8 +1031,7 @@
*/
clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rkvdec->regs = devm_ioremap_resource(&pdev->dev, res);
+ rkvdec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rkvdec->regs))
return PTR_ERR(rkvdec->regs);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 1dd8337..28de90e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -399,6 +399,8 @@
if (!dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, dev);
+
dev->vfd = cedrus_video_device;
dev->dev = &pdev->dev;
dev->pdev = pdev;
@@ -469,8 +471,6 @@
goto err_m2m_mc;
}
- platform_set_drvdata(pdev, dev);
-
return 0;
err_m2m_mc:
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index de7442d..d3e26bf 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -38,7 +38,7 @@
#define CEDRUS_H264_FRAME_NUM 18
-#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
+#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K)
#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 10744fa..20c01a5 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -23,7 +23,7 @@
* Subsequent BSP implementations seem to double the neighbor info buffer size
* for the H6 SoC, which may be related to 10 bit H265 support.
*/
-#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K)
#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
@@ -147,6 +147,9 @@
dpb[i].pic_order_cnt[1]
};
+ if (buffer_index < 0)
+ continue;
+
cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
pic_order_cnt,
buffer_index);
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
index e7fe8da..3f223e5 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/staging/media/zoran/zoran.h
@@ -314,6 +314,6 @@
#endif
-int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq);
+int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir);
void zoran_queue_exit(struct zoran *zr);
int zr_set_buf(struct zoran *zr);
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
index dfc60e2..fe0cca1 100644
--- a/drivers/staging/media/zoran/zoran_card.c
+++ b/drivers/staging/media/zoran/zoran_card.c
@@ -802,6 +802,52 @@
return 0;
}
+static int zoran_init_video_device(struct zoran *zr, struct video_device *video_dev, int dir)
+{
+ int err;
+
+ /* Now add the template and register the device unit. */
+ *video_dev = zoran_template;
+ video_dev->v4l2_dev = &zr->v4l2_dev;
+ video_dev->lock = &zr->lock;
+ video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | dir;
+
+ strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name));
+ /*
+ * It's not a mem2mem device, but you can both capture and output from one and the same
+ * device. This should really be split up into two device nodes, but that's a job for
+ * another day.
+ */
+ video_dev->vfl_dir = VFL_DIR_M2M;
+ zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
+ if (err < 0)
+ return err;
+ video_set_drvdata(video_dev, zr);
+ return 0;
+}
+
+static void zoran_exit_video_devices(struct zoran *zr)
+{
+ video_unregister_device(zr->video_dev);
+ kfree(zr->video_dev);
+}
+
+static int zoran_init_video_devices(struct zoran *zr)
+{
+ int err;
+
+ zr->video_dev = video_device_alloc();
+ if (!zr->video_dev)
+ return -ENOMEM;
+
+ err = zoran_init_video_device(zr, zr->video_dev, V4L2_CAP_VIDEO_CAPTURE);
+ if (err)
+ kfree(zr->video_dev);
+ return err;
+}
+
void zoran_open_init_params(struct zoran *zr)
{
int i;
@@ -873,17 +919,11 @@
zoran_open_init_params(zr);
/* allocate memory *before* doing anything to the hardware in case allocation fails */
- zr->video_dev = video_device_alloc();
- if (!zr->video_dev) {
- err = -ENOMEM;
- goto exit;
- }
zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev,
BUZ_NUM_STAT_COM * sizeof(u32),
&zr->p_sc, GFP_KERNEL);
if (!zr->stat_com) {
- err = -ENOMEM;
- goto exit_video;
+ return -ENOMEM;
}
for (j = 0; j < BUZ_NUM_STAT_COM; j++)
zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
@@ -896,26 +936,9 @@
goto exit_statcom;
}
- /* Now add the template and register the device unit. */
- *zr->video_dev = zoran_template;
- zr->video_dev->v4l2_dev = &zr->v4l2_dev;
- zr->video_dev->lock = &zr->lock;
- zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
-
- strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name));
- /*
- * It's not a mem2mem device, but you can both capture and output from one and the same
- * device. This should really be split up into two device nodes, but that's a job for
- * another day.
- */
- zr->video_dev->vfl_dir = VFL_DIR_M2M;
-
- zoran_queue_init(zr, &zr->vq);
-
- err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
- if (err < 0)
+ err = zoran_init_video_devices(zr);
+ if (err)
goto exit_statcomb;
- video_set_drvdata(zr->video_dev, zr);
zoran_init_hardware(zr);
if (!pass_through) {
@@ -930,9 +953,6 @@
dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
exit_statcom:
dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
-exit_video:
- kfree(zr->video_dev);
-exit:
return err;
}
@@ -964,7 +984,7 @@
dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
pci_release_regions(pdev);
pci_disable_device(zr->pci_dev);
- video_unregister_device(zr->video_dev);
+ zoran_exit_video_devices(zr);
exit_free:
v4l2_ctrl_handler_free(&zr->hdl);
v4l2_device_unregister(&zr->v4l2_dev);
@@ -1068,8 +1088,10 @@
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- return -ENODEV;
- vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+ return err;
+ err = vb2_dma_contig_set_max_seg_size(&pdev->dev, U32_MAX);
+ if (err)
+ return err;
nr = zoran_num++;
if (nr >= BUZ_MAX) {
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
index e569a13..913f5a3 100644
--- a/drivers/staging/media/zoran/zoran_device.c
+++ b/drivers/staging/media/zoran/zoran_device.c
@@ -879,7 +879,7 @@
if (zr->jpg_settings.tmp_dcm == 1)
i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
else
- i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1;
+ i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2;
stat_com = le32_to_cpu(zr->stat_com[i]);
if ((stat_com & 1) == 0) {
@@ -891,6 +891,11 @@
size = (stat_com & GENMASK(22, 1)) >> 1;
buf = zr->inuse[i];
+ if (!buf) {
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ pci_err(zr->pci_dev, "No buffer at slot %d\n", i);
+ return;
+ }
buf->vbuf.vb2_buf.timestamp = ktime_get_ns();
if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
index 808196e..ea04f6c 100644
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ b/drivers/staging/media/zoran/zoran_driver.c
@@ -255,8 +255,6 @@
strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card));
strscpy(cap->driver, "zoran", sizeof(cap->driver));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev));
- cap->device_caps = zr->video_dev->device_caps;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -582,6 +580,9 @@
struct zoran *zr = video_drvdata(file);
int res = 0;
+ if (zr->norm == std)
+ return 0;
+
if (zr->running != ZORAN_MAP_MODE_NONE)
return -EBUSY;
@@ -737,6 +738,7 @@
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ parm->parm.capture.readbuffers = 9;
return 0;
}
@@ -867,6 +869,10 @@
vbuf = &buf->vbuf;
buf->vbuf.field = V4L2_FIELD_INTERLACED;
+ if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2))
+ buf->vbuf.field = V4L2_FIELD_INTERLACED;
+ else
+ buf->vbuf.field = V4L2_FIELD_TOP;
vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size);
vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
zr->inuse[0] = NULL;
@@ -926,6 +932,7 @@
zr->stat_com[j] = cpu_to_le32(1);
zr->inuse[j] = NULL;
}
+ zr->vbseq = 0;
if (zr->map_mode != ZORAN_MAP_MODE_RAW) {
pci_info(zr->pci_dev, "START JPG\n");
@@ -1006,7 +1013,7 @@
.wait_finish = vb2_ops_wait_finish,
};
-int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
+int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir)
{
int err;
@@ -1014,8 +1021,9 @@
INIT_LIST_HEAD(&zr->queued_bufs);
vq->dev = &zr->pci_dev->dev;
- vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
+ vq->type = dir;
+
+ vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
vq->drv_priv = zr;
vq->buf_struct_size = sizeof(struct zr_buffer);
vq->ops = &zr_video_qops;
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index a7c0d31..d48ca5a 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -11,7 +11,8 @@
memory@0 {
device_type = "memory";
- reg = <0x0 0x1c000000>, <0x20000000 0x4000000>;
+ reg = <0x00000000 0x1c000000>,
+ <0x20000000 0x04000000>;
};
chosen {
@@ -37,24 +38,16 @@
gpio-leds {
compatible = "gpio-leds";
- system {
- label = "gb-pc1:green:system";
+ power {
+ label = "green:power";
gpios = <&gpio 6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
};
- status {
- label = "gb-pc1:green:status";
+ system {
+ label = "green:system";
gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
- };
-
- lan1 {
- label = "gb-pc1:green:lan1";
- gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
- };
-
- lan2 {
- label = "gb-pc1:green:lan2";
- gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
};
};
};
@@ -94,9 +87,8 @@
partition@50000 {
label = "firmware";
- reg = <0x50000 0x1FB0000>;
+ reg = <0x50000 0x1fb0000>;
};
-
};
};
@@ -122,9 +114,12 @@
};
&pinctrl {
- state_default: pinctrl0 {
- default_gpio: gpio {
- groups = "wdt", "rgmii2", "uart3";
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: state-default {
+ gpio-pinmux {
+ groups = "rgmii2", "uart3", "wdt";
function = "gpio";
};
};
@@ -133,12 +128,13 @@
&switch0 {
ports {
port@0 {
+ status = "okay";
label = "ethblack";
- status = "ok";
};
+
port@4 {
+ status = "okay";
label = "ethblue";
- status = "ok";
};
};
};
diff --git a/drivers/staging/mt7621-dts/gbpc2.dts b/drivers/staging/mt7621-dts/gbpc2.dts
index 52760e7..6f6fed0 100644
--- a/drivers/staging/mt7621-dts/gbpc2.dts
+++ b/drivers/staging/mt7621-dts/gbpc2.dts
@@ -1,21 +1,121 @@
/dts-v1/;
-#include "gbpc1.dts"
+#include "mt7621.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
/ {
compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc";
model = "GB-PC2";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x1c000000>,
+ <0x20000000 0x04000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+
+ palmbus: palmbus@1e000000 {
+ i2c@900 {
+ status = "okay";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ reset {
+ label = "reset";
+ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_RESTART>;
+ };
+ };
};
-&default_gpio {
- groups = "wdt", "uart3";
- function = "gpio";
+&sdhci {
+ status = "okay";
};
-&gmac1 {
- status = "ok";
+&spi0 {
+ status = "okay";
+
+ m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ broken-flash-reset;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+
+ partition@30000 {
+ label = "u-boot-env";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+
+ factory: partition@40000 {
+ label = "factory";
+ reg = <0x40000 0x10000>;
+ read-only;
+ };
+
+ partition@50000 {
+ label = "firmware";
+ reg = <0x50000 0x1fb0000>;
+ };
+ };
};
-&phy_external {
- status = "ok";
+&pcie {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: state-default {
+ gpio-pinmux {
+ groups = "wdt";
+ function = "gpio";
+ };
+ };
+};
+
+&ethernet {
+ gmac1: mac@1 {
+ status = "okay";
+ phy-handle = <&ethphy7>;
+ };
+
+ mdio-bus {
+ ethphy7: ethernet-phy@7 {
+ reg = <7>;
+ phy-mode = "rgmii-rxid";
+ };
+ };
+};
+
+&switch0 {
+ ports {
+ port@0 {
+ status = "okay";
+ label = "ethblack";
+ };
+
+ port@4 {
+ status = "okay";
+ label = "ethblue";
+ };
+ };
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 27222f7..91a7fa7 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -56,9 +56,9 @@
regulator-max-microvolt = <3300000>;
enable-active-high;
regulator-always-on;
- };
+ };
- mmc_fixed_1v8_io: fixedregulator@1 {
+ mmc_fixed_1v8_io: fixedregulator@1 {
compatible = "regulator-fixed";
regulator-name = "mmc_io";
regulator-min-microvolt = <1800000>;
@@ -412,37 +412,32 @@
mediatek,ethsys = <&ethsys>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>, <&rgmii2_pins>;
gmac0: mac@0 {
compatible = "mediatek,eth-mac";
reg = <0>;
phy-mode = "rgmii";
+
fixed-link {
speed = <1000>;
full-duplex;
pause;
};
};
+
gmac1: mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
status = "off";
phy-mode = "rgmii-rxid";
- phy-handle = <&phy_external>;
};
+
mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- phy_external: ethernet-phy@5 {
- status = "off";
- reg = <5>;
- phy-mode = "rgmii-rxid";
-
- pinctrl-names = "default";
- pinctrl-0 = <&rgmii2_pins>;
- };
-
switch0: switch0@0 {
compatible = "mediatek,mt7621";
#address-cells = <1>;
@@ -456,36 +451,43 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
+
port@0 {
status = "off";
reg = <0>;
label = "lan0";
};
+
port@1 {
status = "off";
reg = <1>;
label = "lan1";
};
+
port@2 {
status = "off";
reg = <2>;
label = "lan2";
};
+
port@3 {
status = "off";
reg = <3>;
label = "lan3";
};
+
port@4 {
status = "off";
reg = <4>;
label = "lan4";
};
+
port@6 {
reg = <6>;
label = "cpu";
ethernet = <&gmac0>;
phy-mode = "trgmii";
+
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index 09b0b8a..2e971cb 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -267,6 +267,8 @@
p->func[i]->pin_count,
sizeof(int),
GFP_KERNEL);
+ if (!p->func[i]->pins)
+ return -ENOMEM;
for (j = 0; j < p->func[i]->pin_count; j++)
p->func[i]->pins[j] = p->func[i]->pin_first + j;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index e8e72f7..aeb6f01 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -651,9 +651,9 @@
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 690b664..56a4476 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -528,9 +528,9 @@
spin_lock_irqsave(&ieee->beacon_lock, flags);
ieee->beacon_txing = 0;
- del_timer_sync(&ieee->beacon_timer);
spin_unlock_irqrestore(&ieee->beacon_lock, flags);
+ del_timer_sync(&ieee->beacon_timer);
}
void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
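The two hunks above (rtl8192e and rtl8192u) move del_timer_sync() out of the beacon_lock critical section. del_timer_sync() waits for a running timer callback to finish; if that callback takes the same spinlock, calling it while holding the lock can deadlock. A minimal sketch of the corrected ordering; the context structure and names here are illustrative, not the driver's:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct beacon_ctx {
	spinlock_t lock;
	struct timer_list timer;
	int txing;
};

/* Timer callback: takes the same lock the stop path holds. */
static void beacon_timeout(struct timer_list *t)
{
	struct beacon_ctx *ctx = from_timer(ctx, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	/* ... re-arm or send the next beacon ... */
	spin_unlock_irqrestore(&ctx->lock, flags);
}

static void beacon_stop(struct beacon_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->txing = 0;			/* state change stays under the lock */
	spin_unlock_irqrestore(&ctx->lock, flags);

	/* Wait for a running callback only after dropping the lock. */
	del_timer_sync(&ctx->timer);
}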
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ec33fb9..57badc1 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1013,7 +1013,7 @@
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
- struct timer_list fsync_timer;
+ struct delayed_work fsync_work;
bool bfsync_processing; /* 500ms Fsync timer is active or not */
u32 rate_record;
u32 rateCountDiffRecord;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index bac402b..6aa424a 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2578,19 +2578,20 @@
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0);
+ INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback);
}
static void dm_deInit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- del_timer_sync(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
}
-void dm_fsync_timer_callback(struct timer_list *t)
+void dm_fsync_work_callback(struct work_struct *work)
{
- struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct r8192_priv *priv =
+ container_of(work, struct r8192_priv, fsync_work.work);
struct net_device *dev = priv->ieee80211->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
@@ -2657,17 +2658,16 @@
}
}
if (bDoubleTimeInterval) {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv
+ ->ieee80211->fsync_time_interval *
+ priv->ieee80211->fsync_multiple_timeinterval));
} else {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv
+ ->ieee80211->fsync_time_interval));
}
} else {
/* Let Register return to default value; */
@@ -2695,7 +2695,7 @@
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- del_timer_sync(&(priv->fsync_timer));
+ cancel_delayed_work_sync(&priv->fsync_work);
/* Let Register return to default value; */
if (priv->bswitch_fsync) {
@@ -2736,11 +2736,9 @@
if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
}
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
}
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 0b2a1c6..2159018 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -166,7 +166,7 @@
void dm_init_edca_turbo(struct net_device *dev);
void dm_rf_operation_test_callback(unsigned long data);
void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_timer_callback(struct timer_list *t);
+void dm_fsync_work_callback(struct work_struct *work);
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
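The r8192U change above replaces the 500 ms fsync timer with a delayed work item, so the handler runs in process context and may sleep. A reduced sketch of the same pattern, with placeholder names rather than the driver's:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct fsync_ctx {
	struct delayed_work work;
	unsigned int interval_ms;
};

static void fsync_work_fn(struct work_struct *work)
{
	struct fsync_ctx *ctx =
		container_of(work, struct fsync_ctx, work.work);

	/* ... check rates, then re-arm ... */
	schedule_delayed_work(&ctx->work,
			      msecs_to_jiffies(ctx->interval_ms));
}

static void fsync_init(struct fsync_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->work, fsync_work_fn);
	ctx->interval_ms = 500;
	schedule_delayed_work(&ctx->work,
			      msecs_to_jiffies(ctx->interval_ms));
}

static void fsync_exit(struct fsync_ctx *ctx)
{
	/* Cancels a pending item and waits for a running one to finish. */
	cancel_delayed_work_sync(&ctx->work);
}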
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 2214aca..daa3180 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -332,7 +332,6 @@
r8712_free_evt_priv(&padapter->evtpriv);
r8712_DeInitSwLeds(padapter);
r8712_free_mlme_priv(&padapter->mlmepriv);
- r8712_free_io_queue(padapter);
_free_xmit_priv(&padapter->xmitpriv);
_r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index ff3cb09..30e965c 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fed96d4..68d66c3 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -266,6 +266,7 @@
static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
{
+ r8712_free_io_queue(padapter);
}
void rtl871x_intf_stop(struct _adapter *padapter)
@@ -303,9 +304,6 @@
rtl8712_hal_deinit(padapter);
}
- /*s6.*/
- if (padapter->dvobj_deinit)
- padapter->dvobj_deinit(padapter);
padapter->bup = false;
}
}
@@ -541,13 +539,13 @@
} else {
AutoloadFail = false;
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
+ if ((!AutoloadFail) ||
+ ((mac[0] == 0xff) && (mac[1] == 0xff) &&
(mac[2] == 0xff) && (mac[3] == 0xff) &&
(mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
- (mac[4] == 0x00) && (mac[5] == 0x00)) ||
- (!AutoloadFail)) {
+ (mac[4] == 0x00) && (mac[5] == 0x00))) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;
@@ -610,6 +608,8 @@
/* Stop driver mlme relation timer */
r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
+ if (padapter->dvobj_deinit)
+ padapter->dvobj_deinit(padapter);
r8712_free_drv_sw(padapter);
free_netdev(pnetdev);
diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c
index e64845e..af9966d 100644
--- a/drivers/staging/rtl8712/usb_ops.c
+++ b/drivers/staging/rtl8712/usb_ops.c
@@ -29,7 +29,8 @@
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -37,8 +38,10 @@
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 1;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u8)(le32_to_cpu(data) & 0x0ff);
}
@@ -49,7 +52,8 @@
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -57,8 +61,10 @@
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 2;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return (u16)(le32_to_cpu(data) & 0xffff);
}
@@ -69,7 +75,8 @@
u16 wvalue;
u16 index;
u16 len;
- __le32 data;
+ int status;
+ __le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;
request = 0x05;
@@ -77,8 +84,10 @@
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 4;
- r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
- requesttype);
+ status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
+ &data, len, requesttype);
+ if (status < 0)
+ return 0;
return le32_to_cpu(data);
}
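The usb_ops.c hunks above initialize the result buffer and check the return value of the control transfer, so a failed read can no longer hand back uninitialized stack data. A hedged sketch of the pattern against a generic helper; vendor_read() is hypothetical, not the driver's API:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Assumed helper: returns negative errno on failure, fills *data on success. */
int vendor_read(u16 addr, __le32 *data, u16 len);

static u16 read_reg16(u16 addr)
{
	__le32 data = 0;	/* never return stack garbage */
	int status;

	status = vendor_read(addr, &data, 2);
	if (status < 0)
		return 0;	/* treat a failed transfer as reading zero */

	return (u16)(le32_to_cpu(data) & 0xffff);
}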
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 2abe205..cee0538 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -165,8 +165,6 @@
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- int res = 0;
-
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
@@ -178,18 +176,16 @@
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = -ENOMEM;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return -ENOMEM;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
if (!pcmdpriv->rsp_allocated_buf) {
- res = -ENOMEM;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return -ENOMEM;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
@@ -199,8 +195,8 @@
pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
-exit:
- return res;
+
+ return 0;
}
static void c2h_wk_callback(_workitem * work);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 902ac81..083ff72 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -1351,9 +1351,11 @@
sec_len = *(pos++); len -= 1;
- if (sec_len > 0 && sec_len <= len) {
+ if (sec_len > 0 &&
+ sec_len <= len &&
+ sec_len <= 32) {
ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
/* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
/* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
ssid_index++;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 38b10fd..95b91fe 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -2280,6 +2280,9 @@
struct vchiq_service *service = find_service_by_handle(handle);
int pos;
+ if (!service)
+ return;
+
while (service->msg_queue_write == service->msg_queue_read +
VCHIQ_MAX_SLOTS) {
if (wait_for_completion_interruptible(&service->msg_queue_pop))
@@ -2299,6 +2302,9 @@
struct vchiq_header *header;
int pos;
+ if (!service)
+ return NULL;
+
if (service->msg_queue_write == service->msg_queue_read)
return NULL;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 09ab6d6..343f0de 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -564,7 +564,7 @@
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD0Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -610,7 +610,7 @@
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD1Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -675,7 +675,7 @@
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD0Rings[i];
kfree(desc->td_info);
}
@@ -715,7 +715,7 @@
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD1Rings[i];
kfree(desc->td_info);
}
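The four vt6655 hunks above fix the unwind loop that frees already-initialized ring entries. "while (--i)" never visits index 0 and underflows when the first entry fails, whereas "while (i--)" walks every index below the one that failed. A minimal sketch of the corrected unwind, assuming a simple allocation per descriptor:

#include <linux/slab.h>

struct ring_desc { void *buf; };

static int init_ring(struct ring_desc *ring, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].buf = kmalloc(64, GFP_KERNEL);
		if (!ring[i].buf)
			goto err_free;
	}
	return 0;

err_free:
	/*
	 * i is the index that failed to allocate; unwind i-1 .. 0.
	 * "while (--i)" would stop before ring[0] and leak it, and
	 * would underflow if the very first entry had failed.
	 */
	while (i--)
		kfree(ring[i].buf);
	return -ENOMEM;
}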
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index e7bc198..d5dacd5 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -309,7 +309,8 @@
wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(wdev->pdata.gpio_wakeup))
- return NULL;
+ goto err;
+
if (wdev->pdata.gpio_wakeup)
gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup");
@@ -328,6 +329,10 @@
return NULL;
return wdev;
+
+err:
+ ieee80211_free_hw(hw);
+ return NULL;
}
int wfx_probe(struct wfx_dev *wdev)
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 16d5a4e..5ae5d94 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -394,6 +394,7 @@
ret = device_register(&tl_hba->dev);
if (ret) {
pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
+ put_device(&tl_hba->dev);
return -ENODEV;
}
@@ -1072,7 +1073,7 @@
*/
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
if (ret)
- goto out;
+ return ERR_PTR(ret);
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 109f019..1eded5c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -831,7 +831,6 @@
attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size;
- attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c6950f1..c283e45 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1676,6 +1676,7 @@
mutex_lock(&udev->cmdr_lock);
page = tcmu_get_block_page(udev, dbi);
if (likely(page)) {
+ get_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
@@ -1714,6 +1715,7 @@
/* For the vmalloc()ed cmd area pages */
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
+ get_page(page);
} else {
uint32_t dbi;
@@ -1724,7 +1726,6 @@
return VM_FAULT_SIGBUS;
}
- get_page(page);
vmf->page = page;
return 0;
}
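The target_core_user hunks above take the extra page reference while the lookup is still protected, instead of after the lock has been dropped, so the page cannot disappear between the lookup and get_page(). A compressed sketch of the idea with hypothetical names:

#include <linux/mm.h>
#include <linux/mutex.h>

struct blk_dev {
	struct mutex lock;
	struct page **blocks;
};

/* Return a page with an extra reference held, or NULL. */
static struct page *blk_get_page(struct blk_dev *dev, unsigned int idx)
{
	struct page *page;

	mutex_lock(&dev->lock);
	page = dev->blocks[idx];
	if (page)
		get_page(page);	/* take the ref while the lookup is still valid */
	mutex_unlock(&dev->lock);

	return page;		/* caller does put_page() when done */
}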
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 0318064..60ffc54 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -80,7 +80,7 @@
rc = device_register(&optee_device->dev);
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
- kfree(optee_device);
+ put_device(&optee_device->dev);
}
return rc;
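Both the tcm_loop and OP-TEE hunks switch the failure path of device_register() from kfree() or a plain return to put_device(). Once the embedded kobject has been initialized, it owns the object and the release callback must free it; a direct kfree() bypasses that and can double-free or leak. A minimal sketch assuming a release callback that does the kfree:

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;
};

static void my_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct my_dev, dev));
}

static int my_dev_create(struct device *parent)
{
	struct my_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	int ret;

	if (!mdev)
		return -ENOMEM;

	mdev->dev.parent = parent;
	mdev->dev.release = my_dev_release;
	dev_set_name(&mdev->dev, "my_dev");

	ret = device_register(&mdev->dev);
	if (ret) {
		/* Not kfree(): drop the kobject ref so ->release frees it. */
		put_device(&mdev->dev);
		return ret;
	}
	return 0;
}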
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index e07f997..9cc4a7b 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -334,6 +334,9 @@
if (data.flags)
return -EINVAL;
+ if (!access_ok((void __user *)(unsigned long)data.addr, data.length))
+ return -EFAULT;
+
shm = tee_shm_register(ctx, data.addr, data.length,
TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
if (IS_ERR(shm))
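The tee_core.c hunk above validates the user-supplied address range with access_ok() before registering it as shared memory, so an obviously bad pointer is rejected with -EFAULT up front. A minimal sketch of the check, with the surrounding ioctl structure simplified to a hypothetical request struct:

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct shm_req {
	u64 addr;
	u64 length;
};

static int validate_shm_req(const struct shm_req *req)
{
	/* Reject kernel addresses and wrapped ranges before touching them. */
	if (!access_ok((void __user *)(unsigned long)req->addr, req->length))
		return -EFAULT;

	return 0;
}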
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 499fccb..6fb4400 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
index 67c2a73..7b536c8a 100644
--- a/drivers/thermal/broadcom/bcm2711_thermal.c
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -38,7 +38,6 @@
int offset = thermal_zone_get_offset(priv->thermal);
u32 val;
int ret;
- long t;
ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val);
if (ret)
@@ -50,9 +49,7 @@
val &= AVS_RO_TEMP_STATUS_DATA_MSK;
/* Convert a HW code to a temperature reading (millidegree celsius) */
- t = slope * val + offset;
-
- *temp = t < 0 ? 0 : t;
+ *temp = slope * val + offset;
return 0;
}
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 475ce29..85ab9ed 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -60,6 +60,9 @@
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
sr_thermal->regs = (void __iomem *)devm_memremap(&pdev->dev, res->start,
resource_size(res),
MEMREMAP_WB);
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 8d76dbf..331a241 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -94,8 +94,8 @@
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor) {
of_node_put(child);
- of_node_put(sensor_np);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
ret = thermal_zone_of_get_sensor_id(child,
@@ -124,7 +124,9 @@
dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
}
+put_node:
of_node_put(sensor_np);
+ of_node_put(np);
return ret;
}
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 793d7b5..2891386 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -53,7 +53,7 @@
struct art *arts;
int trt_count;
struct trt *trts;
- u8 uuid_bitmap;
+ u32 uuid_bitmap;
int rel_misc_dev_res;
int current_uuid_index;
char *data_vault;
@@ -67,7 +67,7 @@
struct odvp_attr {
int odvp;
struct int3400_thermal_priv *priv;
- struct kobj_attribute attr;
+ struct device_attribute attr;
};
static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
@@ -269,7 +269,7 @@
return result;
}
-static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
+static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct odvp_attr *odvp_attr;
@@ -466,6 +466,11 @@
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
+ if (!priv->data_vault) {
+ kfree(buffer.pointer);
+ return;
+ }
+
bin_attr_data_vault.private = priv->data_vault;
bin_attr_data_vault.size = obj->package.elements[0].buffer.length;
kfree(buffer.pointer);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index b0eb5ec..fb04470 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -531,9 +531,7 @@
get_online_cpus();
/* prefer BSP */
- control_cpu = 0;
- if (!cpu_online(control_cpu))
- control_cpu = smp_processor_id();
+ control_cpu = cpumask_first(cpu_online_mask);
clamping = true;
schedule_delayed_work(&poll_pkg_cstate_work, 0);
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 4ffa2e2..9b8ba42 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -522,7 +522,7 @@
struct tsens_plat_data data_8939 = {
.num_sensors = 10,
.ops = &ops_8939,
- .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10 },
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d9e34ac..dd44994 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1092,10 +1092,7 @@
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
- int result;
-
- if (type && strlen(type) >= THERMAL_NAME_LENGTH)
- return ERR_PTR(-EINVAL);
+ int id, ret;
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
!ops->set_cur_state)
@@ -1105,14 +1102,18 @@
if (!cdev)
return ERR_PTR(-ENOMEM);
- result = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
- if (result < 0) {
- kfree(cdev);
- return ERR_PTR(result);
+ ret = ida_simple_get(&thermal_cdev_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out_kfree_cdev;
+ cdev->id = ret;
+ id = ret;
+
+ cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+ if (!cdev->type) {
+ ret = -ENOMEM;
+ goto out_ida_remove;
}
- cdev->id = result;
- strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->np = np;
@@ -1122,12 +1123,9 @@
cdev->devdata = devdata;
thermal_cooling_device_setup_sysfs(cdev);
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
- result = device_register(&cdev->device);
- if (result) {
- ida_simple_remove(&thermal_cdev_ida, cdev->id);
- put_device(&cdev->device);
- return ERR_PTR(result);
- }
+ ret = device_register(&cdev->device);
+ if (ret)
+ goto out_kfree_type;
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -1145,6 +1143,17 @@
mutex_unlock(&thermal_list_lock);
return cdev;
+
+out_kfree_type:
+ thermal_cooling_device_destroy_sysfs(cdev);
+ kfree(cdev->type);
+ put_device(&cdev->device);
+ cdev = NULL;
+out_ida_remove:
+ ida_simple_remove(&thermal_cdev_ida, id);
+out_kfree_cdev:
+ kfree(cdev);
+ return ERR_PTR(ret);
}
/**
@@ -1303,6 +1312,7 @@
ida_simple_remove(&thermal_cdev_ida, cdev->id);
device_del(&cdev->device);
thermal_cooling_device_destroy_sysfs(cdev);
+ kfree(cdev->type);
put_device(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
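The thermal_core.c rework above replaces the fixed-size type string with a kstrdup() copy and unwinds each acquired resource (IDA id, type string, device reference) through a ladder of goto labels when registration fails. A reduced sketch of that unwind pattern, with placeholder names rather than the thermal core's:

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/string.h>

static DEFINE_IDA(obj_ida);

struct obj {
	int id;
	char *type;
};

static struct obj *obj_create(const char *type)
{
	struct obj *o;
	int ret;

	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (!o)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&obj_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_obj;
	o->id = ret;

	o->type = kstrdup(type ? type : "", GFP_KERNEL);
	if (!o->type) {
		ret = -ENOMEM;
		goto out_ida_remove;
	}

	return o;

out_ida_remove:
	ida_simple_remove(&obj_ida, o->id);
out_free_obj:
	kfree(o);
	return ERR_PTR(ret);
}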
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index f52708f..05e9a3d 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -893,12 +893,13 @@
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
{
+ const struct attribute_group *stats_attr_group = NULL;
struct cooling_dev_stats *stats;
unsigned long states;
int var;
if (cdev->ops->get_max_state(cdev, &states))
- return;
+ goto out;
states++; /* Total number of states is highest state + 1 */
@@ -908,7 +909,7 @@
stats = kzalloc(var, GFP_KERNEL);
if (!stats)
- return;
+ goto out;
stats->time_in_state = (ktime_t *)(stats + 1);
stats->trans_table = (unsigned int *)(stats->time_in_state + states);
@@ -918,9 +919,12 @@
spin_lock_init(&stats->lock);
+ stats_attr_group = &cooling_device_stats_attr_group;
+
+out:
/* Fill the empty slot left in cooling_device_attr_groups */
var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
- cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+ cooling_device_attr_groups[var] = stats_attr_group;
}
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 9894b8f..772acb1 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -396,7 +396,7 @@
static int tb_async_error(const struct ctl_pkg *pkg)
{
- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+ const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 82c46b2..b2fb339 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2300,6 +2300,18 @@
icm->rtd3_veto = icm_icl_rtd3_veto;
tb->cm_ops = &icm_icl_ops;
break;
+
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
+ icm->is_supported = icm_tgl_is_supported;
+ icm->get_mode = icm_ar_get_mode;
+ icm->driver_ready = icm_tr_driver_ready;
+ icm->device_connected = icm_tr_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ tb->cm_ops = &icm_tr_ops;
+ break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 4e0861d..7ad6d3f 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,8 @@
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index c4b157c..e881b72 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2046,6 +2046,7 @@
* additional capabilities.
*/
sw->config.cmuv = USB4_VERSION_1_0;
+ sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
@@ -2412,6 +2413,26 @@
tb_lc_unconfigure_port(down);
}
+static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ tb_switch_for_each_port(sw, port) {
+ int res;
+
+ if (!port->cap_usb4)
+ continue;
+
+ res = usb4_port_hotplug_enable(port);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -2479,6 +2500,10 @@
return ret;
}
+ ret = tb_switch_port_hotplug_enable(sw);
+ if (ret)
+ return ret;
+
ret = device_add(&sw->dev);
if (ret) {
dev_err(&sw->dev, "failed to add device: %d\n", ret);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 8ea360b..266f3bf 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -979,6 +979,7 @@
const struct tb_port *port);
int usb4_port_unlock(struct tb_port *port);
+int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index e7d9529..26868e2 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -285,6 +285,7 @@
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
+#define ADP_CS_5_DHP BIT(31)
/* TMU adapter registers */
#define TMU_ADP_CS_3 0x03
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index c05ec6f..0b3a77a 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -854,6 +854,26 @@
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
+/**
+ * usb4_port_hotplug_enable() - Enables hotplug for a port
+ * @port: USB4 port to operate on
+ *
+ * Enables hot plug events on a given port. This is only intended
+ * to be used on lane, DP-IN, and DP-OUT adapters.
+ */
+int usb4_port_hotplug_enable(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_5_DHP;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+}
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
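usb4_port_hotplug_enable() above is a read-modify-write of the adapter config space: read ADP_CS_5, clear the "disable hotplug" bit, write it back. The same pattern in isolation, against a hypothetical accessor pair standing in for tb_port_read()/tb_port_write():

#include <linux/bits.h>
#include <linux/types.h>

#define REG_CTRL	0x05
#define CTRL_DHP	BIT(31)	/* 1 = hotplug events disabled */

/* Assumed accessors; return 0 on success, negative errno otherwise. */
int reg_read(void *dev, u32 reg, u32 *val);
int reg_write(void *dev, u32 reg, u32 val);

static int hotplug_enable(void *dev)
{
	u32 val;
	int ret;

	ret = reg_read(dev, REG_CTRL, &val);
	if (ret)
		return ret;

	val &= ~CTRL_DHP;	/* clearing the bit enables hotplug events */
	return reg_write(dev, REG_CTRL, val);
}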
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 0972663..4bcaab2 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -556,7 +556,7 @@
}
info->idle_stats.recv_idle = jiffies;
}
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
/* end of service */
cyy_writeb(info, CyRIR, save_xir & 0x3f);
@@ -996,7 +996,7 @@
mod_timer(&info->rx_full_timer, jiffies + 1);
#endif
info->idle_stats.recv_idle = jiffies;
- tty_schedule_flip(&info->port);
+ tty_flip_buffer_push(&info->port);
/* Update rx_get */
cy_writel(&buf_ctrl->rx_get, new_rx_get);
@@ -1172,7 +1172,7 @@
if (delta_count)
wake_up_interruptible(&info->port.delta_msr_wait);
if (special_count)
- tty_schedule_flip(&info->port);
+ tty_flip_buffer_push(&info->port);
}
}
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index c8c5cdf..d6e82eb 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -151,7 +151,7 @@
address = (unsigned long)(void *)buf;
goldfish_tty_rw(qtty, address, count, 0);
- tty_schedule_flip(&qtty->port);
+ tty_flip_buffer_push(&qtty->port);
return IRQ_HANDLED;
}
@@ -407,6 +407,7 @@
err_tty_register_device_failed:
free_irq(irq, qtty);
err_dec_line_count:
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
@@ -427,7 +428,8 @@
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
- free_irq(qtty->irq, pdev);
+ free_irq(qtty->irq, qtty);
+ tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 2af1e57..796fbff 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1470,7 +1470,9 @@
*/
static int __init hvc_iucv_config(char *val)
{
- return kstrtoul(val, 10, &hvc_iucv_devices);
+ if (kstrtoul(val, 10, &hvc_iucv_devices))
+ pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
+ return 1;
}
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index f9f1410..d6528f3 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1385,7 +1385,7 @@
if (inited && !tty_throttled(tty) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
- tty_schedule_flip(&p->port);
+ tty_flip_buffer_push(&p->port);
}
} else {
clear_bit(EMPTYWAIT, &p->statusflags);
@@ -1410,7 +1410,7 @@
if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
tty_insert_flip_char(&p->port, 0, TTY_BREAK);
- tty_schedule_flip(&p->port);
+ tty_flip_buffer_push(&p->port);
}
if (intr & IntrLine)
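The cyclades, goldfish and moxa hunks above replace tty_schedule_flip() with tty_flip_buffer_push(); received characters are still staged with tty_insert_flip_*() and then handed to the line discipline. A minimal receive-path sketch of that pairing:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void rx_chars(struct tty_port *port, const unsigned char *buf, int n)
{
	int i;

	for (i = 0; i < n; i++)
		tty_insert_flip_char(port, buf[i], TTY_NORMAL);

	/* Hand the staged characters to the line discipline. */
	tty_flip_buffer_push(port);
}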
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 3703987..8344265 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -858,6 +858,7 @@
struct mxser_port *info = container_of(port, struct mxser_port, port);
unsigned long page;
unsigned long flags;
+ int ret;
page = __get_free_page(GFP_KERNEL);
if (!page)
@@ -867,9 +868,9 @@
if (!info->ioaddr || !info->type) {
set_bit(TTY_IO_ERROR, &tty->flags);
- free_page(page);
spin_unlock_irqrestore(&info->slock, flags);
- return 0;
+ ret = 0;
+ goto err_free_xmit;
}
info->port.xmit_buf = (unsigned char *) page;
@@ -895,8 +896,10 @@
if (capable(CAP_SYS_ADMIN)) {
set_bit(TTY_IO_ERROR, &tty->flags);
return 0;
- } else
- return -ENODEV;
+ }
+
+ ret = -ENODEV;
+ goto err_free_xmit;
}
/*
@@ -941,6 +944,10 @@
spin_unlock_irqrestore(&info->slock, flags);
return 0;
+err_free_xmit:
+ free_page(page);
+ info->port.xmit_buf = NULL;
+ return ret;
}
/*
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 05562b3..e852828 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -72,6 +72,8 @@
*/
#define MAX_MRU 1500
#define MAX_MTU 1500
+/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
+#define PROT_OVERHEAD 7
#define GSM_NET_TX_TIMEOUT (HZ*10)
/**
@@ -230,9 +232,10 @@
int initiator; /* Did we initiate connection */
bool dead; /* Has the mux been shut down */
struct gsm_dlci *dlci[NUM_DLCI];
+ int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
- spinlock_t tx_lock;
+ struct mutex tx_mutex;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
@@ -415,6 +418,27 @@
}
/**
+ * gsm_read_ea_val - read a value until EA
+ * @val: variable holding value
+ * @data: buffer of data
+ * @dlen: length of data
+ *
+ * Processes an EA value. Updates the passed variable and
+ * returns the processed data length.
+ */
+static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
+{
+ unsigned int len = 0;
+
+ for (; dlen > 0; dlen--) {
+ len++;
+ if (gsm_read_ea(val, *data++))
+ break;
+ }
+ return len;
+}
+
+/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
@@ -651,6 +675,37 @@
}
/**
+ * gsm_is_flow_ctrl_msg - checks if flow control message
+ * @msg: message to check
+ *
+ * Returns true if the given message is a flow control command of the
+ * control channel. False is returned in any other case.
+ */
+static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
+{
+ unsigned int cmd;
+
+ if (msg->addr > 0)
+ return false;
+
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ cmd = 0;
+ if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
+ break;
+ switch (cmd & ~PF) {
+ case CMD_FCOFF:
+ case CMD_FCON:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+/**
* gsm_data_kick - poke the queue
* @gsm: GSM Mux
*
@@ -668,7 +723,7 @@
int len;
list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
- if (gsm->constipated && msg->addr)
+ if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
continue;
if (gsm->encoding != 0) {
gsm->txframe[0] = GSM1_SOF;
@@ -765,15 +820,14 @@
*
* Add data to the transmit queue and try and get stuff moving
* out of the mux tty if not already doing so. Take the
- * the gsm tx lock and dlci lock.
+ * the gsm tx mutex and dlci lock.
*/
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
- unsigned long flags;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
__gsm_data_queue(dlci, msg);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/**
@@ -785,48 +839,58 @@
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
- int len, total_size, size;
- int h = dlci->adaption - 1;
+ int h, len, size;
- total_size = 0;
- while (1) {
- len = kfifo_len(&dlci->fifo);
- if (len == 0)
- return total_size;
+ /* for modem bits without break data */
+ h = ((dlci->adaption == 1) ? 0 : 1);
- /* MTU/MRU count only the data bits */
- if (len > gsm->mtu)
- len = gsm->mtu;
+ len = kfifo_len(&dlci->fifo);
+ if (len == 0)
+ return 0;
- size = len + h;
+ /* MTU/MRU count only the data bits but watch adaption mode */
+ if ((len + h) > gsm->mtu)
+ len = gsm->mtu - h;
- msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
- if (msg == NULL)
- return -ENOMEM;
- dp = msg->data;
- switch (dlci->adaption) {
- case 1: /* Unstructured */
- break;
- case 2: /* Unstructed with modem bits.
- Always one byte as we never send inline break data */
- *dp++ = gsm_encode_modem(dlci);
- break;
- }
- WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
- __gsm_data_queue(dlci, msg);
- total_size += size;
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ /* FIXME: need a timer or something to kick this so it can't
+ * get stuck with no work outstanding and no buffer free
+ */
+ if (!msg)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits.
+ * Always one byte as we never send inline break data
+ */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ break;
}
+
+ WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
+ &dlci->lock));
+
+ /* Notify upper layer about available send space. */
+ tty_port_tty_wakeup(&dlci->port);
+
+ __gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
- return total_size;
+ return size;
}
/**
@@ -838,7 +902,7 @@
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -854,7 +918,7 @@
if (dlci->adaption == 4)
overhead = 1;
- /* dlci->skb is locked by tx_lock */
+ /* dlci->skb is locked by tx_mutex */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
@@ -954,13 +1018,12 @@
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
- unsigned long flags;
int sweep;
if (dlci->constipated)
return;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
@@ -971,7 +1034,7 @@
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/*
@@ -1193,7 +1256,6 @@
const u8 *data, int clen)
{
u8 buf[1];
- unsigned long flags;
switch (command) {
case CMD_CLD: {
@@ -1215,9 +1277,9 @@
gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
gsm_data_kick(gsm, NULL);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
@@ -1295,11 +1357,12 @@
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
- struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype);
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype);
if (msg == NULL)
return;
- msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
- memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */
+ msg->data[1] = (ctrl->len << 1) | EA;
+ memcpy(msg->data + 2, ctrl->data, ctrl->len);
gsm_data_queue(gsm->dlci[0], msg);
}
@@ -1322,8 +1385,7 @@
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- gsm->cretries--;
- if (gsm->cretries == 0) {
+ if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
@@ -1331,6 +1393,7 @@
wake_up(&gsm->event);
return;
}
+ gsm->cretries--;
gsm_control_transmit(gsm, ctrl);
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
}
@@ -1353,7 +1416,7 @@
unsigned int command, u8 *data, int clen)
{
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
- GFP_KERNEL);
+ GFP_ATOMIC);
unsigned long flags;
if (ctrl == NULL)
return NULL;
@@ -1371,7 +1434,7 @@
/* If DLCI0 is in ADM mode skip retries, it won't respond */
if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
- gsm->cretries = 1;
+ gsm->cretries = 0;
else
gsm->cretries = gsm->n2;
@@ -1419,13 +1482,19 @@
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
+ unsigned long flags;
+
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
+ /* Prevent us from sending data before the link is up again */
+ dlci->constipated = true;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
+ spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
@@ -1450,6 +1519,7 @@
del_timer(&dlci->t1);
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
+ dlci->constipated = false;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
wake_up(&dlci->gsm->event);
@@ -1477,8 +1547,8 @@
switch (dlci->state) {
case DLCI_OPENING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
@@ -1493,8 +1563,8 @@
break;
case DLCI_CLOSING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
@@ -1528,6 +1598,25 @@
}
/**
+ * gsm_dlci_set_opening - change state to opening
+ * @dlci: DLCI to open
+ *
+ * Change internal state to wait for DLCI open from initiator side.
+ * We set off timers and responses upon reception of an SABM.
+ */
+static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
+{
+ switch (dlci->state) {
+ case DLCI_CLOSED:
+ case DLCI_CLOSING:
+ dlci->state = DLCI_OPENING;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* gsm_dlci_begin_close - start channel open procedure
* @dlci: DLCI to open
*
@@ -1665,10 +1754,13 @@
dlci->addr = addr;
dlci->adaption = gsm->adaption;
dlci->state = DLCI_CLOSED;
- if (addr)
+ if (addr) {
dlci->data = gsm_dlci_data;
- else
+ /* Prevent us from sending data before the link is up */
+ dlci->constipated = true;
+ } else {
dlci->data = gsm_dlci_command;
+ }
gsm->dlci[addr] = dlci;
return dlci;
}
@@ -1810,7 +1902,6 @@
gsm_response(gsm, address, UA);
gsm_dlci_close(dlci);
break;
- case UA:
case UA|PF:
if (cr == 0 || dlci == NULL)
break;
@@ -1844,7 +1935,7 @@
goto invalid;
#endif
if (dlci == NULL || dlci->state != DLCI_OPEN) {
- gsm_command(gsm, address, DM|PF);
+ gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
@@ -1954,6 +2045,16 @@
static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
{
+ /* handle XON/XOFF */
+ if ((c & ISO_IEC_646_MASK) == XON) {
+ gsm->constipated = true;
+ return;
+ } else if ((c & ISO_IEC_646_MASK) == XOFF) {
+ gsm->constipated = false;
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm, NULL);
+ return;
+ }
if (c == GSM1_SOF) {
/* EOF is only valid in frame if we have got to the data state
and received at least one byte (the FCS) */
@@ -1968,7 +2069,8 @@
}
/* Any partial frame was a runt so go back to start */
if (gsm->state != GSM_START) {
- gsm->malformed++;
+ if (gsm->state != GSM_SEARCH)
+ gsm->malformed++;
gsm->state = GSM_START;
}
/* A SOF in GSM_START means we are still reading idling or
@@ -2040,74 +2142,43 @@
gsm->io_error++;
}
-static int gsm_disconnect(struct gsm_mux *gsm)
-{
- struct gsm_dlci *dlci = gsm->dlci[0];
- struct gsm_control *gc;
-
- if (!dlci)
- return 0;
-
- /* In theory disconnecting DLCI 0 is sufficient but for some
- modems this is apparently not the case. */
- gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
- if (gc)
- gsm_control_wait(gsm, gc);
-
- del_timer_sync(&gsm->t2_timer);
- /* Now we are sure T2 has stopped */
-
- gsm_dlci_begin_close(dlci);
- wait_event_interruptible(gsm->event,
- dlci->state == DLCI_CLOSED);
-
- if (signal_pending(current))
- return -EINTR;
-
- return 0;
-}
-
/**
* gsm_cleanup_mux - generic GSM protocol cleanup
* @gsm: our mux
+ * @disc: disconnect link?
*
* Clean up the bits of the mux which are the same for all framing
* protocols. Remove the mux from the mux table, stop all the timers
* and then shut down each device hanging up the channels as we go.
*/
-static void gsm_cleanup_mux(struct gsm_mux *gsm)
+static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
{
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
struct gsm_msg *txq, *ntxq;
gsm->dead = true;
-
- spin_lock(&gsm_mux_lock);
- for (i = 0; i < MAX_MUX; i++) {
- if (gsm_mux[i] == gsm) {
- gsm_mux[i] = NULL;
- break;
- }
- }
- spin_unlock(&gsm_mux_lock);
- /* open failed before registering => nothing to do */
- if (i == MAX_MUX)
- return;
-
- del_timer_sync(&gsm->t2_timer);
- /* Now we are sure T2 has stopped */
- if (dlci)
- dlci->dead = true;
-
- /* Free up any link layer users */
mutex_lock(&gsm->mutex);
- for (i = 0; i < NUM_DLCI; i++)
+
+ if (dlci) {
+ if (disc && dlci->state != DLCI_CLOSED) {
+ gsm_dlci_begin_close(dlci);
+ wait_event(gsm->event, dlci->state == DLCI_CLOSED);
+ }
+ dlci->dead = true;
+ }
+
+ /* Finish outstanding timers, making sure they are done */
+ del_timer_sync(&gsm->t2_timer);
+
+ /* Free up any link layer users and finally the control channel */
+ for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
+ tty_ldisc_flush(gsm->tty);
list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_list);
@@ -2125,30 +2196,12 @@
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
- int i = 0;
-
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- init_waitqueue_head(&gsm->event);
- spin_lock_init(&gsm->control_lock);
- spin_lock_init(&gsm->tx_lock);
if (gsm->encoding == 0)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
- spin_lock(&gsm_mux_lock);
- for (i = 0; i < MAX_MUX; i++) {
- if (gsm_mux[i] == NULL) {
- gsm->num = i;
- gsm_mux[i] = gsm;
- break;
- }
- }
- spin_unlock(&gsm_mux_lock);
- if (i == MAX_MUX)
- return -EBUSY;
-
dlci = gsm_dlci_alloc(gsm, 0);
if (dlci == NULL)
return -ENOMEM;
@@ -2164,6 +2217,16 @@
*/
static void gsm_free_mux(struct gsm_mux *gsm)
{
+ int i;
+
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm == gsm_mux[i]) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ mutex_destroy(&gsm->tx_mutex);
+ mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
@@ -2183,12 +2246,20 @@
static inline void mux_get(struct gsm_mux *gsm)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm_mux_lock, flags);
kref_get(&gsm->ref);
+ spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
static inline void mux_put(struct gsm_mux *gsm)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm_mux_lock, flags);
kref_put(&gsm->ref, gsm_free_muxr);
+ spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
@@ -2209,6 +2280,7 @@
static struct gsm_mux *gsm_alloc_mux(void)
{
+ int i;
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
if (gsm == NULL)
return NULL;
@@ -2217,7 +2289,7 @@
kfree(gsm);
return NULL;
}
- gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL);
if (gsm->txframe == NULL) {
kfree(gsm->buf);
kfree(gsm);
@@ -2225,8 +2297,12 @@
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
+ mutex_init(&gsm->tx_mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_list);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2238,6 +2314,27 @@
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
+ /* Store the instance to the mux array or abort if no space is
+ * available.
+ */
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (!gsm_mux[i]) {
+ gsm_mux[i] = gsm;
+ gsm->num = i;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX) {
+ mutex_destroy(&gsm->tx_mutex);
+ mutex_destroy(&gsm->mutex);
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+
return gsm;
}
@@ -2264,6 +2361,7 @@
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
{
+ int ret = 0;
int need_close = 0;
int need_restart = 0;
@@ -2273,7 +2371,7 @@
/* Check the MRU/MTU range looks sane */
if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
return -EINVAL;
- if (c->n2 < 3)
+ if (c->n2 > 255)
return -EINVAL;
if (c->encapsulation > 1) /* Basic, advanced, no I */
return -EINVAL;
@@ -2304,19 +2402,11 @@
/*
* Close down what is needed, restart and initiate the new
- * configuration
+ * configuration. On the first time there is no DLCI[0]
+ * and closing or cleaning up is not necessary.
*/
-
- if (need_close || need_restart) {
- int ret;
-
- ret = gsm_disconnect(gsm);
-
- if (ret)
- return ret;
- }
- if (need_restart)
- gsm_cleanup_mux(gsm);
+ if (need_close || need_restart)
+ gsm_cleanup_mux(gsm, true);
gsm->initiator = c->initiator;
gsm->mru = c->mru;
@@ -2339,10 +2429,13 @@
* FIXME: We need to separate activation/deactivation from adding
* and removing from the mux array
*/
- if (need_restart)
- gsm_activate_mux(gsm);
- if (gsm->initiator && need_close)
- gsm_dlci_begin_open(gsm->dlci[0]);
+ if (gsm->dead) {
+ ret = gsm_activate_mux(gsm);
+ if (ret)
+ return ret;
+ if (gsm->initiator)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ }
return 0;
}
@@ -2385,6 +2478,9 @@
int ret, i;
gsm->tty = tty_kref_get(tty);
+ /* Turn off tty XON/XOFF handling to handle it explicitly. */
+ gsm->old_c_iflag = tty->termios.c_iflag;
+ tty->termios.c_iflag &= (IXON | IXOFF);
ret = gsm_activate_mux(gsm);
if (ret != 0)
tty_kref_put(gsm->tty);
@@ -2425,7 +2521,8 @@
WARN_ON(tty != gsm->tty);
for (i = 1; i < NUM_DLCI; i++)
tty_unregister_device(gsm_tty_driver, base + i);
- gsm_cleanup_mux(gsm);
+ /* Restore tty XON/XOFF handling. */
+ gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
gsm->tty = NULL;
}
@@ -2493,6 +2590,12 @@
{
struct gsm_mux *gsm = tty->disc_data;
+ /* The ldisc locks and closes the port before calling our close. This
+ * means we have no way to do a proper disconnect. We will not bother
+ * to do one.
+ */
+ gsm_cleanup_mux(gsm, false);
+
gsmld_detach_gsm(tty, gsm);
gsmld_flush_buffer(tty);
@@ -2531,7 +2634,7 @@
ret = gsmld_attach_gsm(tty, gsm);
if (ret != 0) {
- gsm_cleanup_mux(gsm);
+ gsm_cleanup_mux(gsm, false);
mux_put(gsm);
}
return ret;
@@ -2549,16 +2652,15 @@
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
/* Queue poll */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
gsm_data_kick(gsm, NULL);
if (gsm->tx_bytes < TX_THRESH_LO) {
gsm_dlci_data_sweep(gsm);
}
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -2600,11 +2702,23 @@
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
- int space = tty_write_room(tty);
+ struct gsm_mux *gsm = tty->disc_data;
+ int space;
+ int ret;
+
+ if (!gsm)
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+ mutex_lock(&gsm->tx_mutex);
+ space = tty_write_room(tty);
if (space >= nr)
- return tty->ops->write(tty, buf, nr);
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return -ENOBUFS;
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ mutex_unlock(&gsm->tx_mutex);
+
+ return ret;
}
/**
@@ -2629,12 +2743,15 @@
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+
+ if (gsm->dead)
+ mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (gsm->dead)
- mask |= EPOLLHUP;
return mask;
}
@@ -2888,19 +3005,17 @@
static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
{
- u8 modembits[5];
+ u8 modembits[3];
struct gsm_control *ctrl;
int len = 2;
- if (brk)
+ modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
+ modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
+ if (brk) {
+ modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */
len++;
-
- modembits[0] = len << 1 | EA; /* Data bytes */
- modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
- modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
- if (brk)
- modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
- ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ }
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len);
if (ctrl == NULL)
return -ENOMEM;
return gsm_control_wait(dlci->gsm, ctrl);
@@ -3008,6 +3123,7 @@
{
struct gsm_dlci *dlci = tty->driver_data;
struct tty_port *port = &dlci->port;
+ struct gsm_mux *gsm = dlci->gsm;
port->count++;
tty_port_tty_set(port, tty);
@@ -3017,7 +3133,10 @@
a DM straight back. This is ok as that will have caused a hangup */
tty_port_set_initialized(port, 1);
/* Start sending off SABM messages */
- gsm_dlci_begin_open(dlci);
+ if (gsm->initiator)
+ gsm_dlci_begin_open(dlci);
+ else
+ gsm_dlci_set_opening(dlci);
/* And wait for virtual carrier */
return tty_port_block_til_ready(port, tty, filp);
}
@@ -3085,13 +3204,17 @@
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ unsigned long flags;
+
if (dlci->state == DLCI_CLOSED)
return;
/* Caution needed: If we implement reliable transport classes
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
away */
+ spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
/* Need to unhook this DLCI from the transmit queue logic */
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5819013..12dde01 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2073,6 +2073,35 @@
return ldata->read_tail != canon_head;
}
+/*
+ * If we finished a read at the exact location of an
+ * EOF (special EOL character that's a __DISABLED_CHAR)
+ * in the stream, silently eat the EOF.
+ */
+static void canon_skip_eof(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t tail, canon_head;
+
+ canon_head = smp_load_acquire(&ldata->canon_head);
+ tail = ldata->read_tail;
+
+ // No data?
+ if (tail == canon_head)
+ return;
+
+ // See if the tail position is EOF in the circular buffer
+ tail &= (N_TTY_BUF_SIZE - 1);
+ if (!test_bit(tail, ldata->read_flags))
+ return;
+ if (read_buf(ldata, tail) != __DISABLED_CHAR)
+ return;
+
+ // Clear the EOL bit, skip the EOF char.
+ clear_bit(tail, ldata->read_flags);
+ smp_store_release(&ldata->read_tail, ldata->read_tail + 1);
+}
+
/**
* job_control - check job control
* @tty: tty
@@ -2142,7 +2171,14 @@
*/
if (*cookie) {
if (ldata->icanon && !L_EXTPROC(tty)) {
- if (canon_copy_from_read_buf(tty, &kb, &nr))
+ /*
+ * If we have filled the user buffer, see
+ * if we should skip an EOF character before
+ * releasing the lock and returning done.
+ */
+ if (!nr)
+ canon_skip_eof(tty);
+ else if (canon_copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
} else {
if (copy_from_read_buf(tty, &kb, &nr))
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 23368ce..16498f5 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -111,21 +111,11 @@
static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
- unsigned long flags;
- if (tty->stopped)
+ if (tty->stopped || !c)
return 0;
- if (c > 0) {
- spin_lock_irqsave(&to->port->lock, flags);
- /* Stuff the data into the input queue of the other end */
- c = tty_insert_flip_string(to->port, buf, c);
- spin_unlock_irqrestore(&to->port->lock, flags);
- /* And shovel */
- if (c)
- tty_flip_buffer_push(to->port);
- }
- return c;
+ return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
}
/**
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 34aa271..b6dc900 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -119,6 +119,28 @@
up->port.serial_out(&up->port, offset, value);
}
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+ serial_out(up, UART_SCR, offset);
+ serial_out(up, UART_ICR, value);
+}
+
+static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up,
+ int offset)
+{
+ unsigned int value;
+
+ serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+ serial_out(up, UART_SCR, offset);
+ value = serial_in(up, UART_ICR);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return value;
+}
+
void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p);
static inline int serial_dl_read(struct uart_8250_port *up)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index cae61d1..0a7e949 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -23,6 +23,7 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
@@ -309,10 +310,9 @@
jiffies + uart_poll_timeout(&up->port) + HZ / 5);
}
-static int univ8250_setup_irq(struct uart_8250_port *up)
+static void univ8250_setup_timer(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
- int retval = 0;
/*
* The above check will only give an accurate result the first time
@@ -331,12 +331,18 @@
* hardware interrupt, we use a timer-based system. The original
* driver used to do this with IRQ0.
*/
- if (!port->irq) {
+ if (!port->irq)
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
- } else
- retval = serial_link_irq_chain(up);
+}
- return retval;
+static int univ8250_setup_irq(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ if (port->irq)
+ return serial_link_irq_chain(up);
+
+ return 0;
}
static void univ8250_release_irq(struct uart_8250_port *up)
@@ -392,6 +398,7 @@
static const struct uart_8250_ops univ8250_driver_ops = {
.setup_irq = univ8250_setup_irq,
.release_irq = univ8250_release_irq,
+ .setup_timer = univ8250_setup_timer,
};
static struct uart_8250_port serial8250_ports[UART_NR];
@@ -571,6 +578,9 @@
up->port.dev = dev;
+ if (uart_console_enabled(&up->port))
+ pm_runtime_get_sync(up->port.dev);
+
serial8250_apply_quirks(up);
uart_add_one_port(drv, &up->port);
}
@@ -762,6 +772,7 @@
if (!console_suspend_enabled && uart_console(port) &&
port->type != PORT_8250) {
unsigned char canary = 0xa5;
+
serial_out(up, UART_SCR, canary);
if (serial_in(up, UART_SCR) == canary)
up->canary = canary;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 890fa7d..b3c3f7e 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -64,10 +64,19 @@
struct uart_8250_dma *dma = p->dma;
struct circ_buf *xmit = &p->port.state->xmit;
struct dma_async_tx_descriptor *desc;
+ struct uart_port *up = &p->port;
int ret;
- if (dma->tx_running)
+ if (dma->tx_running) {
+ if (up->x_char) {
+ dmaengine_pause(dma->txchan);
+ uart_xchar_out(up, UART_TX);
+ dmaengine_resume(dma->txchan);
+ }
return 0;
+ } else if (up->x_char) {
+ uart_xchar_out(up, UART_TX);
+ }
if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
/* We have been called from __dma_tx_complete() */
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4955973..ace221a 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -124,12 +124,15 @@
/* Returns once the transmitter is empty or we run out of retries */
static void dw8250_tx_wait_empty(struct uart_port *p)
{
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int tries = 20000;
unsigned int delay_threshold = tries - 1000;
unsigned int lsr;
while (tries--) {
lsr = readb (p->membase + (UART_LSR << p->regshift));
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
if (lsr & UART_LSR_TEMT)
break;
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 251f001..dba5950 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -200,12 +200,12 @@
if (!pdata)
return -EINVAL;
- /* Hardware do not support same RTS level on send and receive */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
if (rs485->flags & SER_RS485_ENABLED) {
+ /* Hardware do not support same RTS level on send and receive */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
} else {
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 4dee8a9..1349c16 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -121,8 +121,7 @@
{
struct dw_dma_slave *param = &lpss->dma_param;
struct pci_dev *pdev = to_pci_dev(port->dev);
- unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
- struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn);
+ struct pci_dev *dma_dev;
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_BYT_UART1:
@@ -141,6 +140,8 @@
return -EINVAL;
}
+ dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+
param->dma_dev = &dma_dev->dev;
param->m_master = 0;
param->p_master = 1;
@@ -156,11 +157,26 @@
return 0;
}
+static void byt_serial_exit(struct lpss8250 *lpss)
+{
+ struct dw_dma_slave *param = &lpss->dma_param;
+
+ /* Paired with pci_get_slot() in the byt_serial_setup() above */
+ put_device(param->dma_dev);
+}
+
static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
{
return 0;
}
+static void ehl_serial_exit(struct lpss8250 *lpss)
+{
+ struct uart_8250_port *up = serial8250_get_port(lpss->data.line);
+
+ up->dma = NULL;
+}
+
#ifdef CONFIG_SERIAL_8250_DMA
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
.nr_channels = 2,
@@ -252,8 +268,13 @@
struct dw_dma_slave *rx_param, *tx_param;
struct device *dev = port->port.dev;
- if (!lpss->dma_param.dma_dev)
+ if (!lpss->dma_param.dma_dev) {
+ dma = port->dma;
+ if (dma)
+ goto out_configuration_only;
+
return 0;
+ }
rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
if (!rx_param)
@@ -264,16 +285,18 @@
return -ENOMEM;
*rx_param = lpss->dma_param;
- dma->rxconf.src_maxburst = lpss->dma_maxburst;
-
*tx_param = lpss->dma_param;
- dma->txconf.dst_maxburst = lpss->dma_maxburst;
dma->fn = lpss8250_dma_filter;
dma->rx_param = rx_param;
dma->tx_param = tx_param;
port->dma = dma;
+
+out_configuration_only:
+ dma->rxconf.src_maxburst = lpss->dma_maxburst;
+ dma->txconf.dst_maxburst = lpss->dma_maxburst;
+
return 0;
}
@@ -335,8 +358,7 @@
return 0;
err_exit:
- if (lpss->board->exit)
- lpss->board->exit(lpss);
+ lpss->board->exit(lpss);
pci_free_irq_vectors(pdev);
return ret;
}
@@ -347,8 +369,7 @@
serial8250_unregister_port(lpss->data.line);
- if (lpss->board->exit)
- lpss->board->exit(lpss);
+ lpss->board->exit(lpss);
pci_free_irq_vectors(pdev);
}
@@ -356,12 +377,14 @@
.freq = 100000000,
.base_baud = 2764800,
.setup = byt_serial_setup,
+ .exit = byt_serial_exit,
};
static const struct lpss8250_board ehl_board = {
.freq = 200000000,
.base_baud = 12500000,
.setup = ehl_serial_setup,
+ .exit = ehl_serial_exit,
};
static const struct lpss8250_board qrk_board = {
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index efa0515..e6c1791 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -73,6 +73,11 @@
return 0;
}
+static void pnw_exit(struct mid8250 *mid)
+{
+ pci_dev_put(mid->dma_dev);
+}
+
static int tng_handle_irq(struct uart_port *p)
{
struct mid8250 *mid = p->private_data;
@@ -124,6 +129,11 @@
return 0;
}
+static void tng_exit(struct mid8250 *mid)
+{
+ pci_dev_put(mid->dma_dev);
+}
+
static int dnv_handle_irq(struct uart_port *p)
{
struct mid8250 *mid = p->private_data;
@@ -330,9 +340,9 @@
pci_set_drvdata(pdev, mid);
return 0;
+
err:
- if (mid->board->exit)
- mid->board->exit(mid);
+ mid->board->exit(mid);
return ret;
}
@@ -342,8 +352,7 @@
serial8250_unregister_port(mid->line);
- if (mid->board->exit)
- mid->board->exit(mid);
+ mid->board->exit(mid);
}
static const struct mid8250_board pnw_board = {
@@ -351,6 +360,7 @@
.freq = 50000000,
.base_baud = 115200,
.setup = pnw_setup,
+ .exit = pnw_exit,
};
static const struct mid8250_board tng_board = {
@@ -358,6 +368,7 @@
.freq = 38400000,
.base_baud = 1843200,
.setup = tng_setup,
+ .exit = tng_exit,
};
static const struct mid8250_board dnv_board = {
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index fb65dc6..de48a58 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -37,6 +37,7 @@
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
+#define MTK_UART_EFR 38 /* I/O: Extended Features Register */
#define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */
#define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */
#define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */
@@ -53,6 +54,9 @@
#define MTK_UART_TX_TRIGGER 1
#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
+#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
+#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
+
#ifdef CONFIG_SERIAL_8250_DMA
enum dma_rx_status {
DMA_RX_START = 0,
@@ -169,7 +173,7 @@
MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
@@ -232,7 +236,7 @@
int lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
lcr = serial_in(up, UART_LCR);
@@ -241,7 +245,7 @@
serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
@@ -255,8 +259,8 @@
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable hw flow control*/
- serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC |
- (serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC |
+ (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
serial_out(up, UART_LCR, lcr);
@@ -270,12 +274,12 @@
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable sw flow control */
- serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
- (serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
+ (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
- serial_out(up, UART_XON1, START_CHAR(port->state->port.tty));
- serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty));
+ serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty));
+ serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty));
serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 537bee8..483fff3 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -157,7 +157,11 @@
return readl(up->port.membase + (reg << up->port.regshift));
}
-static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+/*
+ * Called on runtime PM resume path from omap8250_restore_regs(), and
+ * omap8250_set_mctrl().
+ */
+static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
@@ -181,6 +185,20 @@
}
}
+static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ int err;
+
+ err = pm_runtime_resume_and_get(port->dev);
+ if (err)
+ return;
+
+ __omap8250_set_mctrl(port, mctrl);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+}
+
/*
* Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
* The access to uart register after MDR1 Access
@@ -193,27 +211,10 @@
static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
struct omap8250_priv *priv)
{
- u8 timeout = 255;
-
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
- /*
- * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
- * TX_FIFO_E bit is 1.
- */
- while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
- (UART_LSR_THRE | UART_LSR_DR))) {
- timeout--;
- if (!timeout) {
- /* Should *never* happen. we warn and carry on */
- dev_crit(up->port.dev, "Errata i202: timedout %x\n",
- serial_in(up, UART_LSR));
- break;
- }
- udelay(1);
- }
}
static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
@@ -292,6 +293,7 @@
{
struct omap8250_priv *priv = up->port.private_data;
struct uart_8250_dma *dma = up->dma;
+ u8 mcr = serial8250_in_MCR(up);
if (dma && dma->tx_running) {
/*
@@ -308,7 +310,7 @@
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial8250_out_MCR(up, UART_MCR_TCRTLR);
+ serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR);
serial_out(up, UART_FCR, up->fcr);
omap8250_update_scr(up, priv);
@@ -324,7 +326,8 @@
serial_out(up, UART_LCR, 0);
/* drop TCR + TLR access, we setup XON/XOFF later */
- serial8250_out_MCR(up, up->mcr);
+ serial8250_out_MCR(up, mcr);
+
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
@@ -341,7 +344,10 @@
omap8250_update_mdr1(up, priv);
- up->port.ops->set_mctrl(&up->port, up->port.mctrl);
+ __omap8250_set_mctrl(&up->port, up->port.mctrl);
+
+ if (up->port.rs485.flags & SER_RS485_ENABLED)
+ serial8250_em485_stop_tx(up);
}
/*
@@ -680,7 +686,6 @@
pm_runtime_get_sync(port->dev);
- up->mcr = 0;
serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_LCR, UART_LCR_WLEN8);
@@ -1471,9 +1476,15 @@
static int omap8250_remove(struct platform_device *pdev)
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
+ int err;
+
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ return err;
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
cpu_latency_qos_remove_request(&priv->pm_qos_request);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3a985e9..b665689 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -75,13 +75,12 @@
static void moan_device(const char *str, struct pci_dev *dev)
{
- dev_err(&dev->dev,
- "%s: %s\n"
+ pci_err(dev, "%s\n"
"Please send the output of lspci -vv, this\n"
"message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
"manufacturer and name of serial board or\n"
"modem board to <linux-serial@vger.kernel.org>.\n",
- pci_name(dev), str, dev->vendor, dev->device,
+ str, dev->vendor, dev->device,
dev->subsystem_vendor, dev->subsystem_device);
}
@@ -238,7 +237,7 @@
/* is firmware started? */
pci_read_config_dword(dev, 0x44, &oldval);
if (oldval == 0x00001000L) { /* RESET value */
- dev_dbg(&dev->dev, "Local i960 firmware missing\n");
+ pci_dbg(dev, "Local i960 firmware missing\n");
return -ENODEV;
}
return 0;
@@ -588,9 +587,8 @@
* (0,2,3,5,6: serial only -- 7,8,9: serial + parallel)
*/
if ((dev->subsystem_device & 0x00f0) >= 0x70) {
- dev_info(&dev->dev,
- "ignoring Timedia subdevice %04x for parport_serial\n",
- dev->subsystem_device);
+ pci_info(dev, "ignoring Timedia subdevice %04x for parport_serial\n",
+ dev->subsystem_device);
return -ENODEV;
}
@@ -827,8 +825,7 @@
if (sub_serports > 0)
return sub_serports;
- dev_err(&dev->dev,
- "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
+ pci_err(dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
return 0;
}
@@ -897,18 +894,16 @@
/* enable IO_Space bit */
#define ITE_887x_POSIO_ENABLE (1 << 31)
+/* inta_addr are the configuration addresses of the ITE */
+static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0, 0x200, 0x280 };
static int pci_ite887x_init(struct pci_dev *dev)
{
- /* inta_addr are the configuration addresses of the ITE */
- static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0,
- 0x200, 0x280, 0 };
int ret, i, type;
struct resource *iobase = NULL;
u32 miscr, uartbar, ioport;
/* search for the base-ioport */
- i = 0;
- while (inta_addr[i] && iobase == NULL) {
+ for (i = 0; i < ARRAY_SIZE(inta_addr); i++) {
iobase = request_region(inta_addr[i], ITE_887x_IOSIZE,
"ite887x");
if (iobase != NULL) {
@@ -925,13 +920,11 @@
break;
}
release_region(iobase->start, ITE_887x_IOSIZE);
- iobase = NULL;
}
- i++;
}
- if (!inta_addr[i]) {
- dev_err(&dev->dev, "ite887x: could not find iobase\n");
+ if (i == ARRAY_SIZE(inta_addr)) {
+ pci_err(dev, "could not find iobase\n");
return -ENODEV;
}
@@ -1001,43 +994,29 @@
}
/*
- * EndRun Technologies.
- * Determine the number of ports available on the device.
+ * Oxford Semiconductor Inc.
+ * Check if an OxSemi device is part of the Tornado range of devices.
*/
#define PCI_VENDOR_ID_ENDRUN 0x7401
#define PCI_DEVICE_ID_ENDRUN_1588 0xe100
-static int pci_endrun_init(struct pci_dev *dev)
+static bool pci_oxsemi_tornado_p(struct pci_dev *dev)
{
- u8 __iomem *p;
- unsigned long deviceID;
- unsigned int number_uarts = 0;
+ /* OxSemi Tornado devices are all 0xCxxx */
+ if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+ (dev->device & 0xf000) != 0xc000)
+ return false;
- /* EndRun device is all 0xexxx */
+ /* EndRun devices are all 0xExxx */
if (dev->vendor == PCI_VENDOR_ID_ENDRUN &&
- (dev->device & 0xf000) != 0xe000)
- return 0;
+ (dev->device & 0xf000) != 0xe000)
+ return false;
- p = pci_iomap(dev, 0, 5);
- if (p == NULL)
- return -ENOMEM;
-
- deviceID = ioread32(p);
- /* EndRun device */
- if (deviceID == 0x07000200) {
- number_uarts = ioread8(p + 4);
- dev_dbg(&dev->dev,
- "%d ports detected on EndRun PCI Express device\n",
- number_uarts);
- }
- pci_iounmap(dev, p);
- return number_uarts;
+ return true;
}
/*
- * Oxford Semiconductor Inc.
- * Check that device is part of the Tornado range of devices, then determine
- * the number of ports available on the device.
+ * Determine the number of ports available on a Tornado device.
*/
static int pci_oxsemi_tornado_init(struct pci_dev *dev)
{
@@ -1045,9 +1024,7 @@
unsigned long deviceID;
unsigned int number_uarts = 0;
- /* OxSemi Tornado devices are all 0xCxxx */
- if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
- (dev->device & 0xF000) != 0xC000)
+ if (!pci_oxsemi_tornado_p(dev))
return 0;
p = pci_iomap(dev, 0, 5);
@@ -1058,9 +1035,10 @@
/* Tornado device */
if (deviceID == 0x07000200) {
number_uarts = ioread8(p + 4);
- dev_dbg(&dev->dev,
- "%d ports detected on Oxford PCI Express device\n",
- number_uarts);
+ pci_dbg(dev, "%d ports detected on %s PCI Express device\n",
+ number_uarts,
+ dev->vendor == PCI_VENDOR_ID_ENDRUN ?
+ "EndRun" : "Oxford");
}
pci_iounmap(dev, p);
return number_uarts;
@@ -1120,15 +1098,15 @@
{ 0, }
};
-static int pci_quatech_amcc(u16 devid)
+static int pci_quatech_amcc(struct pci_dev *dev)
{
struct quatech_feature *qf = &quatech_cards[0];
while (qf->devid) {
- if (qf->devid == devid)
+ if (qf->devid == dev->device)
return qf->amcc;
qf++;
}
- pr_err("quatech: unknown port type '0x%04X'.\n", devid);
+ pci_err(dev, "unknown port type '0x%04X'.\n", dev->device);
return 0;
};
@@ -1291,7 +1269,7 @@
static int pci_quatech_init(struct pci_dev *dev)
{
- if (pci_quatech_amcc(dev->device)) {
+ if (pci_quatech_amcc(dev)) {
unsigned long base = pci_resource_start(dev, 0);
if (base) {
u32 tmp;
@@ -1315,7 +1293,7 @@
port->port.uartclk = pci_quatech_clock(port);
/* For now just warn about RS422 */
if (pci_quatech_rs422(port))
- pr_warn("quatech: software control of RS422 features not currently supported.\n");
+ pci_warn(priv->dev, "software control of RS422 features not currently supported.\n");
return pci_default_setup(priv, board, port, idx);
}
@@ -1529,7 +1507,7 @@
/* Get the io address from configuration space */
pci_read_config_word(pdev, config_base + 4, &iobase);
- dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%x", __func__, idx, iobase);
+ pci_dbg(pdev, "idx=%d iobase=0x%x", idx, iobase);
port->port.iotype = UPIO_PORT;
port->port.iobase = iobase;
@@ -1553,7 +1531,6 @@
resource_size_t bar_data[3];
u8 config_base;
struct serial_private *priv = pci_get_drvdata(dev);
- struct uart_8250_port *port;
if (!(pci_resource_flags(dev, 5) & IORESOURCE_IO) ||
!(pci_resource_flags(dev, 4) & IORESOURCE_IO) ||
@@ -1600,13 +1577,7 @@
pci_write_config_byte(dev, config_base + 0x06, dev->irq);
- if (priv) {
- /* re-apply RS232/485 mode when
- * pciserial_resume_ports()
- */
- port = serial8250_get_port(priv->line[i]);
- pci_fintek_rs485_config(&port->port, NULL);
- } else {
+ if (!priv) {
/* First init without port data
* force init to RS232 Mode
*/
@@ -1693,7 +1664,7 @@
struct uart_8250_port *port, int idx)
{
port->port.quirks |= UPQ_NO_TXEN_TEST;
- dev_dbg(&priv->dev->dev,
+ pci_dbg(priv->dev,
"serial8250: skipping TxEn test for device [%04x:%04x] subsystem [%04x:%04x]\n",
priv->dev->vendor, priv->dev->device,
priv->dev->subsystem_vendor, priv->dev->subsystem_device);
@@ -2517,7 +2488,7 @@
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .init = pci_endrun_init,
+ .init = pci_oxsemi_tornado_init,
.setup = pci_default_setup,
},
/*
@@ -2862,7 +2833,7 @@
pbn_b0_2_1843200,
pbn_b0_4_1843200,
- pbn_b0_1_4000000,
+ pbn_b0_1_3906250,
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
@@ -2940,12 +2911,11 @@
pbn_panacom2,
pbn_panacom4,
pbn_plx_romulus,
- pbn_endrun_2_4000000,
pbn_oxsemi,
- pbn_oxsemi_1_4000000,
- pbn_oxsemi_2_4000000,
- pbn_oxsemi_4_4000000,
- pbn_oxsemi_8_4000000,
+ pbn_oxsemi_1_3906250,
+ pbn_oxsemi_2_3906250,
+ pbn_oxsemi_4_3906250,
+ pbn_oxsemi_8_3906250,
pbn_intel_i960,
pbn_sgi_ioc3,
pbn_computone_4,
@@ -2983,6 +2953,10 @@
pbn_sunix_pci_4s,
pbn_sunix_pci_8s,
pbn_sunix_pci_16s,
+ pbn_titan_1_4000000,
+ pbn_titan_2_4000000,
+ pbn_titan_4_4000000,
+ pbn_titan_8_4000000,
pbn_moxa8250_2p,
pbn_moxa8250_4p,
pbn_moxa8250_8p,
@@ -3088,10 +3062,10 @@
.uart_offset = 8,
},
- [pbn_b0_1_4000000] = {
+ [pbn_b0_1_3906250] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 8,
},
@@ -3463,20 +3437,6 @@
},
/*
- * EndRun Technologies
- * Uses the size of PCI Base region 0 to
- * signal now many ports are available
- * 2 port 952 Uart support
- */
- [pbn_endrun_2_4000000] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 4000000,
- .uart_offset = 0x200,
- .first_offset = 0x1000,
- },
-
- /*
* This board uses the size of PCI Base region 0 to
* signal now many ports are available
*/
@@ -3486,31 +3446,31 @@
.base_baud = 115200,
.uart_offset = 8,
},
- [pbn_oxsemi_1_4000000] = {
+ [pbn_oxsemi_1_3906250] = {
.flags = FL_BASE0,
.num_ports = 1,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_2_4000000] = {
+ [pbn_oxsemi_2_3906250] = {
.flags = FL_BASE0,
.num_ports = 2,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_4_4000000] = {
+ [pbn_oxsemi_4_3906250] = {
.flags = FL_BASE0,
.num_ports = 4,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
- [pbn_oxsemi_8_4000000] = {
+ [pbn_oxsemi_8_3906250] = {
.flags = FL_BASE0,
.num_ports = 8,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
@@ -3770,6 +3730,34 @@
.base_baud = 921600,
.uart_offset = 0x8,
},
+ [pbn_titan_1_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_titan_2_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_titan_4_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_titan_8_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
[pbn_moxa8250_2p] = {
.flags = FL_BASE1,
.num_ports = 2,
@@ -3979,12 +3967,12 @@
uart.port.irq = 0;
} else {
if (pci_match_id(pci_use_msi, dev)) {
- dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+ pci_dbg(dev, "Using MSI(-X) interrupts\n");
pci_set_master(dev);
uart.port.flags &= ~UPF_SHARE_IRQ;
rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
} else {
- dev_dbg(&dev->dev, "Using legacy interrupts\n");
+ pci_dbg(dev, "Using legacy interrupts\n");
rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
}
if (rc < 0) {
@@ -4002,12 +3990,12 @@
if (quirk->setup(priv, board, &uart, i))
break;
- dev_dbg(&dev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
+ pci_dbg(dev, "Setup PCI port: port %lx, irq %d, type %d\n",
uart.port.iobase, uart.port.irq, uart.port.iotype);
priv->line[i] = serial8250_register_8250_port(&uart);
if (priv->line[i] < 0) {
- dev_err(&dev->dev,
+ pci_err(dev,
"Couldn't register serial port %lx, irq %d, type %d, error %d\n",
uart.port.iobase, uart.port.irq,
uart.port.iotype, priv->line[i]);
@@ -4103,8 +4091,7 @@
}
if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
- dev_err(&dev->dev, "invalid driver_data: %ld\n",
- ent->driver_data);
+ pci_err(dev, "invalid driver_data: %ld\n", ent->driver_data);
return -EINVAL;
}
@@ -4187,7 +4174,7 @@
err = pci_enable_device(pdev);
/* FIXME: We cannot simply error out here */
if (err)
- dev_err(dev, "Unable to re-enable ports, trying to continue.\n");
+ pci_err(pdev, "Unable to re-enable ports, trying to continue.\n");
pciserial_resume_ports(priv);
}
return 0;
@@ -4381,13 +4368,6 @@
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
/*
- * EndRun Technologies. PCI express device range.
- * EndRun PTP/1588 has 2 Native UARTs.
- */
- { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_endrun_2_4000000 },
- /*
* Quatech cards. These actually have configurable clocks but for
* now we just use the default.
*
@@ -4496,158 +4476,165 @@
*/
{ PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_1_4000000 },
+ pbn_b0_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_oxsemi_4_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_oxsemi_4_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_oxsemi_8_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_oxsemi_8_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
/*
* Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_oxsemi_1_3906250 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_oxsemi_4_3906250 },
{ PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_oxsemi_8_3906250 },
/*
* Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado
*/
{ PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM,
PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_oxsemi_2_3906250 },
+ /*
+ * EndRun Technologies. PCI express device range.
+ * EndRun PTP/1588 has 2 Native UARTs utilizing OxSemi 952.
+ */
+ { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_3906250 },
/*
* SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
@@ -4721,22 +4708,22 @@
pbn_b0_4_921600 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_1_4000000 },
+ pbn_titan_1_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_titan_2_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_4_4000000 },
+ pbn_titan_4_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800E,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_8_4000000 },
+ pbn_titan_8_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_titan_2_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_oxsemi_2_4000000 },
+ pbn_titan_2_4000000 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_921600 },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 7c07ebb..1f231fc 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -530,27 +530,6 @@
}
/*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
- serial_out(up, UART_SCR, offset);
- serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
- unsigned int value;
-
- serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
- serial_out(up, UART_SCR, offset);
- value = serial_in(up, UART_ICR);
- serial_icr_write(up, UART_ACR, up->acr);
-
- return value;
-}
-
-/*
* FIFO support.
*/
static void serial8250_clear_fifos(struct uart_8250_port *p)
@@ -613,7 +592,7 @@
static int serial8250_em485_init(struct uart_8250_port *p)
{
if (p->em485)
- return 0;
+ goto deassert_rts;
p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
if (!p->em485)
@@ -629,7 +608,9 @@
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
- p->rs485_stop_tx(p);
+deassert_rts:
+ if (p->em485->tx_stopped)
+ p->rs485_stop_tx(p);
return 0;
}
@@ -680,13 +661,6 @@
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
}
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- memset(rs485->padding, 0, sizeof(rs485->padding));
- port->rs485 = *rs485;
-
gpiod_set_value(port->rs485_term_gpio,
rs485->flags & SER_RS485_TERMINATE_BUS);
@@ -694,15 +668,8 @@
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
*/
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
+ if (rs485->flags & SER_RS485_ENABLED)
+ return serial8250_em485_init(up);
serial8250_em485_destroy(up);
return 0;
@@ -1042,7 +1009,8 @@
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
- if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+ if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) &&
+ !(up->port.flags & UPF_FULL_PROBE))
return;
/*
@@ -1532,6 +1500,8 @@
if (em485) {
unsigned char lsr = serial_in(p, UART_LSR);
+ p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
/*
* To provide required timeing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
@@ -1620,6 +1590,18 @@
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
+ /*
+ * While serial8250_em485_handle_stop_tx() is a noop if
+ * em485->active_timer != &em485->stop_tx_timer, it might happen that
+ * the timer is still armed and triggers only after the current bunch of
+ * chars is send and em485->active_timer == &em485->stop_tx_timer again.
+ * So cancel the timer. There is still a theoretical race condition if
+ * the timer is already running and only comes around to check for
+ * em485->active_timer when &em485->stop_tx_timer is armed again.
+ */
+ if (em485->active_timer == &em485->stop_tx_timer)
+ hrtimer_try_to_cancel(&em485->stop_tx_timer);
+
em485->active_timer = NULL;
if (em485->tx_stopped) {
@@ -1805,9 +1787,7 @@
int count;
if (port->x_char) {
- serial_out(up, UART_TX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
+ uart_xchar_out(port, UART_TX);
return;
}
if (uart_tx_stopped(port)) {
@@ -1889,10 +1869,13 @@
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
{
switch (iir & 0x3f) {
- case UART_IIR_RX_TIMEOUT:
- serial8250_rx_dma_flush(up);
+ case UART_IIR_RDI:
+ if (!up->dma->rx_running)
+ break;
fallthrough;
case UART_IIR_RLSI:
+ case UART_IIR_RX_TIMEOUT:
+ serial8250_rx_dma_flush(up);
return true;
}
return up->dma->rx_dma(up);
@@ -2039,6 +2022,9 @@
static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ return;
+
if (port->set_mctrl)
port->set_mctrl(port, mctrl);
else
@@ -2285,6 +2271,10 @@
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
+ retval = up->ops->setup_irq(up);
+ if (retval)
+ goto out;
+
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
@@ -2327,9 +2317,7 @@
}
}
- retval = up->ops->setup_irq(up);
- if (retval)
- goto out;
+ up->ops->setup_timer(up);
/*
* Now, initialize the UART
@@ -2941,8 +2929,10 @@
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
- if (!port->mapbase)
+ if (!port->mapbase) {
+ ret = -EINVAL;
break;
+ }
if (!request_mem_region(port->mapbase, size, "serial")) {
ret = -EBUSY;
@@ -3166,9 +3156,6 @@
if (flags & UART_CONFIG_TYPE)
autoconfig(up);
- if (port->rs485.flags & SER_RS485_ENABLED)
- port->rs485_config(port, &port->rs485);
-
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
@@ -3293,15 +3280,20 @@
unsigned int baud, quot, frac = 0;
termios.c_cflag = port->cons->cflag;
- if (port->state->port.tty && termios.c_cflag == 0)
+ termios.c_ispeed = port->cons->ispeed;
+ termios.c_ospeed = port->cons->ospeed;
+ if (port->state->port.tty && termios.c_cflag == 0) {
termios.c_cflag = port->state->port.tty->termios.c_cflag;
+ termios.c_ispeed = port->state->port.tty->termios.c_ispeed;
+ termios.c_ospeed = port->state->port.tty->termios.c_ospeed;
+ }
baud = serial8250_get_baud_rate(port, &termios, NULL);
quot = serial8250_get_divisor(port, baud, &frac);
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
/*
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 603137d..136f2b1 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -119,7 +119,7 @@
config SERIAL_8250_GSC
tristate
- depends on SERIAL_8250 && GSC
+ depends on SERIAL_8250 && PARISC
default SERIAL_8250
config SERIAL_8250_DMA
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 75d61e0..e538d6d 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -751,7 +751,7 @@
return ret;
}
-static int pl010_remove(struct amba_device *dev)
+static void pl010_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
int i;
@@ -767,8 +767,6 @@
if (!busy)
uart_unregister_driver(&amba_reg);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 61183e7..9900ee3 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1326,6 +1326,15 @@
pl011_dma_rx_stop(uap);
}
+static void pl011_throttle_rx(struct uart_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pl011_stop_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
static void pl011_enable_ms(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1717,9 +1726,10 @@
*/
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
+ unsigned long flags;
unsigned int i;
- spin_lock_irq(&uap->port.lock);
+ spin_lock_irqsave(&uap->port.lock, flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1741,7 +1751,14 @@
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irq(&uap->port.lock);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+static void pl011_unthrottle_rx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+
+ pl011_enable_interrupts(uap);
}
static int pl011_startup(struct uart_port *port)
@@ -2116,6 +2133,8 @@
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
+ .throttle = pl011_throttle_rx,
+ .unthrottle = pl011_unthrottle_rx,
.enable_ms = pl011_enable_ms,
.break_ctl = pl011_break_ctl,
.startup = pl011_startup,
@@ -2658,13 +2677,12 @@
return pl011_register_port(uap);
}
-static int pl011_remove(struct amba_device *dev)
+static void pl011_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
uart_remove_one_port(&amba_reg, &uap->port);
pl011_unregister_port(uap);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index c2be7cf..fcbaff8 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -593,6 +593,11 @@
dev_err(port->dev, "RS485 needs rts-gpio\n");
return 1;
}
+
+ if (rs485conf->flags & SER_RS485_ENABLED)
+ gpiod_set_value(up->rts_gpiod,
+ !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
+
port->rs485 = *rs485conf;
return 0;
}
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 602065b..b7872ad 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -295,20 +295,16 @@
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
- port->rs485 = *rs485conf;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (port->rs485.flags & SER_RS485_RX_DURING_TX)
+ if (rs485conf->flags & SER_RS485_RX_DURING_TX)
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
else
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 13ac36e..5fea9bf 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -309,6 +309,8 @@
case CS8:
default:
config |= UA_CONFIG_CHAR_LEN;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
@@ -471,11 +473,10 @@
if (IS_ERR(uart_clk))
return PTR_ERR(uart_clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->port.mapbase = res->start;
- dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dp->port.membase))
return PTR_ERR(dp->port.membase);
+ dp->port.mapbase = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b9f8add..43aca5a 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -229,8 +229,6 @@
/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF 0x10
-static DEFINE_IDA(fsl_lpuart_ida);
-
enum lpuart_type {
VF610_LPUART,
LS1021A_LPUART,
@@ -265,7 +263,6 @@
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
- bool id_allocated;
};
struct lpuart_soc_data {
@@ -1379,9 +1376,9 @@
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
/* Store the new configuration */
@@ -1728,6 +1725,7 @@
if (sport->lpuart_dma_rx_use) {
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
+ sport->lpuart_dma_rx_use = false;
}
if (sport->lpuart_dma_tx_use) {
@@ -1736,6 +1734,7 @@
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
+ sport->lpuart_dma_tx_use = false;
}
if (sport->dma_tx_chan)
@@ -2141,6 +2140,7 @@
uart_update_timeout(port, termios->c_cflag, baud);
/* wait transmit engin complete */
+ lpuart32_write(&sport->port, 0, UARTMODIR);
lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
@@ -2638,23 +2638,18 @@
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&pdev->dev, "port line is full, add device failed\n");
- return ret;
- }
- sport->id_allocated = true;
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
}
if (ret >= ARRAY_SIZE(lpuart_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", ret);
- ret = -EINVAL;
- goto failed_out_of_range;
+ return -EINVAL;
}
sport->port.line = ret;
ret = lpuart_enable_clks(sport);
if (ret)
- goto failed_clock_enable;
+ return ret;
sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
lpuart_ports[sport->port.line] = sport;
@@ -2674,10 +2669,6 @@
if (ret)
goto failed_irq_request;
- ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret)
- goto failed_attach_port;
-
ret = uart_get_rs485_mode(&sport->port);
if (ret)
goto failed_get_rs485;
@@ -2689,7 +2680,9 @@
sport->port.rs485.delay_rts_after_send)
dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
- sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ ret = uart_add_one_port(&lpuart_reg, &sport->port);
+ if (ret)
+ goto failed_attach_port;
return 0;
@@ -2697,10 +2690,6 @@
failed_attach_port:
failed_irq_request:
lpuart_disable_clks(sport);
-failed_clock_enable:
-failed_out_of_range:
- if (sport->id_allocated)
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
return ret;
}
@@ -2710,9 +2699,6 @@
uart_remove_one_port(&lpuart_reg, &sport->port);
- if (sport->id_allocated)
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
-
lpuart_disable_clks(sport);
if (sport->dma_tx_chan)
@@ -2842,7 +2828,6 @@
static void __exit lpuart_serial_exit(void)
{
- ida_destroy(&fsl_lpuart_ida);
platform_driver_unregister(&lpuart_driver);
uart_unregister_driver(&lpuart_reg);
}
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 94c8281..74b325c 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1503,7 +1503,7 @@
retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
if (retval) {
dev_err(&dev->dev, "PCI Config read FAILED\n");
- return retval;
+ goto probe_exit0;
}
pci_write_config_dword(dev, PCI_COMMAND,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 93cd8ad..164597e 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -398,8 +398,7 @@
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
- sport->port.mctrl |= TIOCM_RTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
}
/* called with port.lock taken and irqs caller dependent */
@@ -408,8 +407,7 @@
*ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS;
- sport->port.mctrl &= ~TIOCM_RTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
}
static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
@@ -1470,7 +1468,7 @@
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
- if (!sport->dma_is_enabled)
+ if (!dma_is_inited)
ucr4 |= UCR4_OREN;
if (sport->inverted_rx)
ucr4 |= UCR4_INVR;
@@ -2381,8 +2379,6 @@
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- imx_uart_rs485_config(&sport->port, &sport->port.rs485);
-
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
@@ -2630,6 +2626,7 @@
.suspend_noirq = imx_uart_suspend_noirq,
.resume_noirq = imx_uart_resume_noirq,
.freeze_noirq = imx_uart_suspend_noirq,
+ .thaw_noirq = imx_uart_resume_noirq,
.restore_noirq = imx_uart_resume_noirq,
.suspend = imx_uart_suspend,
.resume = imx_uart_resume,
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index cd30da0..b5b61e5 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -212,7 +212,8 @@
break;
default:
- return -ENXIO;
+ rc = -ENXIO;
+ goto out_kfree_brd;
}
rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 49d0c7f..79b7db8 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -403,16 +403,16 @@
{
if (!opt) {
pr_err("config string not provided\n");
- return -EINVAL;
+ return 1;
}
if (strlen(opt) >= MAX_CONFIG_LEN) {
pr_err("config string too long\n");
- return -ENOSPC;
+ return 1;
}
strcpy(config, opt);
- return 0;
+ return 1;
}
__setup("kgdboc=", kgdboc_option_setup);
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index b5898c9..a980230 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -344,7 +344,7 @@
LPC32XX_HSUART_IIR(port->membase));
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- tty_schedule_flip(tport);
+ tty_flip_buffer_push(tport);
}
/* Data received? */
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index d2c08b7..91b7359 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -255,6 +255,14 @@
return (port->type == PORT_MESON) ? "meson_uart" : NULL;
}
+/*
+ * This function is called only from probe() using a temporary io mapping
+ * in order to perform a reset before setting up the device. Since the
+ * temporarily mapped region was successfully requested, there can be no
+ * console on this port at this time. Hence it is not necessary for this
+ * function to acquire the port->lock. (Since there is no console on this
+ * port at this time, the port->lock is not initialized yet.)
+ */
static void meson_uart_reset(struct uart_port *port)
{
u32 val;
@@ -269,9 +277,12 @@
static int meson_uart_startup(struct uart_port *port)
{
+ unsigned long flags;
u32 val;
int ret = 0;
+ spin_lock_irqsave(&port->lock, flags);
+
val = readl(port->membase + AML_UART_CONTROL);
val |= AML_UART_CLEAR_ERR;
writel(val, port->membase + AML_UART_CONTROL);
@@ -287,6 +298,8 @@
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
writel(val, port->membase + AML_UART_MISC);
+ spin_unlock_irqrestore(&port->lock, flags);
+
ret = request_irq(port->irq, meson_uart_interrupt, 0,
port->name, port);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 26bcbec..27023a5 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1593,6 +1593,7 @@
static void __msm_console_write(struct uart_port *port, const char *s,
unsigned int count, bool is_uartdm)
{
+ unsigned long flags;
int i;
int num_newlines = 0;
bool replaced = false;
@@ -1610,6 +1611,8 @@
num_newlines++;
count += num_newlines;
+ local_irq_save(flags);
+
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
@@ -1655,6 +1658,8 @@
if (locked)
spin_unlock(&port->lock);
+
+ local_irq_restore(flags);
}
static void msm_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 34ff218..bbf1b0b 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -238,6 +238,7 @@
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -250,6 +251,16 @@
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read.
+ * This causes an interrupt loop and a system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
@@ -443,13 +454,13 @@
}
}
-static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
unsigned int d_divisor, m_divisor;
u32 brdv, osamp;
if (!port->uartclk)
- return -EOPNOTSUPP;
+ return 0;
/*
* The baudrate is derived from the UART clock thanks to two divisors:
@@ -473,7 +484,7 @@
osamp &= ~OSAMP_DIVISORS_MASK;
writel(osamp, port->membase + UART_OSAMP);
- return 0;
+ return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}
static void mvebu_uart_set_termios(struct uart_port *port,
@@ -510,15 +521,11 @@
max_baud = 230400;
baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
- if (mvebu_uart_baud_rate_set(port, baud)) {
- /* No clock available, baudrate cannot be changed */
- if (old)
- baud = uart_get_baud_rate(port, old, NULL,
- min_baud, max_baud);
- } else {
- tty_termios_encode_baud_rate(termios, baud, baud);
- uart_update_timeout(port, termios->c_cflag, baud);
- }
+ baud = mvebu_uart_baud_rate_set(port, baud);
+
+ /* In case the baudrate cannot be changed, report the previous value */
+ if (baud == 0 && old)
+ baud = tty_termios_baud_rate(old);
/* Only the following flag changes are supported */
if (old) {
@@ -529,6 +536,11 @@
termios->c_cflag |= CS8;
}
+ if (baud != 0) {
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ }
+
spin_unlock_irqrestore(&port->lock, flags);
}
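The change above makes mvebu_uart_baud_rate_set() hand back the baud rate that was actually programmed instead of a 0/-EOPNOTSUPP status, so set_termios can encode the real rate into the termios. A rough userspace sketch of the arithmetic in C: the fixed m = 16 and the round-up derivation of d are assumptions for illustration only (the driver's divisor computation is not part of this hunk); the DIV_ROUND_CLOSEST step is the value the patched helper now returns.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)       (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

int main(void)
{
        unsigned int uartclk = 25000000;  /* assumed UART clock: 25 MHz */
        unsigned int baud    = 115200;    /* requested rate */
        unsigned int m = 16;              /* assumed oversampling ("M") divisor */
        unsigned int d = DIV_ROUND_UP(uartclk, baud * m);  /* assumed "D" derivation */

        /* what the patched helper reports back: the rate actually programmed */
        unsigned int actual = DIV_ROUND_CLOSEST(uartclk, d * m);

        printf("requested %u, programmed %u (d=%u, m=%u)\n", baud, actual, d, m);
        return 0;
}

With these numbers the port ends up at 111607 baud, which is what userspace now reads back instead of the requested 115200.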
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index c149f8c..a0d4bff 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -695,6 +695,7 @@
owl_port->port.uartclk = clk_get_rate(owl_port->clk);
if (owl_port->port.uartclk == 0) {
dev_err(&pdev->dev, "clock rate is zero\n");
+ clk_disable_unprepare(owl_port->clk);
return -EINVAL;
}
owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index a7363bc..351ad0b 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -628,22 +628,6 @@
return 0;
}
-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
-{
- int ret = 0;
- struct uart_port *port = &priv->port;
-
- if (port->x_char) {
- dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
- __func__, port->x_char, jiffies);
- buf[0] = port->x_char;
- port->x_char = 0;
- ret = 1;
- }
-
- return ret;
-}
-
static int dma_push_rx(struct eg20t_port *priv, int size)
{
int room;
@@ -893,9 +877,10 @@
fifo_size = max(priv->fifo_size, 1);
tx_empty = 1;
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+ if (port->x_char) {
+ pch_uart_hal_write(priv, &port->x_char, 1);
port->icount.tx++;
+ port->x_char = 0;
tx_empty = 0;
fifo_size--;
}
@@ -950,9 +935,11 @@
}
fifo_size = max(priv->fifo_size, 1);
- if (pop_tx_x(priv, xmit->buf)) {
- pch_uart_hal_write(priv, xmit->buf, 1);
+
+ if (port->x_char) {
+ pch_uart_hal_write(priv, &port->x_char, 1);
port->icount.tx++;
+ port->x_char = 0;
fifo_size--;
}
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index 85366e0..a45069e 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -262,6 +262,8 @@
fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS7;
break;
default:
ctrl |= RDA_UART_DBITS_8;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index f5fab1d..aa1cf2a 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -448,6 +448,8 @@
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
+ del_timer_sync(&sport->timer);
+
spin_lock_irqsave(&sport->port.lock, flags);
sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
@@ -478,8 +480,6 @@
UTSR1_TO_SM(UTSR1_ROR);
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 8ae3e03..263c332 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -361,8 +361,7 @@
/* Enable tx dma mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
- ucon |= (dma_get_cache_alignment() >= 16) ?
- S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+ ucon |= S3C64XX_UCON_TXBURST_1;
ucon |= S3C64XX_UCON_TXMODE_DMA;
wr_regl(port, S3C2410_UCON, ucon);
@@ -634,7 +633,7 @@
S3C64XX_UCON_DMASUS_EN |
S3C64XX_UCON_TIMEOUT_EN |
S3C64XX_UCON_RXMODE_MASK);
- ucon |= S3C64XX_UCON_RXBURST_16 |
+ ucon |= S3C64XX_UCON_RXBURST_1 |
0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_TIMEOUT_EN |
@@ -883,11 +882,8 @@
goto out;
}
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
- spin_unlock(&port->lock);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- spin_lock(&port->lock);
- }
if (uart_circ_empty(xmit))
s3c24xx_serial_stop_tx(port);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index c2be22c..cda7180 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -520,7 +520,7 @@
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
@@ -608,7 +608,6 @@
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned int count;
@@ -619,7 +618,7 @@
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}
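Both serial-tegra hunks above swap the open-coded circular-buffer bookkeeping for uart_xmit_advance(). As I understand the mainline helper, it advances xmit->tail with the same power-of-two mask and also accounts the characters in port->icount.tx. A minimal, self-contained userspace analogue (demo_port, xmit_advance and XMIT_SIZE are made-up names for illustration):

#include <stdio.h>

#define XMIT_SIZE 4096u                 /* power of two, like UART_XMIT_SIZE */

struct demo_port {
        unsigned int tail;              /* circular transmit-buffer tail index */
        unsigned long tx;               /* transmitted-character accounting */
};

/* roughly what uart_xmit_advance() does: advance the tail and account the chars */
static void xmit_advance(struct demo_port *p, unsigned int chars)
{
        p->tail = (p->tail + chars) & (XMIT_SIZE - 1);
        p->tx += chars;
}

int main(void)
{
        struct demo_port p = { .tail = 4090, .tx = 0 };

        xmit_advance(&p, 10);           /* wraps cleanly around the buffer */
        printf("tail=%u tx=%lu\n", p.tail, p.tx);   /* prints: tail=4 tx=10 */
        return 0;
}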
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index be0d992..605f928 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -42,6 +42,11 @@
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
+/*
+ * Max time with active RTS before/after data is sent.
+ */
+#define RS485_MAX_RTS_DELAY 100 /* msecs */
+
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -147,7 +152,7 @@
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
- if (old != port->mctrl)
+ if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -157,23 +162,10 @@
static void uart_port_dtr_rts(struct uart_port *uport, int raise)
{
- int rs485_on = uport->rs485_config &&
- (uport->rs485.flags & SER_RS485_ENABLED);
- int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND);
-
- if (raise) {
- if (rs485_on && RTS_after_send) {
- uart_set_mctrl(uport, TIOCM_DTR);
- uart_clear_mctrl(uport, TIOCM_RTS);
- } else {
- uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- }
- } else {
- unsigned int clear = TIOCM_DTR;
-
- clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0;
- uart_clear_mctrl(uport, clear);
- }
+ if (raise)
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
/*
@@ -677,6 +669,20 @@
}
/*
+ * This function performs a low-level write of the high-priority XON/XOFF
+ * character and accounts for it.
+ *
+ * Requires uart_port to implement .serial_out().
+ */
+void uart_xchar_out(struct uart_port *uport, int offset)
+{
+ serial_port_out(uport, offset, uport->x_char);
+ uport->icount.tx++;
+ uport->x_char = 0;
+}
+EXPORT_SYMBOL_GPL(uart_xchar_out);
+
+/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
@@ -1102,11 +1108,6 @@
goto out;
if (!tty_io_error(tty)) {
- if (uport->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
uart_update_mctrl(uport, set, clear);
ret = 0;
}
@@ -1325,8 +1326,41 @@
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
+ /* pick sane settings if the user hasn't */
+ if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485.flags |= SER_RS485_RTS_ON_SEND;
+ rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
+ }
+
+ if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485.delay_rts_before_send);
+ }
+
+ if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485.delay_rts_after_send);
+ }
+ /* Return clean padding area to userspace */
+ memset(rs485.padding, 0, sizeof(rs485.padding));
+
spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, &rs485);
+ if (!ret) {
+ port->rs485 = rs485;
+
+ /* Reset RTS and other mctrl lines when disabling RS485 */
+ if (!(rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ }
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
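With the checks added above, a TIOCSRS485 request that enables RS485 with neither (or both) of the RTS polarities set, or with RTS delays above RS485_MAX_RTS_DELAY, is adjusted by the core before the driver sees it. A hedged userspace sketch, following the pattern in the kernel's serial-rs485 documentation; the port path /dev/ttyS0 and RS485-capable hardware are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>          /* TIOCGRS485 / TIOCSRS485 */
#include <unistd.h>
#include <linux/serial.h>       /* struct serial_rs485, SER_RS485_* flags */

int main(void)
{
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);   /* assumed port */
        struct serial_rs485 rs485;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED;     /* neither RTS_ON_SEND nor RTS_AFTER_SEND */
        rs485.delay_rts_before_send = 500;   /* above the new 100 ms cap */

        if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                perror("TIOCSRS485");

        /* read back what the core actually kept */
        if (ioctl(fd, TIOCGRS485, &rs485) == 0)
                printf("flags=0x%x delay_before=%u ms\n",
                       rs485.flags, rs485.delay_rts_before_send);

        close(fd);
        return 0;
}

On a kernel with this change the read-back settings show SER_RS485_RTS_ON_SEND forced on and the delay clamped to 100 ms, with a rate-limited warning in the kernel log.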
@@ -1940,11 +1974,6 @@
}
#endif
-static inline bool uart_console_enabled(struct uart_port *port)
-{
- return uart_console(port) && (port->cons->flags & CON_ENABLED);
-}
-
static void uart_port_spin_lock_init(struct uart_port *port)
{
spin_lock_init(&port->lock);
@@ -2306,7 +2335,8 @@
uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, 0);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
@@ -2317,7 +2347,10 @@
if (tty)
uart_change_speed(tty, state, NULL);
spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, uport->mctrl);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, uport->mctrl);
+ else
+ uport->rs485_config(uport, &uport->rs485);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
tty_port_set_initialized(port, 1);
@@ -2415,7 +2448,10 @@
*/
spin_lock_irqsave(&port->lock, flags);
port->mctrl &= TIOCM_DTR;
- port->ops->set_mctrl(port, port->mctrl);
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ else
+ port->rs485_config(port, &port->rs485);
spin_unlock_irqrestore(&port->lock, flags);
/*
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 7a07e72..7beec33 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -644,6 +644,8 @@
case CS6: /* not supported */
case CS8:
cval |= TXX9_SILCR_UMODE_8BIT;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
break;
}
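This hunk, and the similar ones further down for sh-sci, sifive, st-asc, rda-uart and stm32-usart, make the driver write the word length it actually programs back into termios->c_cflag, so a later tcgetattr() reflects reality rather than the unsupported size that was requested. A hedged userspace sketch; the port path is an assumption and must be a UART whose driver only supports 8-bit words (a pty will happily keep CS5):

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/dev/ttyS0";   /* assumed port */
        struct termios tio;
        int fd = open(path, O_RDWR | O_NOCTTY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (tcgetattr(fd, &tio) < 0) {
                perror("tcgetattr");
                return 1;
        }

        tio.c_cflag &= ~CSIZE;
        tio.c_cflag |= CS5;                  /* ask for 5-bit words */
        tcsetattr(fd, TCSANOW, &tio);        /* the driver may silently adjust this */

        tcgetattr(fd, &tio);                 /* read back the effective settings */
        printf("effective word size is %s\n",
               (tio.c_cflag & CSIZE) == CS8 ? "CS8" : "not CS8");

        close(fd);
        return 0;
}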
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index f700bfa..8d92472 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2392,8 +2392,12 @@
int best_clk = -1;
unsigned long flags;
- if ((termios->c_cflag & CSIZE) == CS7)
+ if ((termios->c_cflag & CSIZE) == CS7) {
smr_val |= SCSMR_CHR;
+ } else {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_cflag & PARENB)
smr_val |= SCSMR_PE;
if (termios->c_cflag & PARODD)
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 214bf30..91952be 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -667,12 +667,16 @@
int rate;
char nstop;
- if ((termios->c_cflag & CSIZE) != CS8)
+ if ((termios->c_cflag & CSIZE) != CS8) {
dev_err_once(ssp->port.dev, "only 8-bit words supported\n");
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
if (termios->c_iflag & (INPCK | PARMRK))
dev_err_once(ssp->port.dev, "parity checking not supported\n");
if (termios->c_iflag & BRKINT)
dev_err_once(ssp->port.dev, "BREAK detection not supported\n");
+ termios->c_iflag &= ~(INPCK|PARMRK|BRKINT);
/* Set number of stop bits */
nstop = (termios->c_cflag & CSTOPB) ? 2 : 1;
@@ -999,7 +1003,7 @@
/* Set up clock divider */
ssp->clkin_rate = clk_get_rate(ssp->clk);
ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
- ssp->port.uartclk = ssp->baud_rate * 16;
+ ssp->port.uartclk = ssp->clkin_rate;
__ssp_update_div(ssp);
platform_set_drvdata(pdev, ssp);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index e704851..97d36f8 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -535,10 +535,14 @@
/* set character length */
if ((cflag & CSIZE) == CS7) {
ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
+ cflag |= PARENB;
} else {
ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
ASC_CTL_MODE_8BIT;
+ cflag &= ~CSIZE;
+ cflag |= CS8;
}
+ termios->c_cflag = cflag;
/* set stop bit */
ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 6afae05..9377da1 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -70,6 +70,8 @@
*cr3 |= USART_CR3_DEM;
over8 = *cr1 & USART_CR1_OVER8;
+ *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+
if (over8)
rs485_deat_dedt = delay_ADE * baud * 8;
else
@@ -810,13 +812,22 @@
* CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
* M0 and M1 already cleared by cr1 initialization.
*/
- if (bits == 9)
+ if (bits == 9) {
cr1 |= USART_CR1_M0;
- else if ((bits == 7) && cfg->has_7bits_data)
+ } else if ((bits == 7) && cfg->has_7bits_data) {
cr1 |= USART_CR1_M1;
- else if (bits != 8)
+ } else if (bits != 8) {
dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
, bits);
+ cflag &= ~CSIZE;
+ cflag |= CS8;
+ termios->c_cflag = cflag;
+ bits = 8;
+ if (cflag & PARENB) {
+ bits++;
+ cr1 |= USART_CR1_M0;
+ }
+ }
if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
stm32_port->fifoen)) {
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index aaf8748..31ae705 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -101,7 +101,7 @@
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(port, count);
}
uart_write_wakeup(port);
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index d6a8604..d1fecc8 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1137,6 +1137,8 @@
/* No compatible property, so try the name. */
soc_string = np->name;
+ of_node_put(np);
+
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index b5a8afb..f7dfa12 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -375,6 +375,8 @@
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
* as read bytes will not be removed from the FIFO.
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 1a0c7be..0569d59 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1749,6 +1749,8 @@
*/
static void hdlcdev_exit(struct slgt_info *info)
{
+ if (!info->netdev)
+ return;
unregister_hdlc_device(info->netdev);
free_netdev(info->netdev);
info->netdev = NULL;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 959f9e1..7ca209d 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -231,8 +231,10 @@
unsigned long flags;
/* Idle CPUs have no interesting backtrace. */
- if (idle_cpu(smp_processor_id()))
+ if (idle_cpu(smp_processor_id())) {
+ pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id());
return;
+ }
raw_spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
@@ -259,10 +261,13 @@
if (in_irq())
regs = get_irq_regs();
- if (regs) {
- pr_info("CPU%d:\n", smp_processor_id());
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ if (regs)
show_regs(regs);
- }
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
schedule_work(&sysrq_showallcpus);
}
}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 0fc4733..5bbc2e0 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -172,7 +172,8 @@
have queued and recycle that ? */
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
- p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
+ GFP_ATOMIC | __GFP_NOWARN);
if (p == NULL)
return NULL;
@@ -394,27 +395,6 @@
EXPORT_SYMBOL(__tty_insert_flip_char);
/**
- * tty_schedule_flip - push characters to ldisc
- * @port: tty port to push from
- *
- * Takes any pending buffers and transfers their ownership to the
- * ldisc side of the queue. It then schedules those characters for
- * processing by the line discipline.
- */
-
-void tty_schedule_flip(struct tty_port *port)
-{
- struct tty_bufhead *buf = &port->buf;
-
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
- */
- smp_store_release(&buf->tail->commit, buf->tail->used);
- queue_work(system_unbound_wq, &buf->work);
-}
-EXPORT_SYMBOL(tty_schedule_flip);
-
-/**
* tty_prepare_flip_string - make room for characters
* @port: tty port
* @chars: return pointer for character write area
@@ -543,6 +523,15 @@
}
+static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
+{
+ /*
+ * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+ * buffer data.
+ */
+ smp_store_release(&tail->commit, tail->used);
+}
+
/**
* tty_flip_buffer_push - terminal
* @port: tty port to push
@@ -556,11 +545,45 @@
void tty_flip_buffer_push(struct tty_port *port)
{
- tty_schedule_flip(port);
+ struct tty_bufhead *buf = &port->buf;
+
+ tty_flip_buffer_commit(buf->tail);
+ queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
+ * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
+ * push
+ * @port: tty port
+ * @chars: characters
+ * @size: size
+ *
+ * The function combines tty_insert_flip_string() and tty_flip_buffer_push()
+ * except that it properly holds the @port->lock while adding data.
+ *
+ * To be used only internally (by pty currently).
+ *
+ * Returns: the number added.
+ */
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t size)
+{
+ struct tty_bufhead *buf = &port->buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ size = tty_insert_flip_string(port, chars, size);
+ if (size)
+ tty_flip_buffer_commit(buf->tail);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ queue_work(system_unbound_wq, &buf->work);
+
+ return size;
+}
+
+/**
* tty_buffer_init - prepare a tty buffer structure
* @port: tty port to initialise
*
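tty_flip_buffer_commit() above publishes tail->used with smp_store_release(), which pairs with an acquire read in flush_to_ldisc() so the consumer never sees the commit count before the buffered bytes themselves. A minimal userspace analogue of that release/acquire pairing using C11 atomics and pthreads; it only illustrates the ordering idea, not the kernel primitives (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static char buf[64];                    /* plays the role of the flip buffer */
static atomic_uint commit;              /* plays the role of tail->commit */

static void *producer(void *arg)
{
        unsigned int used;

        (void)arg;
        used = snprintf(buf, sizeof(buf), "hello, ldisc");
        /* release: everything written to buf is visible once commit is seen */
        atomic_store_explicit(&commit, used, memory_order_release);
        return NULL;
}

static void *consumer(void *arg)
{
        unsigned int n;

        (void)arg;
        /* acquire pairs with the release above, like flush_to_ldisc() */
        while ((n = atomic_load_explicit(&commit, memory_order_acquire)) == 0)
                ;
        printf("flushed %u bytes: %.*s\n", n, (int)n, buf);
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}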
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 78acc27..aa0026a 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -311,7 +311,7 @@
static void put_queue(struct vc_data *vc, int ch)
{
tty_insert_flip_char(&vc->port, ch, 0);
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void puts_queue(struct vc_data *vc, char *cp)
@@ -320,7 +320,7 @@
tty_insert_flip_char(&vc->port, *cp, 0);
cp++;
}
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void applkey(struct vc_data *vc, int key, char mode)
@@ -565,7 +565,7 @@
static void fn_send_intr(struct vc_data *vc)
{
tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index a7ee117..0252c05 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -344,7 +344,7 @@
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = vmalloc(memsize);
+ p = vzalloc(memsize);
if (!p)
return NULL;
@@ -855,7 +855,7 @@
unsigned short *p = (unsigned short *) vc->vc_pos;
vc_uniscr_delete(vc, nr);
- scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
+ scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
@@ -1834,7 +1834,7 @@
static void respond_string(const char *p, size_t len, struct tty_port *port)
{
tty_insert_flip_string(port, p, len);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
@@ -4625,16 +4625,8 @@
if (op->data && font.charcount > op->charcount)
rc = -ENOSPC;
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- if (font.width > op->width || font.height > op->height)
- rc = -ENOSPC;
- } else {
- if (font.width != 8)
- rc = -EIO;
- else if ((op->height && font.height > op->height) ||
- font.height > 32)
- rc = -ENOSPC;
- }
+ if (font.width > op->width || font.height > op->height)
+ rc = -ENOSPC;
if (rc)
goto out;
@@ -4662,7 +4654,7 @@
return -EINVAL;
if (op->charcount > 512)
return -EINVAL;
- if (op->width <= 0 || op->width > 32 || op->height > 32)
+ if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32)
return -EINVAL;
size = (op->width+7)/8 * 32 * op->charcount;
if (size > max_font_size)
@@ -4672,31 +4664,6 @@
if (IS_ERR(font.data))
return PTR_ERR(font.data);
- if (!op->height) { /* Need to guess font height [compat] */
- int h, i;
- u8 *charmap = font.data;
-
- /*
- * If from KDFONTOP ioctl, don't allow things which can be done
- * in userland,so that we can get rid of this soon
- */
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- kfree(font.data);
- return -EINVAL;
- }
-
- for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++)
- if (charmap[32*i+h-1])
- goto nonzero;
-
- kfree(font.data);
- return -EINVAL;
-
- nonzero:
- op->height = h;
- }
-
font.charcount = op->charcount;
font.width = op->width;
font.height = op->height;
@@ -4704,9 +4671,11 @@
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
- else if (vc->vc_sw->con_font_set)
+ else if (vc->vc_sw->con_font_set) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
- else
+ } else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
@@ -4733,9 +4702,11 @@
console_unlock();
return -EINVAL;
}
- if (vc->vc_sw->con_font_default)
+ if (vc->vc_sw->con_font_default) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
- else
+ } else
rc = -ENOSYS;
console_unlock();
if (!rc) {
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a9c6ea8..b10b86e 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -486,70 +486,6 @@
return 0;
}
-static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
- struct consolefontdesc __user *user_cfd,
- struct console_font_op *op)
-{
- struct consolefontdesc cfdarg;
- int i;
-
- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc)))
- return -EFAULT;
-
- switch (cmd) {
- case PIO_FONTX:
- op->op = KD_FONT_OP_SET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = cfdarg.chardata;
- return con_font_op(vc, op);
-
- case GIO_FONTX:
- op->op = KD_FONT_OP_GET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = cfdarg.chardata;
- i = con_font_op(vc, op);
- if (i)
- return i;
- cfdarg.charheight = op->height;
- cfdarg.charcount = op->charcount;
- if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
- return -EFAULT;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
-{
- int ret;
-
- if (__is_defined(BROKEN_GRAPHICS_PROGRAMS)) {
- /*
- * With BROKEN_GRAPHICS_PROGRAMS defined, the default font is
- * not saved.
- */
- return -ENOSYS;
- }
-
- op->op = KD_FONT_OP_SET_DEFAULT;
- op->data = NULL;
- ret = con_font_op(vc, op);
- if (ret)
- return ret;
-
- console_lock();
- con_set_default_unimap(vc);
- console_unlock();
-
- return 0;
-}
-
static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
bool perm, struct vc_data *vc)
{
@@ -574,29 +510,7 @@
static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
bool perm)
{
- struct console_font_op op; /* used in multiple places here */
-
switch (cmd) {
- case PIO_FONT:
- if (!perm)
- return -EPERM;
- op.op = KD_FONT_OP_SET;
- op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */
- op.width = 8;
- op.height = 0;
- op.charcount = 256;
- op.data = up;
- return con_font_op(vc, &op);
-
- case GIO_FONT:
- op.op = KD_FONT_OP_GET;
- op.flags = KD_FONT_FLAG_OLD;
- op.width = 8;
- op.height = 32;
- op.charcount = 256;
- op.data = up;
- return con_font_op(vc, &op);
-
case PIO_CMAP:
if (!perm)
return -EPERM;
@@ -605,20 +519,6 @@
case GIO_CMAP:
return con_get_cmap(up);
- case PIO_FONTX:
- if (!perm)
- return -EPERM;
-
- fallthrough;
- case GIO_FONTX:
- return do_fontx_ioctl(vc, cmd, up, &op);
-
- case PIO_FONTRESET:
- if (!perm)
- return -EPERM;
-
- return vt_io_fontreset(vc, &op);
-
case PIO_SCRNMAP:
if (!perm)
return -EPERM;
@@ -1099,54 +999,6 @@
#ifdef CONFIG_COMPAT
-struct compat_consolefontdesc {
- unsigned short charcount; /* characters in font (256 or 512) */
- unsigned short charheight; /* scan lines per character (1-32) */
- compat_caddr_t chardata; /* font data in expanded form */
-};
-
-static inline int
-compat_fontx_ioctl(struct vc_data *vc, int cmd,
- struct compat_consolefontdesc __user *user_cfd,
- int perm, struct console_font_op *op)
-{
- struct compat_consolefontdesc cfdarg;
- int i;
-
- if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc)))
- return -EFAULT;
-
- switch (cmd) {
- case PIO_FONTX:
- if (!perm)
- return -EPERM;
- op->op = KD_FONT_OP_SET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = compat_ptr(cfdarg.chardata);
- return con_font_op(vc, op);
-
- case GIO_FONTX:
- op->op = KD_FONT_OP_GET;
- op->flags = KD_FONT_FLAG_OLD;
- op->width = 8;
- op->height = cfdarg.charheight;
- op->charcount = cfdarg.charcount;
- op->data = compat_ptr(cfdarg.chardata);
- i = con_font_op(vc, op);
- if (i)
- return i;
- cfdarg.charheight = op->height;
- cfdarg.charcount = op->charcount;
- if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc)))
- return -EFAULT;
- return 0;
- }
- return -EINVAL;
-}
-
struct compat_console_font_op {
compat_uint_t op; /* operation code KD_FONT_OP_* */
compat_uint_t flags; /* KD_FONT_FLAG_* */
@@ -1223,9 +1075,6 @@
/*
* these need special handlers for incompatible data structures
*/
- case PIO_FONTX:
- case GIO_FONTX:
- return compat_fontx_ioctl(vc, cmd, up, perm, &op);
case KDFONTOP:
return compat_kdfontop_ioctl(up, perm, &op, vc);
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 6eeb7ed..8fe7420 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -97,13 +97,23 @@
* can be restricted later depending on strap pin configuration.
*/
if (dr_mode == USB_DR_MODE_UNKNOWN) {
- if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
- IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
- dr_mode = USB_DR_MODE_OTG;
- else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
- dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
- dr_mode = USB_DR_MODE_PERIPHERAL;
+ if (cdns->version == CDNSP_CONTROLLER_V2) {
+ if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ } else {
+ if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
}
/*
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 3176f92..0d87871 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -55,7 +55,9 @@
* @otg_res: the resource for otg
* @otg_v0_regs: pointer to base of v0 otg registers
* @otg_v1_regs: pointer to base of v1 otg registers
+ * @otg_cdnsp_regs: pointer to base of CDNSP otg registers
* @otg_regs: pointer to base of otg registers
+ * @otg_irq_regs: pointer to interrupt registers
* @otg_irq: irq number for otg controller
* @dev_irq: irq number for device controller
* @wakeup_irq: irq number for wakeup event, it is optional
@@ -86,9 +88,12 @@
struct resource otg_res;
struct cdns3_otg_legacy_regs *otg_v0_regs;
struct cdns3_otg_regs *otg_v1_regs;
+ struct cdnsp_otg_regs *otg_cdnsp_regs;
struct cdns3_otg_common_regs *otg_regs;
+ struct cdns3_otg_irq_regs *otg_irq_regs;
#define CDNS3_CONTROLLER_V0 0
#define CDNS3_CONTROLLER_V1 1
+#define CDNSP_CONTROLLER_V2 2
u32 version;
bool phyrst_a_enable;
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 38ccd29..95863d4 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -2,13 +2,12 @@
/*
* Cadence USBSS DRD Driver.
*
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
* Copyright (C) 2019 Texas Instruments
*
* Author: Pawel Laszczak <pawell@cadence.com>
* Roger Quadros <rogerq@ti.com>
*
- *
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -28,8 +27,9 @@
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+static int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
{
+ u32 __iomem *override_reg;
u32 reg;
switch (mode) {
@@ -39,11 +39,24 @@
break;
case USB_DR_MODE_OTG:
dev_dbg(cdns->dev, "Set controller to OTG mode\n");
- if (cdns->version == CDNS3_CONTROLLER_V1) {
- reg = readl(&cdns->otg_v1_regs->override);
- reg |= OVERRIDE_IDPULLUP;
- writel(reg, &cdns->otg_v1_regs->override);
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ override_reg = &cdns->otg_cdnsp_regs->override;
+ else if (cdns->version == CDNS3_CONTROLLER_V1)
+ override_reg = &cdns->otg_v1_regs->override;
+ else
+ override_reg = &cdns->otg_v0_regs->ctrl1;
+
+ reg = readl(override_reg);
+
+ if (cdns->version != CDNS3_CONTROLLER_V0)
+ reg |= OVERRIDE_IDPULLUP;
+ else
+ reg |= OVERRIDE_IDPULLUP_V0;
+
+ writel(reg, override_reg);
+
+ if (cdns->version == CDNS3_CONTROLLER_V1) {
/*
* Enable work around feature built into the
* controller to address issue with RX Sensitivity
@@ -55,10 +68,6 @@
reg |= PHYRST_CFG_PHYRST_A_ENABLE;
writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
}
- } else {
- reg = readl(&cdns->otg_v0_regs->ctrl1);
- reg |= OVERRIDE_IDPULLUP_V0;
- writel(reg, &cdns->otg_v0_regs->ctrl1);
}
/*
@@ -123,7 +132,7 @@
*/
static void cdns3_otg_disable_irq(struct cdns3 *cdns)
{
- writel(0, &cdns->otg_regs->ien);
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
@@ -133,7 +142,7 @@
static void cdns3_otg_enable_irq(struct cdns3 *cdns)
{
writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
- OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien);
+ OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien);
}
/**
@@ -144,16 +153,21 @@
*/
int cdns3_drd_host_on(struct cdns3 *cdns)
{
- u32 val;
+ u32 val, ready_bit;
int ret;
/* Enable host mode. */
writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS,
&cdns->otg_regs->cmd);
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ ready_bit = OTGSTS_CDNSP_XHCI_READY;
+ else
+ ready_bit = OTGSTS_CDNS3_XHCI_READY;
+
dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
- val & OTGSTS_XHCI_READY, 1, 100000);
+ val & ready_bit, 1, 100000);
if (ret)
dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
@@ -189,17 +203,22 @@
*/
int cdns3_drd_gadget_on(struct cdns3 *cdns)
{
- int ret, val;
u32 reg = OTGCMD_OTG_DIS;
+ u32 ready_bit;
+ int ret, val;
/* switch OTG core */
writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ ready_bit = OTGSTS_CDNSP_DEV_READY;
+ else
+ ready_bit = OTGSTS_CDNS3_DEV_READY;
+
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
- val & OTGSTS_DEV_READY,
- 1, 100000);
+ val & ready_bit, 1, 100000);
if (ret) {
dev_err(cdns->dev, "timeout waiting for dev_ready\n");
return ret;
@@ -244,7 +263,7 @@
cdns3_otg_disable_irq(cdns);
/* clear all interrupts */
- writel(~0, &cdns->otg_regs->ivect);
+ writel(~0, &cdns->otg_irq_regs->ivect);
ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG);
if (ret)
@@ -313,7 +332,7 @@
if (cdns->in_lpm)
return ret;
- reg = readl(&cdns->otg_regs->ivect);
+ reg = readl(&cdns->otg_irq_regs->ivect);
if (!reg)
return IRQ_NONE;
@@ -332,7 +351,7 @@
ret = IRQ_WAKE_THREAD;
}
- writel(~0, &cdns->otg_regs->ivect);
+ writel(~0, &cdns->otg_irq_regs->ivect);
return ret;
}
@@ -347,28 +366,43 @@
return PTR_ERR(regs);
/* Detection of DRD version. Controller has been released
- * in two versions. Both are similar, but they have same changes
- * in register maps.
- * The first register in old version is command register and it's read
- * only, so driver should read 0 from it. On the other hand, in v1
- * the first register contains device ID number which is not set to 0.
- * Driver uses this fact to detect the proper version of
+ * in three versions. All are very similar and are software compatible,
+ * but they have some changes in their register maps.
+ * The first register in the oldest version is the command register and
+ * it's read only, so the driver should read 0 from it. On the other hand,
+ * in v1 and v2 the first register contains the device ID number, which is
+ * not set to 0. The driver uses this fact to detect the proper version of
* controller.
*/
cdns->otg_v0_regs = regs;
if (!readl(&cdns->otg_v0_regs->cmd)) {
cdns->version = CDNS3_CONTROLLER_V0;
cdns->otg_v1_regs = NULL;
+ cdns->otg_cdnsp_regs = NULL;
cdns->otg_regs = regs;
+ cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *)
+ &cdns->otg_v0_regs->ien;
writel(1, &cdns->otg_v0_regs->simulate);
dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
readl(&cdns->otg_v0_regs->version));
} else {
cdns->otg_v0_regs = NULL;
cdns->otg_v1_regs = regs;
+ cdns->otg_cdnsp_regs = regs;
+
cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
- cdns->version = CDNS3_CONTROLLER_V1;
- writel(1, &cdns->otg_v1_regs->simulate);
+
+ if (cdns->otg_cdnsp_regs->did == OTG_CDNSP_DID) {
+ cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *)
+ &cdns->otg_cdnsp_regs->ien;
+ cdns->version = CDNSP_CONTROLLER_V2;
+ } else {
+ cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *)
+ &cdns->otg_v1_regs->ien;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->version = CDNS3_CONTROLLER_V1;
+ }
+
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
readl(&cdns->otg_v1_regs->did),
readl(&cdns->otg_v1_regs->rid));
@@ -378,10 +412,17 @@
/* Update dr_mode according to STRAP configuration. */
cdns->dr_mode = USB_DR_MODE_OTG;
- if (state == OTGSTS_STRAP_HOST) {
+
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_HOST) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_HOST)) {
dev_dbg(cdns->dev, "Controller strapped to HOST\n");
cdns->dr_mode = USB_DR_MODE_HOST;
- } else if (state == OTGSTS_STRAP_GADGET) {
+ } else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_GADGET) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_GADGET)) {
dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
}
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index f1ccae2..a767b68 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USB3 DRD header file.
+ * Cadence USB3 and USBSSP DRD header file.
*
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
*
* Author: Pawel Laszczak <pawell@cadence.com>
*/
@@ -13,7 +13,7 @@
#include <linux/phy/phy.h>
#include "core.h"
-/* DRD register interface for version v1. */
+/* DRD register interface for version v1 of cdns3 driver. */
struct cdns3_otg_regs {
__le32 did;
__le32 rid;
@@ -38,7 +38,7 @@
__le32 ctrl2;
};
-/* DRD register interface for version v0. */
+/* DRD register interface for version v0 of cdns3 driver. */
struct cdns3_otg_legacy_regs {
__le32 cmd;
__le32 sts;
@@ -57,14 +57,45 @@
__le32 ctrl1;
};
+/* DRD register interface for cdnsp driver */
+struct cdnsp_otg_regs {
+ __le32 did;
+ __le32 rid;
+ __le32 cfgs1;
+ __le32 cfgs2;
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 ien;
+ __le32 ivect;
+ __le32 tmr;
+ __le32 simulate;
+ __le32 adpbc_sts;
+ __le32 adp_ramp_time;
+ __le32 adpbc_ctrl1;
+ __le32 adpbc_ctrl2;
+ __le32 override;
+ __le32 vbusvalid_dbnc_cfg;
+ __le32 sessvalid_dbnc_cfg;
+ __le32 susp_timing_ctrl;
+};
+
+#define OTG_CDNSP_DID 0x0004034E
+
/*
- * Common registers interface for both version of DRD.
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
*/
struct cdns3_otg_common_regs {
__le32 cmd;
__le32 sts;
__le32 state;
- __le32 different1;
+};
+
+/*
+ * Interrupt related registers. These registers are mapped at a different
+ * location for the CDNSP controller.
+ */
+struct cdns3_otg_irq_regs {
__le32 ien;
__le32 ivect;
};
@@ -92,9 +123,9 @@
#define OTGCMD_DEV_BUS_DROP BIT(8)
/* Drop the bus for Host mode*/
#define OTGCMD_HOST_BUS_DROP BIT(9)
-/* Power Down USBSS-DEV. */
+/* Power Down USBSS-DEV - only for CDNS3.*/
#define OTGCMD_DEV_POWER_OFF BIT(11)
-/* Power Down CDNSXHCI. */
+/* Power Down CDNSXHCI - only for CDNS3. */
#define OTGCMD_HOST_POWER_OFF BIT(12)
/* OTGIEN - bitmasks */
@@ -123,20 +154,31 @@
#define OTGSTS_OTG_NRDY_MASK BIT(11)
#define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK)
/*
- * Value of the strap pins.
+ * Value of the strap pins for:
+ * CDNS3:
* 000 - no default configuration
* 010 - Controller initiall configured as Host
* 100 - Controller initially configured as Device
+ * CDNSP:
+ * 000 - No default configuration.
+ * 010 - Controller initially configured as Host.
+ * 100 - Controller initially configured as Device.
*/
#define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12)
#define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00
#define OTGSTS_STRAP_HOST_OTG 0x01
#define OTGSTS_STRAP_HOST 0x02
#define OTGSTS_STRAP_GADGET 0x04
+#define OTGSTS_CDNSP_STRAP_HOST 0x01
+#define OTGSTS_CDNSP_STRAP_GADGET 0x02
+
/* Host mode is turned on. */
-#define OTGSTS_XHCI_READY BIT(26)
+#define OTGSTS_CDNS3_XHCI_READY BIT(26)
+#define OTGSTS_CDNSP_XHCI_READY BIT(27)
+
/* "Device mode is turned on .*/
-#define OTGSTS_DEV_READY BIT(27)
+#define OTGSTS_CDNS3_DEV_READY BIT(27)
+#define OTGSTS_CDNSP_DEV_READY BIT(26)
/* OTGSTATE- bitmasks */
#define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0)
@@ -152,6 +194,8 @@
#define OVERRIDE_IDPULLUP BIT(0)
/* Only for CDNS3_CONTROLLER_V0 version */
#define OVERRIDE_IDPULLUP_V0 BIT(24)
+/* Vbusvalid/Sesvalid override select. */
+#define OVERRIDE_SESS_VLD_SEL BIT(10)
/* PHYRST_CFG - bitmasks */
#define PHYRST_CFG_PHYRST_A_ENABLE BIT(0)
@@ -170,6 +214,5 @@
void cdns3_drd_gadget_off(struct cdns3 *cdns);
int cdns3_drd_host_on(struct cdns3 *cdns);
void cdns3_drd_host_off(struct cdns3 *cdns);
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode);
#endif /* __LINUX_CDNS3_DRD */
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index e111622..e3a8b6c 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -352,19 +352,6 @@
cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
-static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
-{
- struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
- int current_trb = priv_req->start_trb;
-
- while (current_trb != priv_req->end_trb) {
- cdns3_ep_inc_deq(priv_ep);
- current_trb = priv_ep->dequeue;
- }
-
- cdns3_ep_inc_deq(priv_ep);
-}
-
/**
* cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
* @priv_dev: Extended gadget object
@@ -655,9 +642,9 @@
trace_cdns3_wa2(priv_ep, "removes eldest request");
kfree(priv_req->request.buf);
+ list_del_init(&priv_req->list);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
if (!chain)
@@ -1518,10 +1505,11 @@
trb = priv_ep->trb_pool + priv_ep->dequeue;
- /* Request was dequeued and TRB was changed to TRB_LINK. */
- if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+ /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
trace_cdns3_complete_trb(priv_ep, trb);
- cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_ep_inc_deq(priv_ep);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
}
if (!request->stream_id) {
@@ -1543,7 +1531,8 @@
TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 &&
- le32_to_cpu(trb->control) & TRB_SMM)
+ le32_to_cpu(trb->control) & TRB_SMM &&
+ le32_to_cpu(trb->control) & TRB_CHAIN)
transfer_end = true;
cdns3_ep_inc_deq(priv_ep);
@@ -1703,6 +1692,7 @@
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
@@ -2293,11 +2283,16 @@
int ret = 0;
int val;
+ if (!ep) {
+ pr_debug("usbss: ep not configured?\n");
+ return -EINVAL;
+ }
+
priv_ep = ep_to_cdns3_ep(ep);
priv_dev = priv_ep->cdns3_dev;
comp_desc = priv_ep->endpoint.comp_desc;
- if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
return -EINVAL;
}
@@ -2609,7 +2604,7 @@
struct usb_request *request)
{
struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
- struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_device *priv_dev;
struct usb_request *req, *req_temp;
struct cdns3_request *priv_req;
struct cdns3_trb *link_trb;
@@ -2620,6 +2615,8 @@
if (!ep || !request || !ep->desc)
return -EINVAL;
+ priv_dev = priv_ep->cdns3_dev;
+
spin_lock_irqsave(&priv_dev->lock, flags);
priv_req = to_cdns3_request(request);
@@ -2697,6 +2694,7 @@
struct usb_request *request;
struct cdns3_request *priv_req;
struct cdns3_trb *trb = NULL;
+ struct cdns3_trb trb_tmp;
int ret;
int val;
@@ -2706,8 +2704,10 @@
if (request) {
priv_req = to_cdns3_request(request);
trb = priv_req->trb;
- if (trb)
+ if (trb) {
+ trb_tmp = *trb;
trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ }
}
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
@@ -2722,7 +2722,7 @@
if (request) {
if (trb)
- trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ *trb = trb_tmp;
cdns3_rearm_transfer(priv_ep, 1);
}
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 6ed4b00..7a2a955 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -256,8 +256,10 @@
ci->enabled_otg_timer_bits &= ~(1 << t);
if (ci->next_otg_timer == t) {
if (ci->enabled_otg_timer_bits == 0) {
+ spin_unlock_irqrestore(&ci->lock, flags);
/* No enabled timers after delete it */
hrtimer_cancel(&ci->otg_fsm_hrtimer);
+ spin_lock_irqsave(&ci->lock, flags);
ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
} else {
/* Find the next timer */
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 5f35cdd..67d8da0 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1034,6 +1034,9 @@
struct ci_hdrc *ci = req->context;
unsigned long flags;
+ if (req->status < 0)
+ return;
+
if (ci->setaddr) {
hw_usb_set_address(ci, ci->address);
ci->setaddr = false;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7950d5b..070b838 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1830,6 +1830,9 @@
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index d1e4a73..80332b6 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -755,6 +755,7 @@
poison_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
+ clear_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
unpoison_urbs(desc);
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 1433260..347fb3d 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -25,6 +25,12 @@
[USB_ENDPOINT_XFER_INT] = "intr",
};
+/**
+ * usb_ep_type_string() - Returns the human-readable name of the endpoint type.
+ * @ep_type: The endpoint type to return the human-readable name for. If it's
+ * not one of the types USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT},
+ * usually obtained from usb_endpoint_type(), the string 'unknown' is returned.
+ */
const char *usb_ep_type_string(int ep_type)
{
if (ep_type < 0 || ep_type >= ARRAY_SIZE(ep_type_names))
@@ -69,6 +75,19 @@
[USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};
+static const char *const ssp_rate[] = {
+ [USB_SSP_GEN_UNKNOWN] = "UNKNOWN",
+ [USB_SSP_GEN_2x1] = "super-speed-plus-gen2x1",
+ [USB_SSP_GEN_1x2] = "super-speed-plus-gen1x2",
+ [USB_SSP_GEN_2x2] = "super-speed-plus-gen2x2",
+};
+
+/**
+ * usb_speed_string() - Returns the human-readable name of the speed.
+ * @speed: The speed to return the human-readable name for. If it's not
+ * one of the speeds defined in the usb_device_speed enum, the string for
+ * USB_SPEED_UNKNOWN is returned.
+ */
const char *usb_speed_string(enum usb_device_speed speed)
{
if (speed < 0 || speed >= ARRAY_SIZE(speed_names))
@@ -77,6 +96,14 @@
}
EXPORT_SYMBOL_GPL(usb_speed_string);
+/**
+ * usb_get_maximum_speed - Get maximum requested speed for a given USB
+ * controller.
+ * @dev: Pointer to the given USB controller device
+ *
+ * The function gets the maximum speed string from property "maximum-speed",
+ * and returns the corresponding enum usb_device_speed.
+ */
enum usb_device_speed usb_get_maximum_speed(struct device *dev)
{
const char *maximum_speed;
@@ -86,12 +113,44 @@
if (ret < 0)
return USB_SPEED_UNKNOWN;
- ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
+ ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ if (ret > 0)
+ return USB_SPEED_SUPER_PLUS;
+ ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
+/**
+ * usb_get_maximum_ssp_rate - Get the signaling rate generation and lane count
+ * of a SuperSpeed Plus capable device.
+ * @dev: Pointer to the given USB controller device
+ *
+ * If the string from "maximum-speed" property is super-speed-plus-genXxY where
+ * 'X' is the generation number and 'Y' is the number of lanes, then this
+ * function returns the corresponding enum usb_ssp_rate.
+ */
+enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev)
+{
+ const char *maximum_speed;
+ int ret;
+
+ ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
+ if (ret < 0)
+ return USB_SSP_GEN_UNKNOWN;
+
+ ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ return (ret < 0) ? USB_SSP_GEN_UNKNOWN : ret;
+}
+EXPORT_SYMBOL_GPL(usb_get_maximum_ssp_rate);
+
+/**
+ * usb_state_string - Returns the human-readable name for the state.
+ * @state: The state to return a human-readable name for. If it's not
+ * one of the states defined in the usb_device_state enum,
+ * the string UNKNOWN is returned.
+ */
const char *usb_state_string(enum usb_device_state state)
{
static const char *const names[] = {
@@ -141,6 +200,47 @@
}
EXPORT_SYMBOL_GPL(usb_get_dr_mode);
+/**
+ * usb_decode_interval - Decode bInterval into the time expressed in 1us units
+ * @epd: The descriptor of the endpoint
+ * @speed: The speed at which the endpoint operates
+ *
+ * The function returns the interval, expressed in 1us units, at which the
+ * endpoint is serviced for data transfers.
+ */
+unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd,
+ enum usb_device_speed speed)
+{
+ unsigned int interval = 0;
+
+ switch (usb_endpoint_type(epd)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* uframes per NAK */
+ if (speed == USB_SPEED_HIGH)
+ interval = epd->bInterval;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ interval = 1 << (epd->bInterval - 1);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ /* uframes per NAK */
+ if (speed == USB_SPEED_HIGH && usb_endpoint_dir_out(epd))
+ interval = epd->bInterval;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (speed >= USB_SPEED_HIGH)
+ interval = 1 << (epd->bInterval - 1);
+ else
+ interval = epd->bInterval;
+ break;
+ }
+
+ interval *= (speed >= USB_SPEED_HIGH) ? 125 : 1000;
+
+ return interval;
+}
+EXPORT_SYMBOL_GPL(usb_decode_interval);
+
#ifdef CONFIG_OF
/**
* of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device
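The usb_decode_interval() helper added above centralizes the bInterval decoding that devices.c and endpoint.c used to open-code (both are converted later in this patch). The same decision table can be exercised from userspace against the uapi <linux/usb/ch9.h> definitions; the descriptor values below are made up for the example:

#include <stdio.h>
#include <linux/usb/ch9.h>      /* usb_endpoint_descriptor, usb_endpoint_type(), speeds */

/* Mirrors the decision table of the new usb_decode_interval(), in microseconds. */
static unsigned int decode_interval(const struct usb_endpoint_descriptor *epd,
                                    enum usb_device_speed speed)
{
        unsigned int interval = 0;

        switch (usb_endpoint_type(epd)) {
        case USB_ENDPOINT_XFER_CONTROL:
                if (speed == USB_SPEED_HIGH)            /* uframes per NAK */
                        interval = epd->bInterval;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                interval = 1 << (epd->bInterval - 1);
                break;
        case USB_ENDPOINT_XFER_BULK:
                if (speed == USB_SPEED_HIGH && usb_endpoint_dir_out(epd))
                        interval = epd->bInterval;      /* uframes per NAK */
                break;
        case USB_ENDPOINT_XFER_INT:
                if (speed >= USB_SPEED_HIGH)
                        interval = 1 << (epd->bInterval - 1);
                else
                        interval = epd->bInterval;
                break;
        }

        return interval * (speed >= USB_SPEED_HIGH ? 125 : 1000);
}

int main(void)
{
        /* example: a high-speed interrupt IN endpoint with bInterval = 4 */
        struct usb_endpoint_descriptor epd = {
                .bEndpointAddress = 0x81,
                .bmAttributes     = USB_ENDPOINT_XFER_INT,
                .bInterval        = 4,
        };

        /* 2^(4-1) microframes * 125 us = 1000 us */
        printf("%u us\n", decode_interval(&epd, USB_SPEED_HIGH));
        return 0;
}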
diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
index ba849c7..f0c0e8d 100644
--- a/drivers/usb/common/debug.c
+++ b/drivers/usb/common/debug.c
@@ -207,12 +207,28 @@
snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue);
}
-/*
- * usb_decode_ctrl - returns a string representation of ctrl request
- */
-const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
- __u8 bRequest, __u16 wValue, __u16 wIndex,
- __u16 wLength)
+static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ u8 recip = bRequestType & USB_RECIP_MASK;
+ u8 type = bRequestType & USB_TYPE_MASK;
+
+ snprintf(str, size,
+ "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u",
+ (type == USB_TYPE_STANDARD) ? "Standard" :
+ (type == USB_TYPE_VENDOR) ? "Vendor" :
+ (type == USB_TYPE_CLASS) ? "Class" : "Unknown",
+ (recip == USB_RECIP_DEVICE) ? "Device" :
+ (recip == USB_RECIP_INTERFACE) ? "Interface" :
+ (recip == USB_RECIP_ENDPOINT) ? "Endpoint" : "Unknown",
+ (bRequestType & USB_DIR_IN) ? "IN" : "OUT",
+ bRequest, wValue, wIndex, wLength);
+}
+
+static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
{
switch (bRequest) {
case USB_REQ_GET_STATUS:
@@ -253,14 +269,48 @@
usb_decode_set_isoch_delay(wValue, str, size);
break;
default:
- snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
- bRequestType, bRequest,
- (u8)(cpu_to_le16(wValue) & 0xff),
- (u8)(cpu_to_le16(wValue) >> 8),
- (u8)(cpu_to_le16(wIndex) & 0xff),
- (u8)(cpu_to_le16(wIndex) >> 8),
- (u8)(cpu_to_le16(wLength) & 0xff),
- (u8)(cpu_to_le16(wLength) >> 8));
+ usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
+ }
+}
+
+/**
+ * usb_decode_ctrl - Returns human readable representation of control request.
+ * @str: buffer to return a human-readable representation of control request.
+ * This buffer should have about 200 bytes.
+ * @size: size of str buffer.
+ * @bRequestType: matches the USB bmRequestType field
+ * @bRequest: matches the USB bRequest field
+ * @wValue: matches the USB wValue field (CPU byte order)
+ * @wIndex: matches the USB wIndex field (CPU byte order)
+ * @wLength: matches the USB wLength field (CPU byte order)
+ *
+ * Function returns decoded, formatted and human-readable description of
+ * control request packet.
+ *
+ * The usage scenario for this is tracepoints, so the function returns the
+ * same @str buffer it was given. This approach allows it to be used directly
+ * in TP_printk().
+ *
+ * Important: the wValue, wIndex and wLength parameters should be converted
+ * with the le16_to_cpu macro before invoking this function.
+ */
+const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ switch (bRequestType & USB_TYPE_MASK) {
+ case USB_TYPE_STANDARD:
+ usb_decode_ctrl_standard(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
+ case USB_TYPE_VENDOR:
+ case USB_TYPE_CLASS:
+ default:
+ usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
}
return str;
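With the split above, requests that are not standard ones go through usb_decode_ctrl_generic() and come out as labeled fields instead of the old raw hex dump. A small userspace rendition of that formatting, reusing the bmRequestType masks from the uapi <linux/usb/ch9.h>; the request values in main() are made up:

#include <stdio.h>
#include <linux/usb/ch9.h>      /* USB_TYPE_*, USB_RECIP_*, USB_DIR_IN */

/* Same field-by-field rendering as the new usb_decode_ctrl_generic(). */
static void decode_generic(char *str, size_t size, __u8 bRequestType,
                           __u8 bRequest, __u16 wValue, __u16 wIndex,
                           __u16 wLength)
{
        __u8 recip = bRequestType & USB_RECIP_MASK;
        __u8 type = bRequestType & USB_TYPE_MASK;

        snprintf(str, size,
                 "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u",
                 type == USB_TYPE_STANDARD ? "Standard" :
                 type == USB_TYPE_VENDOR ? "Vendor" :
                 type == USB_TYPE_CLASS ? "Class" : "Unknown",
                 recip == USB_RECIP_DEVICE ? "Device" :
                 recip == USB_RECIP_INTERFACE ? "Interface" :
                 recip == USB_RECIP_ENDPOINT ? "Endpoint" : "Unknown",
                 (bRequestType & USB_DIR_IN) ? "IN" : "OUT",
                 bRequest, wValue, wIndex, wLength);
}

int main(void)
{
        char str[200];          /* the kernel helper expects roughly 200 bytes too */

        /* a made-up vendor request to an interface, host-to-device */
        decode_generic(str, sizeof(str),
                       USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0x42, 1, 0, 8);
        puts(str);
        return 0;
}

This prints "Type=Vendor Recipient=Interface Dir=OUT bRequest=66 wValue=1 wIndex=0 wLength=8".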
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 1ef2de6..d8b0041 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -157,38 +157,25 @@
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
type = "Ctrl";
- if (speed == USB_SPEED_HIGH) /* uframes per NAK */
- interval = desc->bInterval;
- else
- interval = 0;
dir = 'B'; /* ctrl is bidirectional */
break;
case USB_ENDPOINT_XFER_ISOC:
type = "Isoc";
- interval = 1 << (desc->bInterval - 1);
break;
case USB_ENDPOINT_XFER_BULK:
type = "Bulk";
- if (speed == USB_SPEED_HIGH && dir == 'O') /* uframes per NAK */
- interval = desc->bInterval;
- else
- interval = 0;
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
- if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
- interval = 1 << (desc->bInterval - 1);
- else
- interval = desc->bInterval;
break;
default: /* "can't happen" */
return start;
}
- interval *= (speed == USB_SPEED_HIGH ||
- speed >= USB_SPEED_SUPER) ? 125 : 1000;
- if (interval % 1000)
+
+ interval = usb_decode_interval(desc, speed);
+ if (interval % 1000) {
unit = 'u';
- else {
+ } else {
unit = 'm';
interval /= 1000;
}
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 1c2c040..fc3341f 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -84,40 +84,13 @@
char *buf)
{
struct ep_device *ep = to_ep_device(dev);
+ unsigned int interval;
char unit;
- unsigned interval = 0;
- unsigned in;
- in = (ep->desc->bEndpointAddress & USB_DIR_IN);
-
- switch (usb_endpoint_type(ep->desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- if (ep->udev->speed == USB_SPEED_HIGH)
- /* uframes per NAK */
- interval = ep->desc->bInterval;
- break;
-
- case USB_ENDPOINT_XFER_ISOC:
- interval = 1 << (ep->desc->bInterval - 1);
- break;
-
- case USB_ENDPOINT_XFER_BULK:
- if (ep->udev->speed == USB_SPEED_HIGH && !in)
- /* uframes per NAK */
- interval = ep->desc->bInterval;
- break;
-
- case USB_ENDPOINT_XFER_INT:
- if (ep->udev->speed == USB_SPEED_HIGH)
- interval = 1 << (ep->desc->bInterval - 1);
- else
- interval = ep->desc->bInterval;
- break;
- }
- interval *= (ep->udev->speed == USB_SPEED_HIGH) ? 125 : 1000;
- if (interval % 1000)
+ interval = usb_decode_interval(ep->desc, ep->udev->speed);
+ if (interval % 1000) {
unit = 'u';
- else {
+ } else {
unit = 'm';
interval /= 1000;
}
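
Both hunks above replace open-coded interval math with a shared usb_decode_interval() helper. As a reading aid, here is a sketch reconstructed from the logic being removed (the in-tree helper may differ in detail); it returns the polling interval in microseconds:

	/* Reconstructed from the removed code above -- a sketch, not the
	 * in-tree implementation.
	 */
	static unsigned int decode_interval_sketch(const struct usb_endpoint_descriptor *desc,
						   enum usb_device_speed speed)
	{
		unsigned int interval = 0;

		switch (usb_endpoint_type(desc)) {
		case USB_ENDPOINT_XFER_CONTROL:
			if (speed == USB_SPEED_HIGH)		/* uframes per NAK */
				interval = desc->bInterval;
			break;
		case USB_ENDPOINT_XFER_ISOC:
			interval = 1 << (desc->bInterval - 1);
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (speed == USB_SPEED_HIGH &&
			    usb_endpoint_dir_out(desc))		/* uframes per NAK */
				interval = desc->bInterval;
			break;
		case USB_ENDPOINT_XFER_INT:
			if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
				interval = 1 << (desc->bInterval - 1);
			else
				interval = desc->bInterval;
			break;
		}

		/* HS/SS count in 125 us microframes, LS/FS in 1 ms frames */
		return interval * ((speed == USB_SPEED_HIGH ||
				    speed >= USB_SPEED_SUPER) ? 125 : 1000);
	}
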
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index ec0d6c5..eee78cb 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -614,10 +614,10 @@
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
- .freeze = check_root_hub_suspended,
+ .freeze = hcd_pci_suspend,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
- .thaw = NULL,
+ .thaw = hcd_pci_resume,
.poweroff = hcd_pci_suspend,
.poweroff_noirq = hcd_pci_suspend_noirq,
.restore_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ddd1d3e..ac347f9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1692,7 +1692,6 @@
spin_lock_irq(&bh->lock);
bh->running = true;
- restart:
list_replace_init(&bh->head, &local_list);
spin_unlock_irq(&bh->lock);
@@ -1706,10 +1705,17 @@
bh->completing_ep = NULL;
}
- /* check if there are new URBs to giveback */
+ /*
+	 * Give back any newly queued URBs on the next tasklet run instead
+	 * of looping here, so this function does not run for too long.
+ */
spin_lock_irq(&bh->lock);
- if (!list_empty(&bh->head))
- goto restart;
+ if (!list_empty(&bh->head)) {
+ if (bh->high_prio)
+ tasklet_hi_schedule(&bh->bh);
+ else
+ tasklet_schedule(&bh->bh);
+ }
bh->running = false;
spin_unlock_irq(&bh->lock);
}
@@ -1734,7 +1740,7 @@
void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct giveback_urb_bh *bh;
- bool running, high_prio_bh;
+ bool running;
/* pass status to tasklet via unlinked */
if (likely(!urb->unlinked))
@@ -1745,13 +1751,10 @@
return;
}
- if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
+ if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe))
bh = &hcd->high_prio_bh;
- high_prio_bh = true;
- } else {
+ else
bh = &hcd->low_prio_bh;
- high_prio_bh = false;
- }
spin_lock(&bh->lock);
list_add_tail(&urb->urb_list, &bh->head);
@@ -1760,7 +1763,7 @@
if (running)
;
- else if (high_prio_bh)
+ else if (bh->high_prio)
tasklet_hi_schedule(&bh->bh);
else
tasklet_schedule(&bh->bh);
@@ -2661,6 +2664,7 @@
{
int retval;
struct usb_device *rhdev;
+ struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2799,6 +2803,7 @@
/* initialize tasklets */
init_giveback_urb_bh(&hcd->high_prio_bh);
+ hcd->high_prio_bh.high_prio = true;
init_giveback_urb_bh(&hcd->low_prio_bh);
/* enable irqs just before we start the controller,
@@ -2817,13 +2822,26 @@
goto err_hcd_driver_start;
}
- /* starting here, usbcore will pay attention to this root hub */
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ /* starting here, usbcore will pay attention to the shared HCD roothub */
+ shared_hcd = hcd->shared_hcd;
+ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+ retval = register_root_hub(shared_hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
+ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+ usb_hcd_poll_rh_status(shared_hcd);
+ }
+
+ /* starting here, usbcore will pay attention to this root hub */
+ if (!HCD_DEFER_RH_REGISTER(hcd)) {
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
+
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+ }
return retval;
@@ -2866,6 +2884,7 @@
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
+ bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -2876,6 +2895,7 @@
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
+ rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -2885,7 +2905,8 @@
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ if (rh_registered)
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 18ee391..f2a3c0b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5967,6 +5967,11 @@
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -6000,6 +6005,10 @@
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -6064,6 +6073,7 @@
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
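
The new kernel-doc paragraph above adds a second return path to usb_reset_device(). A hypothetical caller that can be unbound/rebound during a reset would treat it like this (sketch only; the helper name is assumed):

	static int try_reset(struct usb_device *udev)	/* hypothetical helper */
	{
		int ret = usb_reset_device(udev);

		/* A reset is already running (e.g. we were rebound from
		 * within that reset); nothing further to do here.
		 */
		if (ret == -EINPROGRESS)
			return 0;
		return ret;
	}
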
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index baf80e2..6d24d13 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -362,6 +362,9 @@
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ /* Realforce 87U Keyboard */
+ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -388,6 +391,15 @@
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+ /* NVIDIA Jetson devices in Force Recovery mode */
+ { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
@@ -404,6 +416,9 @@
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/
+ { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -435,6 +450,10 @@
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */
+ { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
{ USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
@@ -508,6 +527,12 @@
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* DELL USB GEN2 */
+ { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
+
+ /* VCOM device */
+ { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index ec54971..da0df69 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3593,7 +3593,8 @@
void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
{
/* remove the soft-disconnect and let's go */
- dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
+ if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
+ dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
/**
@@ -4518,7 +4519,6 @@
WARN_ON(hsotg->driver);
- driver->driver.bus = NULL;
hsotg->driver = driver;
hsotg->gadget.dev.of_node = hsotg->dev->of_node;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 30919f7..9279d3d 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5076,7 +5076,7 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -EINVAL;
- goto error1;
+ goto error2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 49d333f..8851db6 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -154,9 +154,9 @@
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_power_on(hsotg->phy);
+ ret = phy_init(hsotg->phy);
if (ret == 0)
- ret = phy_init(hsotg->phy);
+ ret = phy_power_on(hsotg->phy);
}
return ret;
@@ -188,9 +188,9 @@
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_exit(hsotg->phy);
+ ret = phy_power_off(hsotg->phy);
if (ret == 0)
- ret = phy_power_off(hsotg->phy);
+ ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
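
The two dwc2/platform.c hunks above (and the dwc3/core.c reordering that follows) arrange the generic PHY calls into mirrored pairs: init then power_on when bringing the PHY up, power_off then exit when tearing it down. A minimal sketch of that pairing, for reference only (helper names are illustrative; error handling is trimmed):

	#include <linux/phy/phy.h>

	static int example_phy_bringup(struct phy *phy)
	{
		int ret = phy_init(phy);

		if (ret)
			return ret;
		ret = phy_power_on(phy);
		if (ret)
			phy_exit(phy);		/* undo init on failure */
		return ret;
	}

	static void example_phy_teardown(struct phy *phy)
	{
		phy_power_off(phy);		/* mirror image of bring-up */
		phy_exit(phy);
	}
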
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1580d51..4a0eec1 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -114,8 +114,6 @@
dwc->current_dr_role = mode;
}
-static int dwc3_core_soft_reset(struct dwc3 *dwc);
-
static void __dwc3_set_mode(struct work_struct *work)
{
struct dwc3 *dwc = work_to_dwc(work);
@@ -158,8 +156,13 @@
break;
}
- /* For DRD host or device mode only */
- if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
+ /*
+ * When current_dr_role is not set, there's no role switching.
+ * Only perform GCTL.CoreSoftReset when there's DRD role switching.
+ */
+ if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
+ DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
+ dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg |= DWC3_GCTL_CORESOFTRESET;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -260,7 +263,7 @@
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
*/
-static int dwc3_core_soft_reset(struct dwc3 *dwc)
+int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
int retries = 1000;
@@ -275,7 +278,8 @@
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_CSFTRST;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ reg &= ~DWC3_DCTL_RUN_STOP;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
/*
* For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
@@ -722,15 +726,16 @@
{
dwc3_event_buffers_cleanup(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
reset_control_assert(dwc->reset);
}
@@ -1277,10 +1282,10 @@
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- u8 rx_thr_num_pkt_prd;
- u8 rx_max_burst_prd;
- u8 tx_thr_num_pkt_prd;
- u8 tx_max_burst_prd;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+ u8 tx_max_burst_prd = 0;
/* default to highest possible threshold */
lpm_nyet_threshold = 0xf;
@@ -1600,16 +1605,16 @@
dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 79e1b82..cbebe54 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1010,6 +1010,7 @@
* @tx_max_burst_prd: max periodic ESS transmit burst size
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
+ * @softconnect: true when gadget connect is called, false when disconnect runs
* @delayed_status: true when gadget driver asks for delayed status
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
@@ -1218,6 +1219,7 @@
const char *hsphy_interface;
unsigned connected:1;
+ unsigned softconnect:1;
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
@@ -1456,6 +1458,8 @@
int dwc3_event_buffers_setup(struct dwc3 *dwc);
void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+int dwc3_core_soft_reset(struct dwc3 *dwc);
+
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 3e1c1aa..0a96f44 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -568,16 +568,15 @@
{
int ret, irq;
+ if (ROLE_SWITCH &&
+ device_property_read_bool(dwc->dev, "usb-role-switch"))
+ return dwc3_setup_role_switch(dwc);
+
dwc->edev = dwc3_get_extcon(dwc);
if (IS_ERR(dwc->edev))
return PTR_ERR(dwc->edev);
- if (ROLE_SWITCH &&
- device_property_read_bool(dwc->dev, "usb-role-switch")) {
- ret = dwc3_setup_role_switch(dwc);
- if (ret < 0)
- return ret;
- } else if (dwc->edev) {
+ if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 90bb022..ee7b718 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -37,15 +37,6 @@
struct regulator *vdd10;
};
-static int dwc3_exynos_remove_child(struct device *dev, void *unused)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int dwc3_exynos_probe(struct platform_device *pdev)
{
struct dwc3_exynos *exynos;
@@ -142,7 +133,7 @@
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
int i;
- device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
+ of_platform_depopulate(&pdev->dev);
for (i = exynos->num_clks - 1; i >= 0; i--)
clk_disable_unprepare(exynos->clks[i]);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index e196673..efaf0db 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -242,7 +242,7 @@
break;
case OMAP_DWC3_ID_FLOAT:
- if (omap->vbus_reg)
+ if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg))
regulator_disable(omap->vbus_reg);
val = dwc3_omap_read_utmi_ctrl(omap);
val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 98df8d5..a5a8c57 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -213,7 +213,7 @@
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
}
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 504f8af..ca3a35f 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -296,6 +296,14 @@
icc_put(qcom->icc_path_apps);
}
+/* Only usable in contexts where the role can not change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ return dwc->xhci;
+}
+
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
if (qcom->hs_phy_irq) {
@@ -411,7 +419,11 @@
if (qcom->pm_suspended)
return IRQ_HANDLED;
- if (dwc->xhci)
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
@@ -443,9 +455,9 @@
int ret;
if (np)
- ret = platform_get_irq_byname(pdev_irq, name);
+ ret = platform_get_irq_byname_optional(pdev_irq, name);
else
- ret = platform_get_irq(pdev_irq, num);
+ ret = platform_get_irq_optional(pdev_irq, num);
return ret;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b68fe48..a9a43d6 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -752,7 +752,7 @@
return 0;
}
-static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
+static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
{
struct dwc3_request *req;
@@ -762,19 +762,19 @@
while (!list_empty(&dep->started_list)) {
req = next_request(&dep->started_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
while (!list_empty(&dep->pending_list)) {
req = next_request(&dep->pending_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
while (!list_empty(&dep->cancelled_list)) {
req = next_request(&dep->cancelled_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
}
@@ -803,18 +803,18 @@
reg &= ~DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
+
+ dep->stream_capable = false;
+ dep->type = 0;
+ dep->flags = 0;
+
/* Clear out the ep descriptors for non-ep0 */
if (dep->number > 1) {
dep->endpoint.comp_desc = NULL;
dep->endpoint.desc = NULL;
}
- dwc3_remove_requests(dwc, dep);
-
- dep->stream_capable = false;
- dep->type = 0;
- dep->flags = 0;
-
return 0;
}
@@ -970,17 +970,49 @@
return trbs_left;
}
-static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
- dma_addr_t dma, unsigned int length, unsigned int chain,
- unsigned int node, unsigned int stream_id,
- unsigned int short_not_ok, unsigned int no_interrupt,
- unsigned int is_last, bool must_interrupt)
+/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
+ * @chain: should this TRB be chained to the next?
+ * @node: only for isochronous endpoints. First TRB needs different type.
+ * @use_bounce_buffer: set to use bounce buffer
+ * @must_interrupt: set to interrupt on TRB completion
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trb_length,
+ unsigned int chain, unsigned int node, bool use_bounce_buffer,
+ bool must_interrupt)
{
+ struct dwc3_trb *trb;
+ dma_addr_t dma;
+ unsigned int stream_id = req->request.stream_id;
+ unsigned int short_not_ok = req->request.short_not_ok;
+ unsigned int no_interrupt = req->request.no_interrupt;
+ unsigned int is_last = req->request.is_last;
struct dwc3 *dwc = dep->dwc;
struct usb_gadget *gadget = dwc->gadget;
enum usb_device_speed speed = gadget->speed;
- trb->size = DWC3_TRB_SIZE_LENGTH(length);
+ if (use_bounce_buffer)
+ dma = dep->dwc->bounce_addr;
+ else if (req->request.num_sgs > 0)
+ dma = sg_dma_address(req->start_sg);
+ else
+ dma = req->request.dma;
+
+ trb = &dep->trb_pool[dep->trb_enqueue];
+
+ if (!req->trb) {
+ dwc3_gadget_move_started_request(req);
+ req->trb = trb;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+ }
+
+ req->num_trbs++;
+
+ trb->size = DWC3_TRB_SIZE_LENGTH(trb_length);
trb->bpl = lower_32_bits(dma);
trb->bph = upper_32_bits(dma);
@@ -1020,10 +1052,10 @@
unsigned int mult = 2;
unsigned int maxp = usb_endpoint_maxp(ep->desc);
- if (length <= (2 * maxp))
+ if (req->request.length <= (2 * maxp))
mult--;
- if (length <= maxp)
+ if (req->request.length <= maxp)
mult--;
trb->size |= DWC3_TRB_SIZE_PCM1(mult);
@@ -1032,8 +1064,8 @@
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
}
- /* always enable Interrupt on Missed ISOC */
- trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ if (!no_interrupt && !chain)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
break;
case USB_ENDPOINT_XFER_BULK:
@@ -1092,50 +1124,6 @@
trace_dwc3_prepare_trb(dep, trb);
}
-/**
- * dwc3_prepare_one_trb - setup one TRB from one request
- * @dep: endpoint for which this request is prepared
- * @req: dwc3_request pointer
- * @trb_length: buffer size of the TRB
- * @chain: should this TRB be chained to the next?
- * @node: only for isochronous endpoints. First TRB needs different type.
- * @use_bounce_buffer: set to use bounce buffer
- * @must_interrupt: set to interrupt on TRB completion
- */
-static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned int trb_length,
- unsigned int chain, unsigned int node, bool use_bounce_buffer,
- bool must_interrupt)
-{
- struct dwc3_trb *trb;
- dma_addr_t dma;
- unsigned int stream_id = req->request.stream_id;
- unsigned int short_not_ok = req->request.short_not_ok;
- unsigned int no_interrupt = req->request.no_interrupt;
- unsigned int is_last = req->request.is_last;
-
- if (use_bounce_buffer)
- dma = dep->dwc->bounce_addr;
- else if (req->request.num_sgs > 0)
- dma = sg_dma_address(req->start_sg);
- else
- dma = req->request.dma;
-
- trb = &dep->trb_pool[dep->trb_enqueue];
-
- if (!req->trb) {
- dwc3_gadget_move_started_request(req);
- req->trb = trb;
- req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- }
-
- req->num_trbs++;
-
- __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
- stream_id, short_not_ok, no_interrupt, is_last,
- must_interrupt);
-}
-
static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
{
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
@@ -2079,7 +2067,7 @@
if (!dep)
continue;
- dwc3_remove_requests(dwc, dep);
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
}
}
@@ -2132,14 +2120,42 @@
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);
+static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->connected = false;
+
+ /*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.8 Table 4-7, it states that for a device-initiated
+ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+ * command for any active transfers" before clearing the RunStop
+ * bit.
+ */
+ dwc3_stop_active_transfers(dwc);
+ __dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /*
+ * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ * driver needs to acknowledge them before the controller can halt.
+	 * Simply let the interrupt handler acknowledge and handle the
+	 * remaining events generated by the controller while polling for
+ * DSTS.DEVCTLHLT.
+ */
+ return dwc3_gadget_run_stop(dwc, false, false);
+}
+
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
- unsigned long flags;
int ret;
is_on = !!is_on;
+ dwc->softconnect = is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in process, complete it and get the core into setup phase.
@@ -2175,49 +2191,26 @@
return 0;
}
- /*
- * Synchronize and disable any further event handling while controller
- * is being enabled/disabled.
- */
- disable_irq(dwc->irq_gadget);
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- if (!is_on) {
- u32 count;
-
- dwc->connected = false;
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 4.1.8 Table 4-7, it states that for a device-initiated
- * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
- * command for any active transfers" before clearing the RunStop
- * bit.
- */
- dwc3_stop_active_transfers(dwc);
- __dwc3_gadget_stop(dwc);
-
- /*
- * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
- * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
- * "software needs to acknowledge the events that are generated
- * (by writing to GEVNTCOUNTn) while it is waiting for this bit
- * to be set to '1'."
- */
- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
- count &= DWC3_GEVNTCOUNT_MASK;
- if (count > 0) {
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
- dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
- dwc->ev_buf->length;
- }
- } else {
- __dwc3_gadget_start(dwc);
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
}
- ret = dwc3_gadget_run_stop(dwc, is_on, false);
- spin_unlock_irqrestore(&dwc->lock, flags);
- enable_irq(dwc->irq_gadget);
+ if (!is_on) {
+ ret = dwc3_gadget_soft_disconnect(dwc);
+ } else {
+ /*
+ * In the Synopsys DWC_usb31 1.90a programming guide section
+		 * 4.1.9, it specifies that a reconnect after a
+		 * device-initiated disconnect requires a core soft reset
+ * (DCTL.CSftRst) before enabling the run/stop bit.
+ */
+ dwc3_core_soft_reset(dwc);
+
+ dwc3_event_buffers_setup(dwc);
+ __dwc3_gadget_start(dwc);
+ ret = dwc3_gadget_run_stop(dwc, true, false);
+ }
pm_runtime_put(dwc->dev);
@@ -2807,6 +2800,10 @@
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
+ if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
+ DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
+ return 1;
+
if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
(trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
@@ -2859,6 +2856,7 @@
const struct dwc3_event_depevt *event,
struct dwc3_request *req, int status)
{
+ int request_status;
int ret;
if (req->request.num_mapped_sgs)
@@ -2879,7 +2877,35 @@
req->needs_extra_trb = false;
}
- dwc3_gadget_giveback(dep, req, status);
+ /*
+ * The event status only reflects the status of the TRB with IOC set.
+ * For the requests that don't set interrupt on completion, the driver
+ * needs to check and return the status of the completed TRBs associated
+ * with the request. Use the status of the last TRB of the request.
+ */
+ if (req->request.no_interrupt) {
+ struct dwc3_trb *trb;
+
+ trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
+ switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
+ case DWC3_TRBSTS_MISSED_ISOC:
+ /* Isoc endpoint only */
+ request_status = -EXDEV;
+ break;
+ case DWC3_TRB_STS_XFER_IN_PROG:
+ /* Applicable when End Transfer with ForceRM=0 */
+ case DWC3_TRBSTS_SETUP_PENDING:
+ /* Control endpoint only */
+ case DWC3_TRBSTS_OK:
+ default:
+ request_status = 0;
+ break;
+ }
+ } else {
+ request_status = status;
+ }
+
+ dwc3_gadget_giveback(dep, req, request_status);
out:
return ret;
@@ -2931,14 +2957,14 @@
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
- if (!dep->endpoint.desc)
- return no_started_trb;
-
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
goto out;
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list) &&
(list_empty(&dep->pending_list) || status == -EXDEV))
@@ -3752,7 +3778,6 @@
}
evt->count = 0;
- evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
/* Unmask interrupt */
@@ -3765,6 +3790,9 @@
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
+ /* Keep the clearing of DWC3_EVENT_PENDING at the end */
+ evt->flags &= ~DWC3_EVENT_PENDING;
+
return ret;
}
@@ -4029,7 +4057,7 @@
{
int ret;
- if (!dwc->gadget_driver)
+ if (!dwc->gadget_driver || !dwc->softconnect)
return 0;
ret = __dwc3_gadget_start(dwc);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index e195176..b06ab85 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -130,4 +130,5 @@
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 9b7fa53..d51ea1c 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1443,6 +1443,8 @@
usb_ep_autoconfig_reset(cdev->gadget);
spin_lock_irqsave(&gi->spinlock, flags);
cdev->gadget = NULL;
+ cdev->deactivations = 0;
+ gadget->deactivated = false;
set_gadget_data(gadget, NULL);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 236ecc9..c13bb29 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -87,7 +87,7 @@
u8 printer_cdev_open;
wait_queue_head_t wait;
unsigned q_len;
- char *pnp_string; /* We don't own memory! */
+ char **pnp_string; /* We don't own memory! */
struct usb_function function;
};
@@ -999,16 +999,16 @@
if ((wIndex>>8) != dev->interface)
break;
- if (!dev->pnp_string) {
+ if (!*dev->pnp_string) {
value = 0;
break;
}
- value = strlen(dev->pnp_string);
+ value = strlen(*dev->pnp_string);
buf[0] = (value >> 8) & 0xFF;
buf[1] = value & 0xFF;
- memcpy(buf + 2, dev->pnp_string, value);
+ memcpy(buf + 2, *dev->pnp_string, value);
DBG(dev, "1284 PNP String: %x %s\n", value,
- dev->pnp_string);
+ *dev->pnp_string);
break;
case GET_PORT_STATUS: /* Get Port Status */
@@ -1471,7 +1471,7 @@
kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
- dev->pnp_string = opts->pnp_string;
+ dev->pnp_string = &opts->pnp_string;
dev->q_len = opts->q_len;
mutex_unlock(&opts->lock);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index f48a00e..fecdba8 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -883,17 +883,42 @@
kfree(uvc);
}
-static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
+static void uvc_function_unbind(struct usb_configuration *c,
+ struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ long wait_ret = 1;
- uvcg_info(f, "%s\n", __func__);
+ uvcg_info(f, "%s()\n", __func__);
+
+ /* If we know we're connected via v4l2, then there should be a cleanup
+ * of the device from userspace either via UVC_EVENT_DISCONNECT or
+	 * through the video device removal uevent. Allow some time for the
+ * application to close out before things get deleted.
+ */
+ if (uvc->func_connected) {
+ uvcg_dbg(f, "waiting for clean disconnect\n");
+ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+ uvc->func_connected == false, msecs_to_jiffies(500));
+ uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
+ }
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
v4l2_device_unregister(&uvc->v4l2_dev);
+ if (uvc->func_connected) {
+ /* Wait for the release to occur to ensure there are no longer any
+ * pending operations that may cause panics when resources are cleaned
+ * up.
+ */
+ uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
+ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+ uvc->func_connected == false, msecs_to_jiffies(1000));
+ uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
+ }
+
usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
kfree(uvc->control_buf);
@@ -912,6 +937,7 @@
mutex_init(&uvc->video.mutex);
uvc->state = UVC_STATE_DISCONNECTED;
+ init_waitqueue_head(&uvc->func_connected_queue);
opts = fi_to_f_uvc_opts(fi);
mutex_lock(&opts->lock);
@@ -942,7 +968,7 @@
/* Register the function. */
uvc->func.name = "uvc";
uvc->func.bind = uvc_function_bind;
- uvc->func.unbind = uvc_unbind;
+ uvc->func.unbind = uvc_function_unbind;
uvc->func.get_alt = uvc_function_get_alt;
uvc->func.set_alt = uvc_function_set_alt;
uvc->func.disable = uvc_function_disable;
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index f7e6c42..0219849 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -294,8 +294,10 @@
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
+ /*
+ * Convert to Minutes-Seconds-Frames.
+ * Sector size is already set to 2048 bytes.
+ */
addr += 2*75; /* Lead-in occupies 2 seconds */
dest[3] = addr % 75; /* Frames */
addr /= 75;
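
For reference, a brief worked example of the MSF arithmetic above (illustrative values; the seconds/minutes split continues past this hunk):

	/* Worked example: logical block 300, msf = 1.
	 * addr is already in 2048-byte CD frames, so the removed ">>= 2"
	 * would have wrongly shrunk it to 75 before conversion.
	 *
	 *   addr    = 300 + 2 * 75 = 450      2-second lead-in added
	 *   dest[3] = 450 % 75     = 0        frames
	 *   addr    = 450 / 75     = 6        i.e. 6 s, 0 min -> MSF 00:06:00
	 */
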
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index a40be8b..64ef97a 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -772,9 +772,13 @@
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
- if (get_ether_addr(dev_addr, net->dev_addr))
+ if (get_ether_addr(dev_addr, net->dev_addr)) {
+ net->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
+ } else {
+ net->addr_assign_type = NET_ADDR_SET;
+ }
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
"using random %s ethernet address\n", "host");
@@ -831,6 +835,9 @@
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
+ /* by default we always have a random MAC address */
+ net->addr_assign_type = NET_ADDR_RANDOM;
+
skb_queue_head_init(&dev->rx_frames);
/* network device setup */
@@ -868,7 +875,6 @@
g = dev->gadget;
memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
- net->addr_assign_type = NET_ADDR_RANDOM;
status = register_netdev(net);
if (status < 0) {
@@ -908,6 +914,7 @@
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+ net->addr_assign_type = NET_ADDR_SET;
return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 893aaa7..6c4fc49 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/usb/composite.h>
#include <linux/videodev2.h>
+#include <linux/wait.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
@@ -118,6 +119,7 @@
struct usb_function func;
struct uvc_video video;
bool func_connected;
+ wait_queue_head_t func_connected_queue;
/* Descriptors */
struct {
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 61e2c94..cab1e30 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -242,6 +242,8 @@
buf->state = UVC_BUF_STATE_ERROR;
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
}
+ queue->buf_used = 0;
+
/* This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_queue_buffer and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 197c26f..65abd55 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -252,10 +252,11 @@
static void uvc_v4l2_disable(struct uvc_device *uvc)
{
- uvc->func_connected = false;
uvc_function_disconnect(uvc);
uvcg_video_enable(&uvc->video, 0);
uvcg_free_buffers(&uvc->video.queue);
+ uvc->func_connected = false;
+ wake_up_interruptible(&uvc->func_connected_queue);
}
static int
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 633e23d..5ce548c 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -159,7 +159,7 @@
break;
default:
- uvcg_info(&video->uvc->func,
+ uvcg_warn(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 454860d..cd09747 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -362,6 +362,7 @@
spin_unlock_irq (&epdata->dev->lock);
DBG (epdata->dev, "endpoint gone\n");
+ wait_for_completion(&done);
epdata->status = -ENODEV;
}
}
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 33efa69..b496ca9 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
@@ -35,6 +36,9 @@
/*----------------------------------------------------------------------*/
+static DEFINE_IDA(driver_id_numbers);
+#define DRIVER_DRIVER_NAME_LENGTH_MAX 32
+
#define RAW_EVENT_QUEUE_SIZE 16
struct raw_event_queue {
@@ -144,6 +148,7 @@
STATE_DEV_INVALID = 0,
STATE_DEV_OPENED,
STATE_DEV_INITIALIZED,
+ STATE_DEV_REGISTERING,
STATE_DEV_RUNNING,
STATE_DEV_CLOSED,
STATE_DEV_FAILED
@@ -159,6 +164,9 @@
/* Reference to misc device: */
struct device *dev;
+ /* Make driver names unique */
+ int driver_id_number;
+
/* Protected by lock: */
enum dev_state state;
bool gadget_registered;
@@ -187,6 +195,7 @@
spin_lock_init(&dev->lock);
init_completion(&dev->ep0_done);
raw_event_queue_init(&dev->queue);
+ dev->driver_id_number = -1;
return dev;
}
@@ -197,6 +206,9 @@
kfree(dev->udc_name);
kfree(dev->driver.udc_name);
+ kfree(dev->driver.driver.name);
+ if (dev->driver_id_number >= 0)
+ ida_free(&driver_id_numbers, dev->driver_id_number);
if (dev->req) {
if (dev->ep0_urb_queued)
usb_ep_dequeue(dev->gadget->ep0, dev->req);
@@ -417,9 +429,11 @@
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
int ret = 0;
+ int driver_id_number;
struct usb_raw_init arg;
char *udc_driver_name;
char *udc_device_name;
+ char *driver_driver_name;
unsigned long flags;
if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
@@ -438,36 +452,43 @@
return -EINVAL;
}
+ driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
+ if (driver_id_number < 0)
+ return driver_id_number;
+
+ driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
+ if (!driver_driver_name) {
+ ret = -ENOMEM;
+ goto out_free_driver_id_number;
+ }
+ snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
+ DRIVER_NAME ".%d", driver_id_number);
+
udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
- if (!udc_driver_name)
- return -ENOMEM;
+ if (!udc_driver_name) {
+ ret = -ENOMEM;
+ goto out_free_driver_driver_name;
+ }
ret = strscpy(udc_driver_name, &arg.driver_name[0],
UDC_NAME_LENGTH_MAX);
- if (ret < 0) {
- kfree(udc_driver_name);
- return ret;
- }
+ if (ret < 0)
+ goto out_free_udc_driver_name;
ret = 0;
udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
if (!udc_device_name) {
- kfree(udc_driver_name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_udc_driver_name;
}
ret = strscpy(udc_device_name, &arg.device_name[0],
UDC_NAME_LENGTH_MAX);
- if (ret < 0) {
- kfree(udc_driver_name);
- kfree(udc_device_name);
- return ret;
- }
+ if (ret < 0)
+ goto out_free_udc_device_name;
ret = 0;
spin_lock_irqsave(&dev->lock, flags);
if (dev->state != STATE_DEV_OPENED) {
dev_dbg(dev->dev, "fail, device is not opened\n");
- kfree(udc_driver_name);
- kfree(udc_device_name);
ret = -EINVAL;
goto out_unlock;
}
@@ -482,14 +503,25 @@
dev->driver.suspend = gadget_suspend;
dev->driver.resume = gadget_resume;
dev->driver.reset = gadget_reset;
- dev->driver.driver.name = DRIVER_NAME;
+ dev->driver.driver.name = driver_driver_name;
dev->driver.udc_name = udc_device_name;
dev->driver.match_existing_only = 1;
+ dev->driver_id_number = driver_id_number;
dev->state = STATE_DEV_INITIALIZED;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
+out_free_udc_device_name:
+ kfree(udc_device_name);
+out_free_udc_driver_name:
+ kfree(udc_driver_name);
+out_free_driver_driver_name:
+ kfree(driver_driver_name);
+out_free_driver_id_number:
+ ida_free(&driver_id_numbers, driver_id_number);
return ret;
}
@@ -507,6 +539,7 @@
ret = -EINVAL;
goto out_unlock;
}
+ dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
ret = usb_gadget_probe_driver(&dev->driver);
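
The raw_gadget hunks above make each bound gadget driver name unique by combining an IDA-allocated id with the driver name. The core allocation pattern, isolated as a sketch (the 32-byte length mirrors DRIVER_DRIVER_NAME_LENGTH_MAX from the patch; the helper name and base string handling are illustrative):

	#include <linux/idr.h>
	#include <linux/slab.h>

	static DEFINE_IDA(example_id_numbers);	/* mirrors driver_id_numbers */
	#define EXAMPLE_NAME_LEN 32		/* mirrors DRIVER_DRIVER_NAME_LENGTH_MAX */

	/* Allocate a unique "<base>.<id>" name; caller must kfree() the name
	 * and ida_free() the id on teardown, as the patch does in dev_free()
	 * and on the error paths of raw_ioctl_init().
	 */
	static char *alloc_unique_name(const char *base, int *id_out)
	{
		char *name;
		int id = ida_alloc(&example_id_numbers, GFP_KERNEL);

		if (id < 0)
			return NULL;
		name = kmalloc(EXAMPLE_NAME_LEN, GFP_KERNEL);
		if (!name) {
			ida_free(&example_id_numbers, id);
			return NULL;
		}
		snprintf(name, EXAMPLE_NAME_LEN, "%s.%d", base, id);
		*id_out = id;
		return name;
	}
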
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 933e80d..f28e1bb 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -311,7 +311,7 @@
config USB_AMD5536UDC
tristate "AMD5536 UDC"
- depends on USB_PCI
+ depends on USB_PCI && HAS_DMA
select USB_SNP_CORE
help
The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index bfd8e77..3a4ccc7 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -1033,8 +1033,10 @@
/* Initialize vhub String Descriptors. */
INIT_LIST_HEAD(&vhub->vhub_str_desc);
desc_np = of_get_child_by_name(vhub_np, "vhub-strings");
- if (desc_np)
+ if (desc_np) {
ret = ast_vhub_of_parse_str_desc(vhub, desc_np);
+ of_node_put(desc_np);
+ }
else
ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 248426a..5f0b3fd 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -151,6 +151,7 @@
bdc->delayed_status = false;
bdc->reinit = reinit;
bdc->test_mode = false;
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
}
/* TNotify wkaeup timer */
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 3f1c62a..314cb5e 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3015,6 +3015,7 @@
}
udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
if (!udc->isp1301_i2c_client) {
return -EPROBE_DEFER;
}
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 57ee72f..3ebc8c5 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -32,9 +32,6 @@
#include <linux/workqueue.h>
/* XUSB_DEV registers */
-#define SPARAM 0x000
-#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
-#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
#define DB 0x004
#define DB_TARGET_MASK GENMASK(15, 8)
#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
@@ -275,8 +272,10 @@
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
-BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
+BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
+BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
+BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
@@ -1557,6 +1556,9 @@
ep_reload(xudc, ep->index);
ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_rsvd(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_splitxstate(ep->context, 0);
ep_ctx_write_seq_num(ep->context, 0);
ep_reload(xudc, ep->index);
@@ -2812,7 +2814,10 @@
xudc->setup_seq_num = 0;
xudc->queued_setup_packet = false;
- ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
+ ep_ctx_write_rsvd(ep0->context, 0);
+ ep_ctx_write_partial_td(ep0->context, 0);
+ ep_ctx_write_splitxstate(ep0->context, 0);
+ ep_ctx_write_seq_num(ep0->context, 0);
deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
@@ -3295,11 +3300,6 @@
unsigned int i;
u32 val;
- val = xudc_readl(xudc, SPARAM);
- val &= ~(SPARAM_ERSTMAX_MASK);
- val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
- xudc_writel(xudc, val, SPARAM);
-
for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
sizeof(*xudc->event_ring[i]));
@@ -3693,15 +3693,15 @@
int err;
xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
- if (IS_ERR(xudc->genpd_dev_device)) {
- err = PTR_ERR(xudc->genpd_dev_device);
+ if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA;
dev_err(dev, "failed to get device power domain: %d\n", err);
return err;
}
xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
- if (IS_ERR(xudc->genpd_dev_ss)) {
- err = PTR_ERR(xudc->genpd_dev_ss);
+ if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA;
dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
return err;
}
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 2df52f7..7558cc4 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -285,7 +285,7 @@
{
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
- if (IS_ERR_OR_NULL(usb_dev->gpio_desc))
+ if (!usb_dev->gpio_desc)
return;
gpiod_set_value(usb_dev->gpio_desc, val);
@@ -406,9 +406,11 @@
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node)
- usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
- GPIOD_OUT_HIGH);
+ usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(usb_dev->gpio_desc))
+ return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc),
+ "error obtaining VCC GPIO");
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index e87cf3a..638f03b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -21,6 +21,9 @@
/* defined here to avoid adding to pci_ids.h for single instance use */
#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
+#define PCI_VENDOR_ID_ASPEED 0x1a03
+#define PCI_DEVICE_ID_ASPEED_EHCI 0x2603
+
/*-------------------------------------------------------------------------*/
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
@@ -222,6 +225,12 @@
ehci->has_synopsys_hc_bug = 1;
}
break;
+ case PCI_VENDOR_ID_ASPEED:
+ if (pdev->device == PCI_DEVICE_ID_ASPEED_EHCI) {
+ ehci_info(ehci, "applying Aspeed HC workaround\n");
+ ehci->is_aspeed = 1;
+ }
+ break;
}
/* optional debug port, normally in the first BAR */
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6bbaee7..28a1969 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -148,6 +148,7 @@
} else {
ehci->has_amcc_usb23 = 1;
}
+ of_node_put(np);
}
if (of_get_property(dn, "big-endian", NULL)) {
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 3055d9a..3e5c547 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1541,10 +1541,12 @@
iounmap(isp116x->data_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
iounmap(isp116x->addr_reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, 2);
+ if (res)
+ release_mem_region(res->start, 2);
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 85878e8..106a6bc 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -164,6 +164,7 @@
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
if (!isp1301_i2c_client)
return -EPROBE_DEFER;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 45f7cce..98e4672 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -169,6 +169,7 @@
release_mem_region(res.start, 0x4);
} else
pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
+ of_node_put(np);
}
irq_dispose_mapping(irq);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e832909..6df2881 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3908,8 +3908,10 @@
}
}
+ spin_unlock_irq(&oxu->lock);
/* turn off now-idle HC */
del_timer_sync(&oxu->watchdog);
+ spin_lock_irq(&oxu->lock);
ehci_halt(oxu);
hcd->state = HC_STATE_SUSPENDED;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 71b018e..7bb3067 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -676,7 +676,7 @@
}
pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
xhci->test_mode = 0;
- return xhci_reset(xhci);
+ return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
}
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
@@ -1002,6 +1002,9 @@
if (link_state == XDEV_U2)
*status |= USB_PORT_STAT_L1;
if (link_state == XDEV_U0) {
+ if (bus_state->resume_done[portnum])
+ usb_hcd_end_port_resume(&port->rhub->hcd->self,
+ portnum);
bus_state->resume_done[portnum] = 0;
clear_bit(portnum, &bus_state->resuming_ports);
if (bus_state->suspended_ports & (1 << portnum)) {
@@ -1345,7 +1348,7 @@
}
spin_unlock_irqrestore(&xhci->lock, flags);
if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
- msecs_to_jiffies(100)))
+ msecs_to_jiffies(500)))
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
hcd->self.busnum, wIndex + 1);
spin_lock_irqsave(&xhci->lock, flags);
@@ -1558,6 +1561,17 @@
status = bus_state->resuming_ports;
+ /*
+ * SS devices are only visible to roothub after link training completes.
+ * Keep polling roothubs for a grace period after xHC start
+ */
+ if (xhci->run_graceperiod) {
+ if (time_before(jiffies, xhci->run_graceperiod))
+ status = 1;
+ else
+ xhci->run_graceperiod = 0;
+ }
+
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ed380ee..d1a4230 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -659,7 +659,7 @@
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
- goto cleanup_ctx;
+ goto cleanup_ring_array;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
@@ -720,6 +720,11 @@
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+cleanup_ring_array:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
@@ -910,15 +915,19 @@
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
- /* Endpoints on the TT/root port lists should have been removed
- * when usb_disable_device() was called for the device.
- * We can't drop them anyway, because the udev might have gone
- * away by this point, and we can't tell what speed it was.
+ /*
+ * Endpoints are normally deleted from the bandwidth list when
+ * endpoints are dropped, before device is freed.
+ * If host is dying or being removed then endpoints aren't
+ * dropped cleanly, so delete the endpoint from list here.
+ * Only applicable for hosts with software bandwidth checking.
*/
- if (!list_empty(&dev->eps[i].bw_endpoint_list))
- xhci_warn(xhci, "Slot %u endpoint %u "
- "not removed from BW list!\n",
- slot_id, i);
+
+ if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
+ list_del_init(&dev->eps[i].bw_endpoint_list);
+ xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
+ slot_id, i);
+ }
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);
@@ -2595,7 +2604,7 @@
fail:
xhci_halt(xhci);
- xhci_reset(xhci);
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 8950d1f..86c4bc9 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -25,6 +25,13 @@
*/
#define TT_MICROFRAMES_MAX 9
+/* schedule error type */
+#define ESCH_SS_Y6 1001
+#define ESCH_SS_OVERLAP 1002
+#define ESCH_CS_OVERFLOW 1003
+#define ESCH_BW_OVERFLOW 1004
+#define ESCH_FIXME 1005
+
/* mtk scheduler bitmasks */
#define EP_BPKTS(p) ((p) & 0x7f)
#define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
@@ -32,6 +39,24 @@
#define EP_BOFFSET(p) ((p) & 0x3fff)
#define EP_BREPEAT(p) (((p) & 0x7fff) << 16)
+static char *sch_error_string(int err_num)
+{
+ switch (err_num) {
+ case ESCH_SS_Y6:
+ return "Can't schedule Start-Split in Y6";
+ case ESCH_SS_OVERLAP:
+ return "Can't find a suitable Start-Split location";
+ case ESCH_CS_OVERFLOW:
+ return "The last Complete-Split is greater than 7";
+ case ESCH_BW_OVERFLOW:
+ return "Bandwidth exceeds the maximum limit";
+ case ESCH_FIXME:
+ return "FIXME, to be resolved";
+ default:
+ return "Unknown";
+ }
+}
+
static int is_fs_or_ls(enum usb_device_speed speed)
{
return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
@@ -375,7 +400,6 @@
sch_ep->bw_budget_table[j];
}
}
- sch_ep->allocated = used;
}
static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
@@ -384,19 +408,20 @@
u32 num_esit, tmp;
int base;
int i, j;
+ u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+
+ if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP)
+ offset++;
+
for (i = 0; i < num_esit; i++) {
base = offset + i * sch_ep->esit;
- /*
- * Compared with hs bus, no matter what ep type,
- * the hub will always delay one uframe to send data
- */
- for (j = 0; j < sch_ep->cs_count; j++) {
+ for (j = 0; j < uframes; j++) {
tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
if (tmp > FS_PAYLOAD_MAX)
- return -ERANGE;
+ return -ESCH_BW_OVERFLOW;
}
}
@@ -406,15 +431,11 @@
static int check_sch_tt(struct usb_device *udev,
struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 extra_cs_count;
- u32 fs_budget_start;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
- int i;
start_ss = offset % 8;
- fs_budget_start = (start_ss + 1) % 8;
if (sch_ep->ep_type == ISOC_OUT_EP) {
last_ss = start_ss + sch_ep->cs_count - 1;
@@ -424,11 +445,7 @@
* must never schedule Start-Split in Y6
*/
if (!(start_ss == 7 || last_ss < 6))
- return -ERANGE;
-
- for (i = 0; i < sch_ep->cs_count; i++)
- if (test_bit(offset + i, tt->ss_bit_map))
- return -ERANGE;
+ return -ESCH_SS_Y6;
} else {
u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
@@ -438,29 +455,24 @@
* must never schedule Start-Split in Y6
*/
if (start_ss == 6)
- return -ERANGE;
+ return -ESCH_SS_Y6;
/* one uframe for ss + one uframe for idle */
start_cs = (start_ss + 2) % 8;
last_cs = start_cs + cs_count - 1;
if (last_cs > 7)
- return -ERANGE;
+ return -ESCH_CS_OVERFLOW;
if (sch_ep->ep_type == ISOC_IN_EP)
extra_cs_count = (last_cs == 7) ? 1 : 2;
else /* ep_type : INTR IN / INTR OUT */
- extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
+ extra_cs_count = 1;
cs_count += extra_cs_count;
if (cs_count > 7)
cs_count = 7; /* HW limit */
- for (i = 0; i < cs_count + 2; i++) {
- if (test_bit(offset + i, tt->ss_bit_map))
- return -ERANGE;
- }
-
sch_ep->cs_count = cs_count;
/* one for ss, the other for idle */
sch_ep->num_budget_microframes = cs_count + 2;
@@ -482,28 +494,24 @@
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
u32 base, num_esit;
int bw_updated;
- int bits;
int i, j;
+ int offset = sch_ep->offset;
+ u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
- bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
if (used)
bw_updated = sch_ep->bw_cost_per_microframe;
else
bw_updated = -sch_ep->bw_cost_per_microframe;
+ if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP)
+ offset++;
+
for (i = 0; i < num_esit; i++) {
- base = sch_ep->offset + i * sch_ep->esit;
+ base = offset + i * sch_ep->esit;
- for (j = 0; j < bits; j++) {
- if (used)
- set_bit(base + j, tt->ss_bit_map);
- else
- clear_bit(base + j, tt->ss_bit_map);
- }
-
- for (j = 0; j < sch_ep->cs_count; j++)
+ for (j = 0; j < uframes; j++)
tt->fs_bus_bw[base + j] += bw_updated;
}
@@ -513,21 +521,48 @@
list_del(&sch_ep->tt_endpoint);
}
+static int load_ep_bw(struct usb_device *udev, struct mu3h_sch_bw_info *sch_bw,
+ struct mu3h_sch_ep_info *sch_ep, bool loaded)
+{
+ if (sch_ep->sch_tt)
+ update_sch_tt(udev, sch_ep, loaded);
+
+ /* update bus bandwidth info */
+ update_bus_bw(sch_bw, sch_ep, loaded);
+ sch_ep->allocated = loaded;
+
+ return 0;
+}
+
+static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
+{
+ u32 boundary = sch_ep->esit;
+
+ if (sch_ep->sch_tt) { /* LS/FS with TT */
+ /*
+ * tune for CS, normally esit >= 8 for FS/LS,
+ * do not add one for other types to avoid accessing the
+ * array out of bounds
+ */
+ if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1)
+ boundary--;
+ }
+
+ return boundary;
+}
+
static int check_sch_bw(struct usb_device *udev,
struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
{
u32 offset;
- u32 esit;
u32 min_bw;
u32 min_index;
u32 worst_bw;
u32 bw_boundary;
+ u32 esit_boundary;
u32 min_num_budget;
u32 min_cs_count;
- bool tt_offset_ok = false;
- int ret;
-
- esit = sch_ep->esit;
+ int ret = 0;
/*
* Search through all possible schedule microframes.
@@ -537,16 +572,15 @@
min_index = 0;
min_cs_count = sch_ep->cs_count;
min_num_budget = sch_ep->num_budget_microframes;
- for (offset = 0; offset < esit; offset++) {
- if (is_fs_or_ls(udev->speed)) {
+ esit_boundary = get_esit_boundary(sch_ep);
+ for (offset = 0; offset < sch_ep->esit; offset++) {
+ if (sch_ep->sch_tt) {
ret = check_sch_tt(udev, sch_ep, offset);
if (ret)
continue;
- else
- tt_offset_ok = true;
}
- if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
+ if ((offset + sch_ep->num_budget_microframes) > esit_boundary)
break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
@@ -569,35 +603,21 @@
/* check bandwidth */
if (min_bw > bw_boundary)
- return -ERANGE;
+ return ret ? ret : -ESCH_BW_OVERFLOW;
sch_ep->offset = min_index;
sch_ep->cs_count = min_cs_count;
sch_ep->num_budget_microframes = min_num_budget;
- if (is_fs_or_ls(udev->speed)) {
- /* all offset for tt is not ok*/
- if (!tt_offset_ok)
- return -ERANGE;
-
- update_sch_tt(udev, sch_ep, 1);
- }
-
- /* update bus bandwidth info */
- update_bus_bw(sch_bw, sch_ep, 1);
-
- return 0;
+ return load_ep_bw(udev, sch_bw, sch_ep, true);
}
static void destroy_sch_ep(struct usb_device *udev,
struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
{
/* only release ep bw check passed by check_sch_bw() */
- if (sch_ep->allocated) {
- update_bus_bw(sch_bw, sch_ep, 0);
- if (sch_ep->sch_tt)
- update_sch_tt(udev, sch_ep, 0);
- }
+ if (sch_ep->allocated)
+ load_ep_bw(udev, sch_bw, sch_ep, false);
if (sch_ep->sch_tt)
drop_tt(udev);
@@ -760,7 +780,8 @@
ret = check_sch_bw(udev, sch_bw, sch_ep);
if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
+ xhci_err(xhci, "Not enough bandwidth! (%s)\n",
+ sch_error_string(-ret));
return -ENOSPC;
}
}
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 2fc0568..3e2c607 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -20,14 +20,12 @@
#define XHCI_MTK_MAX_ESIT 64
/**
- * @ss_bit_map: used to avoid start split microframes overlay
* @fs_bus_bw: array to keep track of bandwidth already used for FS
* @ep_list: Endpoints using this TT
* @usb_tt: usb TT related
* @tt_port: TT port number
*/
struct mu3h_sch_tt {
- DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
struct list_head ep_list;
struct usb_tt *usb_tt;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index dafb58f..0ee11a9 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -58,20 +58,12 @@
#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 0x161a
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 0x161b
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 0x161d
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c
-#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
@@ -250,6 +242,10 @@
xhci->quirks |= XHCI_MISSING_CAS;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)
+ xhci->quirks |= XHCI_RESET_TO_DEFAULT;
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI ||
@@ -260,8 +256,7 @@
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -294,8 +289,14 @@
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
- pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
+ /*
+ * try to tame the ASMedia 1042 controller which reports 0.96
+ * but appears to behave more like 1.0
+ */
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -324,15 +325,8 @@
pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
xhci->quirks |= XHCI_NO_SOFT_RETRY;
- if (pdev->vendor == PCI_VENDOR_ID_AMD &&
- (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
+ /* xHC spec requires PCI devices to support D3hot and D3cold */
+ if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index dc570ce..972a44b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -134,7 +134,7 @@
};
static const struct xhci_plat_priv xhci_plat_brcm = {
- .quirks = XHCI_RESET_ON_RESUME,
+ .quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
};
static const struct of_device_id usb_xhci_of_match[] = {
@@ -447,7 +447,16 @@
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend.
*/
- return xhci_suspend(xhci, device_may_wakeup(dev));
+ ret = xhci_suspend(xhci, device_may_wakeup(dev));
+ if (ret)
+ return ret;
+
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_disable_unprepare(xhci->clk);
+ clk_disable_unprepare(xhci->reg_clk);
+ }
+
+ return 0;
}
static int __maybe_unused xhci_plat_resume(struct device *dev)
@@ -456,6 +465,11 @@
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_prepare_enable(xhci->clk);
+ clk_prepare_enable(xhci->reg_clk);
+ }
+
ret = xhci_priv_resume_quirk(hcd);
if (ret)
return ret;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 76389c0..fa3a7ac 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2969,6 +2969,8 @@
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_ring_deq = xhci->event_ring->dequeue;
+
event_loop = 0;
}
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 50bb91b..246a3d2 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1042,15 +1042,15 @@
int err;
tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
- if (IS_ERR(tegra->genpd_dev_host)) {
- err = PTR_ERR(tegra->genpd_dev_host);
+ if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) {
+ err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA;
dev_err(dev, "failed to get host pm-domain: %d\n", err);
return err;
}
tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
- if (IS_ERR(tegra->genpd_dev_ss)) {
- err = PTR_ERR(tegra->genpd_dev_ss);
+ if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) {
+ err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA;
dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
return err;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49f7429..c968dd8 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -66,7 +66,7 @@
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*/
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
u32 result;
int ret;
@@ -74,7 +74,7 @@
ret = readl_poll_timeout_atomic(ptr, result,
(result & mask) == done ||
result == U32_MAX,
- 1, usec);
+ 1, timeout_us);
if (result == U32_MAX) /* card removed */
return -ENODEV;
@@ -149,9 +149,11 @@
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
+ }
return ret;
}
@@ -163,7 +165,7 @@
* Transactions will be terminated immediately, and operational registers
* will be set to their defaults.
*/
-int xhci_reset(struct xhci_hcd *xhci)
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
u32 command;
u32 state;
@@ -196,8 +198,7 @@
if (xhci->quirks & XHCI_INTEL_HOST)
udelay(1000);
- ret = xhci_handshake(&xhci->op_regs->command,
- CMD_RESET, 0, 10 * 1000 * 1000);
+ ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
if (ret)
return ret;
@@ -210,8 +211,7 @@
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
- ret = xhci_handshake(&xhci->op_regs->status,
- STS_CNR, 0, 10 * 1000 * 1000);
+ ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
xhci->usb2_rhub.bus_state.port_c_suspend = 0;
xhci->usb2_rhub.bus_state.suspended_ports = 0;
@@ -732,7 +732,7 @@
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
xhci_halt(xhci);
- xhci_reset(xhci);
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
@@ -781,11 +781,28 @@
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
+ /* Don't poll the roothubs after shutdown. */
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
+ __func__, hcd->self.busnum);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
+ if (xhci->shared_hcd) {
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
+ }
+
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
- /* Workaround for spurious wakeups at shutdown with HSW */
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
- xhci_reset(xhci);
+
+ /*
+ * Workaround for spurious wakeups at shutdown with HSW, and for boot
+ * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
+ */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
+ xhci->quirks & XHCI_RESET_TO_DEFAULT)
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
+
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
@@ -1152,7 +1169,8 @@
/* re-initialize the HC on Restore Error, or Host Controller Error */
if (temp & (STS_SRE | STS_HCE)) {
reinit_xhc = true;
- xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ if (!xhci->broken_suspend)
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
}
if (reinit_xhc) {
@@ -1170,7 +1188,7 @@
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
- retval = xhci_reset(xhci);
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
spin_unlock_irq(&xhci->lock);
if (retval)
return retval;
@@ -5276,7 +5294,7 @@
xhci_dbg(xhci, "Resetting HCD\n");
/* Reset the internal HC memory state and registers. */
- retval = xhci_reset(xhci);
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
if (retval)
return retval;
xhci_dbg(xhci, "Reset complete\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 45584a2..059050f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -229,6 +229,9 @@
#define CMD_ETE (1 << 14)
/* bits 15:31 are reserved (and should be preserved on writes). */
+#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000)
+#define XHCI_RESET_SHORT_USEC (250 * 1000)
+
/* IMAN - Interrupt Management Register */
#define IMAN_IE (1 << 1)
#define IMAN_IP (1 << 0)
@@ -1813,7 +1816,7 @@
/* Host controller watchdog timer structures */
unsigned int xhc_state;
-
+ unsigned long run_graceperiod;
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -1885,6 +1888,8 @@
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
+#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
+#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -2068,11 +2073,11 @@
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_start(struct xhci_hcd *xhci);
-int xhci_reset(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
int xhci_run(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_shutdown(struct usb_hcd *hcd);
@@ -2378,7 +2383,7 @@
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STOP_RING:
- sprintf(str,
+ snprintf(str, size,
"%s: slot %d sp %d ep %d flags %c",
xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
@@ -2455,6 +2460,8 @@
unsigned int bit;
int ret = 0;
+ str[0] = '\0';
+
if (drop) {
ret = sprintf(str, "Drop:");
for_each_set_bit(bit, &drop, 32)
@@ -2612,8 +2619,11 @@
{
int ret = 0;
+ ret = sprintf(str, " 0x%08x", usbsts);
+
if (usbsts == ~(u32)0)
- return " 0xffffffff";
+ return str;
+
if (usbsts & STS_HALT)
ret += sprintf(str + ret, " HCHalted");
if (usbsts & STS_FATAL)
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index e9437a1..ea39243 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -177,10 +177,6 @@
bytes_read += bulk_read;
}
- /* reset the device */
-reset:
- ftip_command(dev, FTIP_RELEASE, 0, 0);
-
/* check for valid image */
/* right border should be black (0x00) */
for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH)
@@ -192,6 +188,10 @@
if (dev->bulk_in_buffer[bytes_read] != 0xFF)
return -EAGAIN;
+ /* reset the device */
+reset:
+ ftip_command(dev, FTIP_RELEASE, 0, 0);
+
/* should be IMGSIZE == 65040 */
dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n",
bytes_read);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 748139d..0be8efc 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -71,6 +71,7 @@
dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n");
usb_put_dev(priv->usbdev);
+ priv->usbdev = NULL;
kfree(priv);
}
@@ -736,7 +737,6 @@
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
- usb_put_dev(usbdev);
return 0;
probe_abort:
@@ -754,7 +754,6 @@
usb_set_intfdata(intf, NULL);
if (pp) {
priv = pp->private_data;
- priv->usbdev = NULL;
priv->pp = NULL;
dev_dbg(&intf->dev, "parport_remove_port\n");
parport_remove_port(pp);
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f48a23a..094e812 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1268,6 +1268,11 @@
{
/* don't do anything here: "fault" will set up page table entries */
vma->vm_ops = &mon_bin_vm_ops;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 04f666e..1673132 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -41,10 +41,8 @@
static void toggle_opstate(struct ssusb_mtk *ssusb)
{
- if (!ssusb->otg_switch.is_u3_drd) {
- mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
- mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
- }
+ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
}
/* only port0 supports dual-role mode */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index fb806b3..c273eee 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -760,6 +760,9 @@
musb_writew(epio, MUSB_RXCSR, csr);
buffer_aint_mapped:
+ fifo_count = min_t(unsigned int,
+ request->length - request->actual,
+ (unsigned int)fifo_count);
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 4232f1c..1d435e4 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -360,6 +360,7 @@
control_node = of_parse_phandle(np, "ctrl-module", 0);
if (control_node) {
control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
ret = -EINVAL;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 661a229..34b9f81 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -268,6 +268,13 @@
return -EPROBE_DEFER;
}
+ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ nop->vbus_draw = NULL;
+ if (IS_ERR(nop->vbus_draw))
+ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
+ "could not get vbus regulator\n");
+
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
index 24de64e..2d77ede 100644
--- a/drivers/usb/renesas_usbhs/rza.c
+++ b/drivers/usb/renesas_usbhs/rza.c
@@ -23,6 +23,10 @@
extal_clk = of_find_node_by_name(NULL, "extal");
of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb);
of_property_read_u32(extal_clk, "clock-frequency", &freq_extal);
+
+ of_node_put(usb_x1_clk);
+ of_node_put(extal_clk);
+
if (freq_usb == 0) {
if (freq_extal == 12000000) {
/* Select 12MHz XTAL */
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 4007fa2..169251e 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -66,6 +66,7 @@
- Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
+ - Nokia mobile phones
- Novatel Wireless GPS receivers
- Siemens USB/MPI adapter.
- ViVOtech ViVOpay USB device.
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index a2a38fc..97a250e 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -97,7 +97,10 @@
u8 mcr;
u8 msr;
u8 lcr;
+
unsigned long quirks;
+ u8 version;
+
unsigned long break_end;
};
@@ -256,8 +259,12 @@
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
+ *
+ * At least one device with version 0x27 appears to have this bit
+ * inverted.
*/
- val |= BIT(7);
+ if (priv->version > 0x27)
+ val |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
@@ -271,6 +278,9 @@
* (stop bits, parity and word length). Version 0x30 and above use
* CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
*/
+ if (priv->version < 0x30)
+ return 0;
+
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
if (r)
@@ -323,7 +333,9 @@
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r < 0)
goto out;
- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
+
+ priv->version = buffer[0];
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7ac6680..6b5ba61 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -134,6 +134,7 @@
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
@@ -198,6 +199,8 @@
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
{ USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
+ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
+ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b74621d..3bfa395 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1023,6 +1023,9 @@
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* Belimo Automation devices */
+ { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) },
+ { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) },
/* ICP DAS I-756xU devices */
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
@@ -1042,6 +1045,8 @@
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ /* Omron devices */
+ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
@@ -1315,8 +1320,7 @@
case 38400: div_value = ftdi_sio_b38400; break;
case 57600: div_value = ftdi_sio_b57600; break;
case 115200: div_value = ftdi_sio_b115200; break;
- } /* baud */
- if (div_value == 0) {
+ default:
dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n",
__func__, baud);
div_value = ftdi_sio_b9600;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index d1a9564..31c8cca 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -662,6 +662,12 @@
#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
+ * Omron corporation (https://www.omron.com)
+ */
+ #define OMRON_VID 0x0590
+ #define OMRON_CS1W_CIF31_PID 0x00b2
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
@@ -1569,6 +1575,12 @@
#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
/*
+ * Belimo Automation
+ */
+#define BELIMO_ZTH_PID 0x8050
+#define BELIMO_ZIP_PID 0xC811
+
+/*
* Unjo AB
*/
#define UNJO_VID 0x22B7
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c327d4c..03bcab3 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -168,6 +168,7 @@
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
@@ -206,6 +207,7 @@
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 52cbc35..9a6f742 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -212,6 +212,7 @@
//
// Definitions for other product IDs
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
+#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4)
#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index b878f4c..537ef27 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -162,6 +162,8 @@
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+#define UBLOX_VENDOR_ID 0x1546
+
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_H01 0x0800
@@ -240,7 +242,6 @@
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
-#define UBLOX_PRODUCT_R6XX 0x90fa
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -252,10 +253,14 @@
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
+#define QUECTEL_PRODUCT_EM05G 0x030a
+#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
+#define QUECTEL_PRODUCT_RM500K 0x7001
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -432,6 +437,12 @@
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
+#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
+#define CINTERION_PRODUCT_MV32_WA 0x00f1
+#define CINTERION_PRODUCT_MV32_WB 0x00f2
+#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
+#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -567,6 +578,13 @@
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
+/* OPPO products */
+#define OPPO_VENDOR_ID 0x22d9
+#define OPPO_PRODUCT_R11 0x276c
+
+/* Sierra Wireless products */
+#define SIERRA_VENDOR_ID 0x1199
+#define SIERRA_PRODUCT_EM9191 0x90d3
/* Device flags */
@@ -1110,8 +1128,16 @@
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
- { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
.driver_info = RSVD(3) },
+ /* u-blox products */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */
+ .driver_info = RSVD(4) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
@@ -1125,22 +1151,35 @@
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+ .driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ .driver_info = RSVD(3) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1217,6 +1256,10 @@
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
@@ -1233,6 +1276,8 @@
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
+ .driver_info = RSVD(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1267,6 +1312,7 @@
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
@@ -1969,6 +2015,18 @@
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
+ .driver_info = RSVD(0)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
+ .driver_info = RSVD(0) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2111,12 +2169,17 @@
.driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
@@ -2125,6 +2188,9 @@
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1bbe18f..16118d9 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -106,6 +106,7 @@
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
@@ -116,6 +117,7 @@
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6097ee8..732f9b1 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -35,6 +35,9 @@
#define ATEN_PRODUCT_UC232B 0x2022
#define ATEN_PRODUCT_ID2 0x2118
+#define IBM_VENDOR_ID 0x04b3
+#define IBM_PRODUCT_ID 0x4016
+
#define IODATA_VENDOR_ID 0x04bb
#define IODATA_PRODUCT_ID 0x0a03
#define IODATA_PRODUCT_ID_RSAQ5 0x0a0e
@@ -132,6 +135,7 @@
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LD381_PRODUCT_ID 0x0f7f
+#define HP_LM930_PRODUCT_ID 0x0f9b
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c18bf81..b1e844b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -166,6 +166,8 @@
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
+ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
+ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -175,6 +177,7 @@
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */
{DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 57fc3c3..018a27d 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -737,7 +737,8 @@
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to initialized
+ * resumed, but no need to hold it due to the tty-port initialized
+ * flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index bd23a7c..4c67478 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -91,6 +91,11 @@
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
+/* Nokia mobile phone driver */
+#define NOKIA_IDS() \
+ { USB_DEVICE(0x0421, 0x069a) } /* Nokia 130 (RM-1035) */
+DEVICE(nokia, NOKIA_IDS);
+
/* Novatel Wireless GPS driver */
#define NOVATEL_IDS() \
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
@@ -123,6 +128,7 @@
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
+ &nokia_device,
&novatel_gps_device,
&hp4x_device,
&suunto_device,
@@ -140,6 +146,7 @@
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
+ NOKIA_IDS(),
NOVATEL_IDS(),
HP4X_IDS(),
SUUNTO_IDS(),
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 27e3bb5..e8dd460 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -254,7 +254,7 @@
*
* Shut down a USB serial port. Serialized against activate by the
* tport mutex and kept to matching open/close pairs
- * of calls by the initialized flag.
+ * of calls by the tty-port initialized flag.
*
* Not called if tty is console.
*/
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b2285d5..628a75d 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -435,7 +435,8 @@
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to initialized
+ * resumed, but no need to hold it due to the tty-port initialized
+ * flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index ca3bd58..5576cfa 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -599,9 +599,8 @@
switch (command) {
case WHITEHEAT_GET_DTR_RTS:
info = usb_get_serial_port_data(port);
- memcpy(&info->mcr, command_info->result_buffer,
- sizeof(struct whiteheat_dr_info));
- break;
+ info->mcr = command_info->result_buffer[0];
+ break;
}
}
exit:
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 98c1aa5..c9ce1c2 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -237,36 +237,33 @@
#define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0))
-struct SD_STATUS {
- u8 Insert:1;
- u8 Ready:1;
- u8 MediaChange:1;
- u8 IsMMC:1;
- u8 HiCapacity:1;
- u8 HiSpeed:1;
- u8 WtP:1;
- u8 Reserved:1;
-};
+/* SD_STATUS bits */
+#define SD_Insert BIT(0)
+#define SD_Ready BIT(1)
+#define SD_MediaChange BIT(2)
+#define SD_IsMMC BIT(3)
+#define SD_HiCapacity BIT(4)
+#define SD_HiSpeed BIT(5)
+#define SD_WtP BIT(6)
+ /* Bit 7 reserved */
-struct MS_STATUS {
- u8 Insert:1;
- u8 Ready:1;
- u8 MediaChange:1;
- u8 IsMSPro:1;
- u8 IsMSPHG:1;
- u8 Reserved1:1;
- u8 WtP:1;
- u8 Reserved2:1;
-};
+/* MS_STATUS bits */
+#define MS_Insert BIT(0)
+#define MS_Ready BIT(1)
+#define MS_MediaChange BIT(2)
+#define MS_IsMSPro BIT(3)
+#define MS_IsMSPHG BIT(4)
+ /* Bit 5 reserved */
+#define MS_WtP BIT(6)
+ /* Bit 7 reserved */
-struct SM_STATUS {
- u8 Insert:1;
- u8 Ready:1;
- u8 MediaChange:1;
- u8 Reserved:3;
- u8 WtP:1;
- u8 IsMS:1;
-};
+/* SM_STATUS bits */
+#define SM_Insert BIT(0)
+#define SM_Ready BIT(1)
+#define SM_MediaChange BIT(2)
+ /* Bits 3-5 reserved */
+#define SM_WtP BIT(6)
+#define SM_IsMS BIT(7)
struct ms_bootblock_cis {
u8 bCistplDEVICE[6]; /* 0 */
@@ -437,9 +434,9 @@
u8 *bbuf;
/* for 6250 code */
- struct SD_STATUS SD_Status;
- struct MS_STATUS MS_Status;
- struct SM_STATUS SM_Status;
+ u8 SD_Status;
+ u8 MS_Status;
+ u8 SM_Status;
/* ----- SD Control Data ---------------- */
/*SD_REGISTER SD_Regs; */
@@ -602,7 +599,7 @@
{
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
- if (info->SD_Status.Insert && info->SD_Status.Ready)
+ if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready))
return USB_STOR_TRANSPORT_GOOD;
else {
ene_sd_init(us);
@@ -622,7 +619,7 @@
0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
- if (info->SD_Status.WtP)
+ if (info->SD_Status & SD_WtP)
usb_stor_set_xfer_buf(mediaWP, 12, srb);
else
usb_stor_set_xfer_buf(mediaNoWP, 12, srb);
@@ -641,9 +638,9 @@
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
usb_stor_dbg(us, "sd_scsi_read_capacity\n");
- if (info->SD_Status.HiCapacity) {
+ if (info->SD_Status & SD_HiCapacity) {
bl_len = 0x200;
- if (info->SD_Status.IsMMC)
+ if (info->SD_Status & SD_IsMMC)
bl_num = info->HC_C_SIZE-1;
else
bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
@@ -693,7 +690,7 @@
return USB_STOR_TRANSPORT_ERROR;
}
- if (info->SD_Status.HiCapacity)
+ if (info->SD_Status & SD_HiCapacity)
bnByte = bn;
/* set up the command wrapper */
@@ -733,7 +730,7 @@
return USB_STOR_TRANSPORT_ERROR;
}
- if (info->SD_Status.HiCapacity)
+ if (info->SD_Status & SD_HiCapacity)
bnByte = bn;
/* set up the command wrapper */
@@ -1455,7 +1452,7 @@
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
/* pr_info("MS_SCSI_Test_Unit_Ready\n"); */
- if (info->MS_Status.Insert && info->MS_Status.Ready) {
+ if ((info->MS_Status & MS_Insert) && (info->MS_Status & MS_Ready)) {
return USB_STOR_TRANSPORT_GOOD;
} else {
ene_ms_init(us);
@@ -1475,7 +1472,7 @@
0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
- if (info->MS_Status.WtP)
+ if (info->MS_Status & MS_WtP)
usb_stor_set_xfer_buf(mediaWP, 12, srb);
else
usb_stor_set_xfer_buf(mediaNoWP, 12, srb);
@@ -1494,7 +1491,7 @@
usb_stor_dbg(us, "ms_scsi_read_capacity\n");
bl_len = 0x200;
- if (info->MS_Status.IsMSPro)
+ if (info->MS_Status & MS_IsMSPro)
bl_num = info->MSP_TotalBlock - 1;
else
bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1;
@@ -1649,7 +1646,7 @@
if (bn > info->bl_num)
return USB_STOR_TRANSPORT_ERROR;
- if (info->MS_Status.IsMSPro) {
+ if (info->MS_Status & MS_IsMSPro) {
result = ene_load_bincode(us, MSP_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n");
@@ -1750,7 +1747,7 @@
if (bn > info->bl_num)
return USB_STOR_TRANSPORT_ERROR;
- if (info->MS_Status.IsMSPro) {
+ if (info->MS_Status & MS_IsMSPro) {
result = ene_load_bincode(us, MSP_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
pr_info("Load MSP RW pattern Fail !!\n");
@@ -1858,12 +1855,12 @@
tmpreg = (u16) reg4b;
reg4b = *(u32 *)(&buf[0x14]);
- if (info->SD_Status.HiCapacity && !info->SD_Status.IsMMC)
+ if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC))
info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff;
info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22);
info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07;
- if (info->SD_Status.HiCapacity && info->SD_Status.IsMMC)
+ if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC))
info->HC_C_SIZE = *(u32 *)(&buf[0x100]);
if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) {
@@ -2075,6 +2072,7 @@
u16 MSP_BlockSize, MSP_UserAreaBlocks;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
u8 *bbuf = info->bbuf;
+ unsigned int s;
printk(KERN_INFO "transport --- ENE_MSInit\n");
@@ -2099,15 +2097,16 @@
return USB_STOR_TRANSPORT_ERROR;
}
/* the same part to test ENE */
- info->MS_Status = *(struct MS_STATUS *) bbuf;
+ info->MS_Status = bbuf[0];
- if (info->MS_Status.Insert && info->MS_Status.Ready) {
- printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
- printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready);
- printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro);
- printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
- printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
- if (info->MS_Status.IsMSPro) {
+ s = info->MS_Status;
+ if ((s & MS_Insert) && (s & MS_Ready)) {
+ printk(KERN_INFO "Insert = %x\n", !!(s & MS_Insert));
+ printk(KERN_INFO "Ready = %x\n", !!(s & MS_Ready));
+ printk(KERN_INFO "IsMSPro = %x\n", !!(s & MS_IsMSPro));
+ printk(KERN_INFO "IsMSPHG = %x\n", !!(s & MS_IsMSPHG));
+ printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP));
+ if (s & MS_IsMSPro) {
MSP_BlockSize = (bbuf[6] << 8) | bbuf[7];
MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
@@ -2168,17 +2167,17 @@
return USB_STOR_TRANSPORT_ERROR;
}
- info->SD_Status = *(struct SD_STATUS *) bbuf;
- if (info->SD_Status.Insert && info->SD_Status.Ready) {
- struct SD_STATUS *s = &info->SD_Status;
+ info->SD_Status = bbuf[0];
+ if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) {
+ unsigned int s = info->SD_Status;
ene_get_card_status(us, bbuf);
- usb_stor_dbg(us, "Insert = %x\n", s->Insert);
- usb_stor_dbg(us, "Ready = %x\n", s->Ready);
- usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC);
- usb_stor_dbg(us, "HiCapacity = %x\n", s->HiCapacity);
- usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed);
- usb_stor_dbg(us, "WtP = %x\n", s->WtP);
+ usb_stor_dbg(us, "Insert = %x\n", !!(s & SD_Insert));
+ usb_stor_dbg(us, "Ready = %x\n", !!(s & SD_Ready));
+ usb_stor_dbg(us, "IsMMC = %x\n", !!(s & SD_IsMMC));
+ usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity));
+ usb_stor_dbg(us, "HiSpeed = %x\n", !!(s & SD_HiSpeed));
+ usb_stor_dbg(us, "WtP = %x\n", !!(s & SD_WtP));
} else {
usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
return USB_STOR_TRANSPORT_ERROR;
@@ -2200,14 +2199,14 @@
misc_reg03 = bbuf[0];
if (misc_reg03 & 0x01) {
- if (!info->SD_Status.Ready) {
+ if (!(info->SD_Status & SD_Ready)) {
result = ene_sd_init(us);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
}
}
if (misc_reg03 & 0x02) {
- if (!info->MS_Status.Ready) {
+ if (!(info->MS_Status & MS_Ready)) {
result = ene_ms_init(us);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -2306,14 +2305,14 @@
/*US_DEBUG(usb_stor_show_command(us, srb)); */
scsi_set_resid(srb, 0);
- if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
+ if (unlikely(!(info->SD_Status & SD_Ready) || (info->MS_Status & MS_Ready)))
result = ene_init(us);
if (result == USB_STOR_XFER_GOOD) {
result = USB_STOR_TRANSPORT_ERROR;
- if (info->SD_Status.Ready)
+ if (info->SD_Status & SD_Ready)
result = sd_scsi_irp(us, srb);
- if (info->MS_Status.Ready)
+ if (info->MS_Status & MS_Ready)
result = ms_scsi_irp(us, srb);
}
return result;
@@ -2377,7 +2376,6 @@
static int ene_ub6250_resume(struct usb_interface *iface)
{
- u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
@@ -2389,17 +2387,16 @@
mutex_unlock(&us->dev_mutex);
info->Power_IsResum = true;
- /*info->SD_Status.Ready = 0; */
- info->SD_Status = *(struct SD_STATUS *)&tmp;
- info->MS_Status = *(struct MS_STATUS *)&tmp;
- info->SM_Status = *(struct SM_STATUS *)&tmp;
+ /* info->SD_Status &= ~SD_Ready; */
+ info->SD_Status = 0;
+ info->MS_Status = 0;
+ info->SM_Status = 0;
return 0;
}
static int ene_ub6250_reset_resume(struct usb_interface *iface)
{
- u8 tmp = 0;
struct us_data *us = usb_get_intfdata(iface);
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
@@ -2411,10 +2408,10 @@
* the device
*/
info->Power_IsResum = true;
- /*info->SD_Status.Ready = 0; */
- info->SD_Status = *(struct SD_STATUS *)&tmp;
- info->MS_Status = *(struct MS_STATUS *)&tmp;
- info->SM_Status = *(struct SM_STATUS *)&tmp;
+ /* info->SD_Status &= ~SD_Ready; */
+ info->SD_Status = 0;
+ info->MS_Status = 0;
+ info->SM_Status = 0;
return 0;
}
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 05cec81..38ddfed 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -174,24 +174,25 @@
static int rio_karma_init(struct us_data *us)
{
- int ret = 0;
struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
if (!data)
- goto out;
+ return -ENOMEM;
data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO);
if (!data->recv) {
kfree(data);
- goto out;
+ return -ENOMEM;
}
us->extra = data;
us->extra_destructor = rio_karma_destructor;
- ret = rio_karma_send_command(RIO_ENTER_STORAGE, us);
- data->in_storage = (ret == 0);
-out:
- return ret;
+ if (rio_karma_send_command(RIO_ENTER_STORAGE, us))
+ return -EIO;
+
+ data->in_storage = 1;
+
+ return 0;
}
static struct scsi_host_template karma_host_template;
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 3789698..0c42391 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -365,7 +365,7 @@
buf = kmalloc(len, GFP_NOIO);
if (buf == NULL)
- return USB_STOR_TRANSPORT_ERROR;
+ return -ENOMEM;
usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1a05e3d..20dcbcc 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1275,12 +1275,6 @@
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
-UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
- "Samsung",
- "Flash Drive FIT",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_MAX_SECTORS_64),
-
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
@@ -2294,6 +2288,13 @@
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */
+UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
+ "NXP Semiconductors",
+ "PN7462AU",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Supplied with some Castlewood ORB removable drives */
UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
"Double-H Technology",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 4051c8c..251778d 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -62,6 +69,13 @@
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+/* Reported-by: Tom Hu <huxiaoying@kylinos.cn> */
+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
+ "ASUS",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
@@ -69,6 +83,13 @@
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
@@ -111,6 +132,13 @@
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
+ "Thinkplus",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index e62e5e3..5e293cc 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -88,8 +88,8 @@
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
break;
default:
break;
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index c7d44da..9d3a35b 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1444,6 +1444,7 @@
partner->usb_pd = 1;
sysfs_notify(&partner_dev->kobj, NULL,
"supports_usb_power_delivery");
+ kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE);
}
put_device(partner_dev);
}
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index b9035c3..a6e5028 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -127,8 +127,11 @@
sw->dev.class = &typec_mux_class;
sw->dev.type = &typec_switch_dev_type;
sw->dev.driver_data = desc->drvdata;
- dev_set_name(&sw->dev, "%s-switch",
- desc->name ? desc->name : dev_name(parent));
+ ret = dev_set_name(&sw->dev, "%s-switch", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&sw->dev);
+ return ERR_PTR(ret);
+ }
ret = device_add(&sw->dev);
if (ret) {
@@ -331,8 +334,11 @@
mux->dev.class = &typec_mux_class;
mux->dev.type = &typec_mux_dev_type;
mux->dev.driver_data = desc->drvdata;
- dev_set_name(&mux->dev, "%s-mux",
- desc->name ? desc->name : dev_name(parent));
+ ret = dev_set_name(&mux->dev, "%s-mux", desc->name ? desc->name : dev_name(parent));
+ if (ret) {
+ put_device(&mux->dev);
+ return ERR_PTR(ret);
+ }
ret = device_add(&mux->dev);
if (ret) {
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index acdef6f..1276112 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -83,8 +83,6 @@
/*
* Input Output Manager (IOM) PORT STATUS
*/
-#define IOM_PORT_STATUS_OFFSET 0x560
-
#define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6)
#define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6
#define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03
@@ -144,6 +142,7 @@
struct pmc_usb_port *port;
struct acpi_device *iom_adev;
void __iomem *iom_base;
+ u32 iom_port_status_offset;
};
static void update_port_status(struct pmc_usb_port *port)
@@ -153,7 +152,8 @@
/* SoC expects the USB Type-C port numbers to start with 0 */
port_num = port->usb3_port - 1;
- port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET +
+ port->iom_status = readl(port->pmc->iom_base +
+ port->pmc->iom_port_status_offset +
port_num * sizeof(u32));
}
@@ -339,13 +339,24 @@
return pmc_usb_command(port, (void *)&req, sizeof(req));
}
-static int pmc_usb_mux_safe_state(struct pmc_usb_port *port)
+static int pmc_usb_mux_safe_state(struct pmc_usb_port *port,
+ struct typec_mux_state *state)
{
u8 msg;
if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
return 0;
+ if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) &&
+ state->alt && state->alt->svid == USB_TYPEC_DP_SID)
+ return 0;
+
+ if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) &&
+ state->alt && state->alt->svid == USB_TYPEC_TBT_SID)
+ return 0;
+
msg = PMC_USB_SAFE_MODE;
msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
@@ -413,7 +424,7 @@
return 0;
if (state->mode == TYPEC_STATE_SAFE)
- return pmc_usb_mux_safe_state(port);
+ return pmc_usb_mux_safe_state(port, state);
if (state->mode == TYPEC_STATE_USB)
return pmc_usb_connect(port, port->role);
@@ -541,19 +552,42 @@
static int is_memory(struct acpi_resource *res, void *data)
{
- struct resource r;
+ struct resource_win win = {};
+ struct resource *r = &win.res;
- return !acpi_dev_resource_memory(res, &r);
+ return !(acpi_dev_resource_memory(res, r) ||
+ acpi_dev_resource_address_space(res, &win));
}
+/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
+static const struct acpi_device_id iom_acpi_ids[] = {
+ /* TigerLake */
+ { "INTC1072", 0x560, },
+
+ /* AlderLake */
+ { "INTC1079", 0x160, },
+
+ /* Meteor Lake */
+ { "INTC107A", 0x160, },
+ {}
+};
+
static int pmc_usb_probe_iom(struct pmc_usb *pmc)
{
struct list_head resource_list;
struct resource_entry *rentry;
- struct acpi_device *adev;
+ static const struct acpi_device_id *dev_id;
+ struct acpi_device *adev = NULL;
int ret;
- adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1);
+ for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
+ if (acpi_dev_present(dev_id->id, NULL, -1)) {
+ pmc->iom_port_status_offset = (u32)dev_id->driver_data;
+ adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ break;
+ }
+ }
+
if (!adev)
return -ENODEV;
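The probe loop above picks the per-SoC register offset out of the matched ACPI ID's driver_data field. For illustration only, a minimal stand-alone C sketch of the same table-walk pattern (plain user space, not the ACPI API; the IDs and offsets are the ones listed in the table above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct id_entry {
	const char *id;         /* ACPI HID, e.g. "INTC1072" */
	uint32_t    status_off; /* per-platform port status offset */
};

static const struct id_entry iom_ids[] = {
	{ "INTC1072", 0x560 },  /* TigerLake */
	{ "INTC1079", 0x160 },  /* AlderLake */
	{ "INTC107A", 0x160 },  /* Meteor Lake */
	{ NULL, 0 }             /* sentinel */
};

/* Walk the table until the sentinel entry; return the offset for a match. */
static int lookup_offset(const char *hid, uint32_t *off)
{
	const struct id_entry *e;

	for (e = iom_ids; e->id; e++) {
		if (!strcmp(e->id, hid)) {
			*off = e->status_off;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t off;

	if (!lookup_offset("INTC1079", &off))
		printf("port status offset: 0x%x\n", off);
	return 0;
}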
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 557f392..073fd2e 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -56,7 +56,6 @@
tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
depends on ACPI
depends on MFD_INTEL_PMC_BXT
- depends on INTEL_SOC_PMIC
depends on BXT_WC_PMIC_OPREGION
help
This driver adds support for USB Type-C on Intel Broxton platforms
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index a06da18..49420e2 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -709,7 +709,7 @@
/* Disable chip interrupts before unregistering port */
err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
if (err < 0)
- return err;
+ dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));
tcpci_unregister_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
index f1bd9e0..8a952ea 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6360.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
@@ -15,6 +15,9 @@
#include "tcpci.h"
+#define MT6360_REG_PHYCTRL1 0x80
+#define MT6360_REG_PHYCTRL3 0x82
+#define MT6360_REG_PHYCTRL7 0x86
#define MT6360_REG_VCONNCTRL1 0x8C
#define MT6360_REG_MODECTRL2 0x8F
#define MT6360_REG_SWRESET 0xA0
@@ -22,6 +25,8 @@
#define MT6360_REG_DRPCTRL1 0xA2
#define MT6360_REG_DRPCTRL2 0xA3
#define MT6360_REG_I2CTORST 0xBF
+#define MT6360_REG_PHYCTRL11 0xCA
+#define MT6360_REG_RXCTRL1 0xCE
#define MT6360_REG_RXCTRL2 0xCF
#define MT6360_REG_CTDCTRL2 0xEC
@@ -106,6 +111,27 @@
if (ret)
return ret;
+ /* BMC PHY */
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36);
+ if (ret)
+ return ret;
+
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8);
+ if (ret)
+ return ret;
+
/* Set shipping mode off, AUTOIDLE on */
return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 3bfa800..4cd5c29 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -76,6 +76,10 @@
if (ret)
return ret;
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
+
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;
@@ -511,8 +515,6 @@
num_pdos * sizeof(u32));
if (ret < 0)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
- if (ret == 0 && offset == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
return ret;
}
@@ -928,6 +930,8 @@
role == TYPEC_HOST))
goto out_unlock;
+ reinit_completion(&con->complete);
+
command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_SET_UOR_ROLE(role);
command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
@@ -935,14 +939,18 @@
if (ret < 0)
goto out_unlock;
+ mutex_unlock(&con->lock);
+
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+ return -ETIMEDOUT;
+
+ return 0;
out_unlock:
mutex_unlock(&con->lock);
- return ret < 0 ? ret : 0;
+ return ret;
}
static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
@@ -964,6 +972,8 @@
if (cur_role == role)
goto out_unlock;
+ reinit_completion(&con->complete);
+
command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_SET_PDR_ROLE(role);
command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
@@ -971,11 +981,13 @@
if (ret < 0)
goto out_unlock;
+ mutex_unlock(&con->lock);
+
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out_unlock;
- }
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+ return -ETIMEDOUT;
+
+ mutex_lock(&con->lock);
/* Something has gone wrong while swapping the role */
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index d8d3892..3c6d452 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -393,7 +393,6 @@
err_port:
dev_set_drvdata(&udev->dev, NULL);
- usb_put_dev(udev);
/* we already have busid_priv, just lock busid_lock */
spin_lock(&busid_priv->busid_lock);
@@ -408,6 +407,7 @@
put_busid_priv(busid_priv);
sdev_free:
+ usb_put_dev(udev);
stub_device_free(sdev);
return rc;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 325c220..5dd41e8 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -138,7 +138,9 @@
req = (struct usb_ctrlrequest *) urb->setup_packet;
config = le16_to_cpu(req->wValue);
+ usb_lock_device(sdev->udev);
err = usb_set_configuration(sdev->udev, config);
+ usb_unlock_device(sdev->udev);
if (err && err != -ENODEV)
dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
config, err);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 65d6f8f..577ff78 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1482,11 +1482,25 @@
return ndev->mvdev.mlx_features;
}
-static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
+ /* Minimum features to expect */
if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
return -EOPNOTSUPP;
+ /* Double-check the feature combination sent down by the driver.
+ * Reject combinations where a required dependency is missing.
+ *
+ * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit
+ * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ".
+ * By rejecting invalid feature sets from untrusted drivers, the
+ * assumptions made by is_index_valid() and is_ctrl_vq_idx() are
+ * not compromised.
+ */
+ if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) ==
+ BIT_ULL(VIRTIO_NET_F_MQ))
+ return -EINVAL;
+
return 0;
}
@@ -1544,7 +1558,7 @@
print_features(mvdev, features, true);
- err = verify_min_features(mvdev, features);
+ err = verify_driver_features(mvdev, features);
if (err)
return err;
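The new check rejects a feature set that advertises VIRTIO_NET_F_MQ without VIRTIO_NET_F_CTRL_VQ. A small self-contained sketch of the same mask test (bit numbers 22 and 17 follow the VIRTIO spec; everything else here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)            (1ULL << (n))
#define VIRTIO_NET_F_CTRL_VQ  17
#define VIRTIO_NET_F_MQ       22

/* Returns 0 if the combination is acceptable, -1 if MQ lacks CTRL_VQ. */
static int check_features(uint64_t features)
{
	uint64_t mask = BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ);

	/* Only the MQ bit set out of the pair: the dependency is missing. */
	if ((features & mask) == BIT_ULL(VIRTIO_NET_F_MQ))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_features(BIT_ULL(VIRTIO_NET_F_MQ)));                                 /* -1 */
	printf("%d\n", check_features(BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))); /*  0 */
	printf("%d\n", check_features(0));                                                        /*  0 */
	return 0;
}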
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index f2ad450..e65c0fa 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -473,11 +473,14 @@
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ bool old_ready;
spin_lock(&vdpasim->lock);
+ old_ready = vq->ready;
vq->ready = ready;
- if (vq->ready)
+ if (vq->ready && !old_ready) {
vdpasim_queue_ready(vdpasim, idx);
+ }
spin_unlock(&vdpasim->lock);
}
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 7d92295..74c2e54 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -35,7 +35,10 @@
bool active;
};
-#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
struct mdev_type {
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 9636a2a..3626c21 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -71,18 +71,13 @@
return ret;
}
-static int vfio_amba_remove(struct amba_device *adev)
+static void vfio_amba_remove(struct amba_device *adev)
{
- struct vfio_platform_device *vdev;
+ struct vfio_platform_device *vdev =
+ vfio_platform_remove_common(&adev->dev);
- vdev = vfio_platform_remove_common(&adev->dev);
- if (vdev) {
- kfree(vdev->name);
- kfree(vdev);
- return 0;
- }
-
- return -EINVAL;
+ kfree(vdev->name);
+ kfree(vdev);
}
static const struct amba_id pl330_ids[] = {
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2151bc7..90db9d6 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -46,7 +46,6 @@
struct mutex group_lock;
struct cdev group_cdev;
dev_t group_devt;
- wait_queue_head_t release_q;
} vfio;
struct vfio_iommu_driver {
@@ -90,15 +89,6 @@
struct blocking_notifier_head notifier;
};
-struct vfio_device {
- struct kref kref;
- struct device *dev;
- const struct vfio_device_ops *ops;
- struct vfio_group *group;
- struct list_head group_next;
- void *device_data;
-};
-
#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
@@ -532,67 +522,17 @@
/**
* Device objects - create, release, get, put, search
*/
-static
-struct vfio_device *vfio_group_create_device(struct vfio_group *group,
- struct device *dev,
- const struct vfio_device_ops *ops,
- void *device_data)
-{
- struct vfio_device *device;
-
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&device->kref);
- device->dev = dev;
- device->group = group;
- device->ops = ops;
- device->device_data = device_data;
- dev_set_drvdata(dev, device);
-
- /* No need to get group_lock, caller has group reference */
- vfio_group_get(group);
-
- mutex_lock(&group->device_lock);
- list_add(&device->group_next, &group->device_list);
- group->dev_counter++;
- mutex_unlock(&group->device_lock);
-
- return device;
-}
-
-static void vfio_device_release(struct kref *kref)
-{
- struct vfio_device *device = container_of(kref,
- struct vfio_device, kref);
- struct vfio_group *group = device->group;
-
- list_del(&device->group_next);
- group->dev_counter--;
- mutex_unlock(&group->device_lock);
-
- dev_set_drvdata(device->dev, NULL);
-
- kfree(device);
-
- /* vfio_del_group_dev may be waiting for this device */
- wake_up(&vfio.release_q);
-}
-
/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
- struct vfio_group *group = device->group;
- kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
- vfio_group_put(group);
+ if (refcount_dec_and_test(&device->refcount))
+ complete(&device->comp);
}
EXPORT_SYMBOL_GPL(vfio_device_put);
-static void vfio_device_get(struct vfio_device *device)
+static bool vfio_device_try_get(struct vfio_device *device)
{
- vfio_group_get(device->group);
- kref_get(&device->kref);
+ return refcount_inc_not_zero(&device->refcount);
}
static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
@@ -602,8 +542,7 @@
mutex_lock(&group->device_lock);
list_for_each_entry(device, &group->device_list, group_next) {
- if (device->dev == dev) {
- vfio_device_get(device);
+ if (device->dev == dev && vfio_device_try_get(device)) {
mutex_unlock(&group->device_lock);
return device;
}
@@ -801,14 +740,23 @@
/**
* VFIO driver API
*/
-int vfio_add_group_dev(struct device *dev,
- const struct vfio_device_ops *ops, void *device_data)
+void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
+ const struct vfio_device_ops *ops, void *device_data)
{
+ init_completion(&device->comp);
+ device->dev = dev;
+ device->ops = ops;
+ device->device_data = device_data;
+}
+EXPORT_SYMBOL_GPL(vfio_init_group_dev);
+
+int vfio_register_group_dev(struct vfio_device *device)
+{
+ struct vfio_device *existing_device;
struct iommu_group *iommu_group;
struct vfio_group *group;
- struct vfio_device *device;
- iommu_group = iommu_group_get(dev);
+ iommu_group = iommu_group_get(device->dev);
if (!iommu_group)
return -EINVAL;
@@ -827,30 +775,51 @@
iommu_group_put(iommu_group);
}
- device = vfio_group_get_device(group, dev);
- if (device) {
- dev_WARN(dev, "Device already exists on group %d\n",
+ existing_device = vfio_group_get_device(group, device->dev);
+ if (existing_device) {
+ dev_WARN(device->dev, "Device already exists on group %d\n",
iommu_group_id(iommu_group));
- vfio_device_put(device);
+ vfio_device_put(existing_device);
vfio_group_put(group);
return -EBUSY;
}
- device = vfio_group_create_device(group, dev, ops, device_data);
- if (IS_ERR(device)) {
- vfio_group_put(group);
- return PTR_ERR(device);
- }
+ /* Our reference on group is moved to the device */
+ device->group = group;
- /*
- * Drop all but the vfio_device reference. The vfio_device holds
- * a reference to the vfio_group, which holds a reference to the
- * iommu_group.
- */
- vfio_group_put(group);
+ /* Refcounting can't start until the driver calls register */
+ refcount_set(&device->refcount, 1);
+
+ mutex_lock(&group->device_lock);
+ list_add(&device->group_next, &group->device_list);
+ group->dev_counter++;
+ mutex_unlock(&group->device_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(vfio_register_group_dev);
+
+int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops,
+ void *device_data)
+{
+ struct vfio_device *device;
+ int ret;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ vfio_init_group_dev(device, dev, ops, device_data);
+ ret = vfio_register_group_dev(device);
+ if (ret)
+ goto err_kfree;
+ dev_set_drvdata(dev, device);
+ return 0;
+
+err_kfree:
+ kfree(device);
+ return ret;
+}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
/**
@@ -895,9 +864,8 @@
ret = !strcmp(dev_name(it->dev), buf);
}
- if (ret) {
+ if (ret && vfio_device_try_get(it)) {
device = it;
- vfio_device_get(device);
break;
}
}
@@ -918,21 +886,13 @@
/*
* Decrement the device reference count and wait for the device to be
* removed. Open file descriptors for the device... */
-void *vfio_del_group_dev(struct device *dev)
+void vfio_unregister_group_dev(struct vfio_device *device)
{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct vfio_device *device = dev_get_drvdata(dev);
struct vfio_group *group = device->group;
- void *device_data = device->device_data;
struct vfio_unbound_dev *unbound;
unsigned int i = 0;
bool interrupted = false;
-
- /*
- * The group exists so long as we have a device reference. Get
- * a group reference and use it to scan for the device going away.
- */
- vfio_group_get(group);
+ long rc;
/*
* When the device is removed from the group, the group suddenly
@@ -945,7 +905,7 @@
*/
unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
if (unbound) {
- unbound->dev = dev;
+ unbound->dev = device->dev;
mutex_lock(&group->unbound_lock);
list_add(&unbound->unbound_next, &group->unbound_list);
mutex_unlock(&group->unbound_lock);
@@ -953,44 +913,33 @@
WARN_ON(!unbound);
vfio_device_put(device);
-
- /*
- * If the device is still present in the group after the above
- * 'put', then it is in use and we need to request it from the
- * bus driver. The driver may in turn need to request the
- * device from the user. We send the request on an arbitrary
- * interval with counter to allow the driver to take escalating
- * measures to release the device if it has the ability to do so.
- */
- add_wait_queue(&vfio.release_q, &wait);
-
- do {
- device = vfio_group_get_device(group, dev);
- if (!device)
- break;
-
+ rc = try_wait_for_completion(&device->comp);
+ while (rc <= 0) {
if (device->ops->request)
- device->ops->request(device_data, i++);
-
- vfio_device_put(device);
+ device->ops->request(device->device_data, i++);
if (interrupted) {
- wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
+ rc = wait_for_completion_timeout(&device->comp,
+ HZ * 10);
} else {
- wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
- if (signal_pending(current)) {
+ rc = wait_for_completion_interruptible_timeout(
+ &device->comp, HZ * 10);
+ if (rc < 0) {
interrupted = true;
- dev_warn(dev,
+ dev_warn(device->dev,
"Device is currently in use, task"
" \"%s\" (%d) "
"blocked until device is released",
current->comm, task_pid_nr(current));
}
}
+ }
- } while (1);
+ mutex_lock(&group->device_lock);
+ list_del(&device->group_next);
+ group->dev_counter--;
+ mutex_unlock(&group->device_lock);
- remove_wait_queue(&vfio.release_q, &wait);
/*
* In order to support multiple devices per group, devices can be
* plucked from the group while other devices in the group are still
@@ -1008,8 +957,19 @@
if (list_empty(&group->device_list))
wait_event(group->container_q, !group->container);
+ /* Matches the get in vfio_register_group_dev() */
vfio_group_put(group);
+}
+EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
+void *vfio_del_group_dev(struct device *dev)
+{
+ struct vfio_device *device = dev_get_drvdata(dev);
+ void *device_data = device->device_data;
+
+ vfio_unregister_group_dev(device);
+ dev_set_drvdata(dev, NULL);
+ kfree(device);
return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
@@ -1823,6 +1783,7 @@
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
+ caps->buf = NULL;
caps->size = 0;
return ERR_PTR(-ENOMEM);
}
@@ -2356,7 +2317,6 @@
mutex_init(&vfio.iommu_drivers_lock);
INIT_LIST_HEAD(&vfio.group_list);
INIT_LIST_HEAD(&vfio.iommu_drivers_list);
- init_waitqueue_head(&vfio.release_q);
ret = misc_register(&vfio_dev);
if (ret) {
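The vfio rework above swaps the kref plus release waitqueue for a refcount and a completion: vfio_device_put() signals the completion on the final put, and lookups only take a reference while the count is still non-zero. A stand-alone model of that try-get/put pair, sketched with C11 atomics rather than the kernel refcount API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Minimal model of refcount_inc_not_zero(): only take a reference while
 * the object is still live (count > 0). */
static bool try_get(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old > 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;
}

/* Drop a reference; the final put is where vfio completes &device->comp. */
static bool put_is_final(atomic_int *ref)
{
	return atomic_fetch_sub(ref, 1) == 1;   /* true when the count just hit zero */
}

int main(void)
{
	atomic_int ref = 1;                       /* set at register time */

	printf("try_get: %d\n", try_get(&ref));       /* 1: count 1 -> 2 */
	printf("final:   %d\n", put_is_final(&ref));  /* 0: count 2 -> 1 */
	printf("final:   %d\n", put_is_final(&ref));  /* 1: count 1 -> 0, wake the waiter */
	printf("try_get: %d\n", try_get(&ref));       /* 0: object is going away */
	return 0;
}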
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index fbd438e..ce50ca9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -98,6 +98,12 @@
unsigned long *bitmap;
};
+struct vfio_batch {
+ struct page **pages; /* for pin_user_pages_remote */
+ struct page *fallback_page; /* if pages alloc fails */
+ int capacity; /* length of pages array */
+};
+
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
@@ -428,6 +434,31 @@
return 0;
}
+#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
+
+static void vfio_batch_init(struct vfio_batch *batch)
+{
+ if (unlikely(disable_hugepages))
+ goto fallback;
+
+ batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!batch->pages)
+ goto fallback;
+
+ batch->capacity = VFIO_BATCH_MAX_CAPACITY;
+ return;
+
+fallback:
+ batch->pages = &batch->fallback_page;
+ batch->capacity = 1;
+}
+
+static void vfio_batch_fini(struct vfio_batch *batch)
+{
+ if (batch->capacity == VFIO_BATCH_MAX_CAPACITY)
+ free_page((unsigned long)batch->pages);
+}
+
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long vaddr, unsigned long *pfn,
bool write_fault)
@@ -464,10 +495,14 @@
return ret;
}
-static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
- int prot, unsigned long *pfn)
+/*
+ * Returns the positive number of pfns successfully obtained or a negative
+ * error code.
+ */
+static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
+ long npages, int prot, unsigned long *pfn,
+ struct page **pages)
{
- struct page *page[1];
struct vm_area_struct *vma;
unsigned int flags = 0;
int ret;
@@ -476,11 +511,22 @@
flags |= FOLL_WRITE;
mmap_read_lock(mm);
- ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
- page, NULL, NULL);
- if (ret == 1) {
- *pfn = page_to_pfn(page[0]);
- ret = 0;
+ ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
+ pages, NULL, NULL);
+ if (ret > 0) {
+ int i;
+
+ /*
+ * The zero page is always resident, we don't need to pin it
+ * and it falls into our invalid/reserved test so we don't
+ * unpin in put_pfn(). Unpin all zero pages in the batch here.
+ */
+ for (i = 0 ; i < ret; i++) {
+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+ unpin_user_page(pages[i]);
+ }
+
+ *pfn = page_to_pfn(pages[0]);
goto done;
}
@@ -494,8 +540,12 @@
if (ret == -EAGAIN)
goto retry;
- if (!ret && !is_invalid_reserved_pfn(*pfn))
- ret = -EFAULT;
+ if (!ret) {
+ if (is_invalid_reserved_pfn(*pfn))
+ ret = 1;
+ else
+ ret = -EFAULT;
+ }
}
done:
mmap_read_unlock(mm);
@@ -509,7 +559,7 @@
*/
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base,
- unsigned long limit)
+ unsigned long limit, struct vfio_batch *batch)
{
unsigned long pfn = 0;
long ret, pinned = 0, lock_acct = 0;
@@ -520,8 +570,9 @@
if (!current->mm)
return -ENODEV;
- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
- if (ret)
+ ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base,
+ batch->pages);
+ if (ret < 0)
return ret;
pinned++;
@@ -547,8 +598,9 @@
/* Lock all the consecutive pages from pfn_base */
for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
- if (ret)
+ ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn,
+ batch->pages);
+ if (ret < 0)
break;
if (pfn != *pfn_base + pinned ||
@@ -574,7 +626,7 @@
ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out:
- if (ret) {
+ if (ret < 0) {
if (!rsvd) {
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
put_pfn(pfn, dma->prot);
@@ -610,6 +662,7 @@
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
+ struct page *pages[1];
struct mm_struct *mm;
int ret;
@@ -617,8 +670,13 @@
if (!mm)
return -ENODEV;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
- if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+ ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
+ if (ret != 1)
+ goto out;
+
+ ret = 0;
+
+ if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
ret = vfio_lock_acct(dma, 1, true);
if (ret) {
put_pfn(*pfn_base, dma->prot);
@@ -630,6 +688,7 @@
}
}
+out:
mmput(mm);
return ret;
}
@@ -1263,15 +1322,19 @@
{
dma_addr_t iova = dma->iova;
unsigned long vaddr = dma->vaddr;
+ struct vfio_batch batch;
size_t size = map_size;
long npage;
unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret = 0;
+ vfio_batch_init(&batch);
+
while (size) {
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
- size >> PAGE_SHIFT, &pfn, limit);
+ size >> PAGE_SHIFT, &pfn, limit,
+ &batch);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1291,6 +1354,7 @@
dma->size += npage << PAGE_SHIFT;
}
+ vfio_batch_fini(&batch);
dma->iommu_mapped = true;
if (ret)
@@ -1449,6 +1513,7 @@
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
+ struct vfio_batch batch;
struct vfio_domain *d = NULL;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -1459,6 +1524,8 @@
d = list_first_entry(&iommu->domain_list,
struct vfio_domain, next);
+ vfio_batch_init(&batch);
+
n = rb_first(&iommu->dma_list);
for (; n; n = rb_next(n)) {
@@ -1506,7 +1573,8 @@
npage = vfio_pin_pages_remote(dma, vaddr,
n >> PAGE_SHIFT,
- &pfn, limit);
+ &pfn, limit,
+ &batch);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1539,6 +1607,7 @@
dma->iommu_mapped = true;
}
+ vfio_batch_fini(&batch);
return 0;
unwind:
@@ -1579,6 +1648,7 @@
}
}
+ vfio_batch_fini(&batch);
return ret;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index da02c3e..5beb207 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -472,6 +472,7 @@
goto signal_used;
msghdr->msg_control = &ctl;
+ msghdr->msg_controllen = sizeof(ctl);
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");
@@ -1449,13 +1450,9 @@
return ERR_PTR(r);
}
-static struct ptr_ring *get_tap_ptr_ring(int fd)
+static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@@ -1464,7 +1461,6 @@
goto out;
ring = NULL;
out:
- fput(file);
return ring;
}
@@ -1551,8 +1547,12 @@
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
- if (index == VHOST_NET_VQ_RX)
- nvq->rx_ring = get_tap_ptr_ring(fd);
+ if (index == VHOST_NET_VQ_RX) {
+ if (sock)
+ nvq->rx_ring = get_tap_ptr_ring(sock->file);
+ else
+ nvq->rx_ring = NULL;
+ }
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e4d6000..04578aa 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -97,8 +97,11 @@
return;
irq = ops->get_vq_irq(vdpa, qid);
+ if (irq < 0)
+ return;
+
irq_bypass_unregister_producer(&vq->call_ctx.producer);
- if (!vq->call_ctx.ctx || irq < 0)
+ if (!vq->call_ctx.ctx)
return;
vq->call_ctx.producer.token = vq->call_ctx.ctx;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 0bd7e64..5a0340c 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -274,7 +274,7 @@
int (*copy)(const struct vringh *vrh,
void *dst, const void *src, size_t len))
{
- int err, count = 0, up_next, desc_max;
+ int err, count = 0, indirect_count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
struct vringh_range range = { -1ULL, 0 }, slowrange;
bool slow = false;
@@ -331,7 +331,12 @@
continue;
}
- if (count++ == vrh->vring.num) {
+ if (up_next == -1)
+ count++;
+ else
+ indirect_count++;
+
+ if (count > vrh->vring.num || indirect_count > desc_max) {
vringh_bad("Descriptor loop in %p", descs);
err = -ELOOP;
goto fail;
@@ -393,6 +398,7 @@
i = return_from_indirect(vrh, &up_next,
&descs, &desc_max);
slow = false;
+ indirect_count = 0;
} else
break;
}
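The vringh change keeps separate counters for the direct chain and the indirect table so that each one is bounded by its own length, and the indirect counter restarts for every indirect table. A toy model of why bounding a walk by its own table size catches descriptor loops (simplified descriptors, no real vring access):

#include <stdio.h>

struct desc { int next; };            /* next == -1 terminates a chain */

/* Walk one chain, bounded by its own table size; -1 means a loop. */
static int walk(const struct desc *d, int num, int head)
{
	int seen = 0, i = head;

	while (i != -1) {
		if (++seen > num)
			return -1;    /* more hops than entries: must be a loop */
		i = d[i].next;
	}
	return seen;
}

int main(void)
{
	struct desc direct[3]   = { { 1 }, { 2 }, { -1 } };
	struct desc indirect[2] = { { 1 }, { 0 } };        /* deliberate loop */

	/* Direct and indirect tables are bounded independently, mirroring
	 * the separate count / indirect_count in the patch above. */
	printf("direct:   %d\n", walk(direct, 3, 0));      /*  3 */
	printf("indirect: %d\n", walk(indirect, 2, 0));    /* -1 */
	return 0;
}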
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5d2d6ce..b015361 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -359,7 +359,7 @@
return NULL;
}
- pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
if (!pkt->buf) {
kfree(pkt);
return NULL;
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 40496e9..f304163 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -46,6 +46,7 @@
#include <linux/slab.h>
#include <linux/font.h>
#include <linux/crc32.h>
+#include <linux/fb.h>
#include <asm/io.h>
@@ -392,7 +393,9 @@
for (i = 0; i < MAX_NR_CONSOLES; i++)
font_data[i] = STI_DEF_FONT;
- pr_info("sticon: Initializing STI text console.\n");
+ pr_info("sticon: Initializing STI text console on %s at [%s]\n",
+ sticon_sti->sti_data->inq_outptr.dev_name,
+ sticon_sti->pa_path);
console_lock();
err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1,
PAGE0->mem_cons.cl_class != CL_DUPLEX);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 6a26a36..68fb531 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -30,10 +30,11 @@
#include <asm/pdc.h>
#include <asm/cacheflush.h>
#include <asm/grfioctl.h>
+#include <asm/fb.h>
#include "../fbdev/sticore.h"
-#define STI_DRIVERVERSION "Version 0.9b"
+#define STI_DRIVERVERSION "Version 0.9c"
static struct sti_struct *default_sti __read_mostly;
@@ -502,7 +503,7 @@
if (!fbfont)
return NULL;
- pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
+ pr_info(" using %ux%u framebuffer font %s\n",
fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
@@ -946,6 +947,7 @@
static void sticore_check_for_default_sti(struct sti_struct *sti, char *path)
{
+ pr_info(" located at [%s]\n", sti->pa_path);
if (strcmp (path, default_sti_path) == 0)
default_sti = sti;
}
@@ -957,7 +959,6 @@
*/
static int __init sticore_pa_init(struct parisc_device *dev)
{
- char pa_path[21];
struct sti_struct *sti = NULL;
int hpa = dev->hpa.start;
@@ -970,8 +971,8 @@
if (!sti)
return 1;
- print_pa_hwpath(dev, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pa_hwpath(dev, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
return 0;
}
@@ -1007,9 +1008,8 @@
sti = sti_try_rom_generic(rom_base, fb_base, pd);
if (sti) {
- char pa_path[30];
- print_pci_hwpath(pd, pa_path);
- sticore_check_for_default_sti(sti, pa_path);
+ print_pci_hwpath(pd, sti->pa_path);
+ sticore_check_for_default_sti(sti, sti->pa_path);
}
if (!sti) {
@@ -1127,6 +1127,24 @@
return ret;
}
+#if defined(CONFIG_FB_STI)
+/* check if given fb_info is the primary device */
+int fb_is_primary_device(struct fb_info *info)
+{
+ struct sti_struct *sti;
+
+ sti = sti_get_rom(0);
+
+ /* if no built-in graphics card found, allow any fb driver as default */
+ if (!sti)
+ return true;
+
+ /* return true if it's the default built-in framebuffer driver */
+ return (sti->info == info);
+}
+EXPORT_SYMBOL(fb_is_primary_device);
+#endif
+
MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index b7682de..6252cd5 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -711,16 +711,18 @@
return -ENODEV;
panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel)
- return -ENODEV;
+ if (!panel) {
+ err = -ENODEV;
+ goto out_endpoint_put;
+ }
err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
&max_bandwidth);
@@ -749,11 +751,21 @@
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
- return -ENOENT;
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
+ err = -ENOENT;
+ goto out_panel_put;
+ }
+
+ of_node_put(panel);
+ of_node_put(endpoint);
return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
tft_r0b0g0[1], tft_r0b0g0[2]);
+out_panel_put:
+ of_node_put(panel);
+out_endpoint_put:
+ of_node_put(endpoint);
+ return err;
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
@@ -771,12 +783,15 @@
return -ENODEV;
fb->fb.screen_base = of_iomap(memory, 0);
- if (!fb->fb.screen_base)
+ if (!fb->fb.screen_base) {
+ of_node_put(memory);
return -ENOMEM;
+ }
fb->fb.fix.smem_start = of_translate_address(memory,
of_get_address(memory, 0, &size, NULL));
fb->fb.fix.smem_len = size;
+ of_node_put(memory);
return 0;
}
@@ -925,7 +940,7 @@
return ret;
}
-static int clcdfb_remove(struct amba_device *dev)
+static void clcdfb_remove(struct amba_device *dev)
{
struct clcd_fb *fb = amba_get_drvdata(dev);
@@ -942,8 +957,6 @@
kfree(fb);
amba_release_regions(dev);
-
- return 0;
}
static const struct amba_id clcdfb_id_table[] = {
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index edf169d..8d092b1 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -778,7 +778,12 @@
return -EINVAL;
}
- ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ value = (hdiv * info->var.pixclock) / hmul;
+ if (!value) {
+ fb_dbg(info, "invalid pixclock\n");
+ value = 1;
+ }
+ ark_set_pixclock(info, value);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
@@ -789,6 +794,8 @@
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index f253daa..a7a1739 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -1691,9 +1691,9 @@
((blue & 0xfc00) >> 8));
if (regno < 16) {
shifter_tt.color_reg[regno] =
- (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
- (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
- ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
+ ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) |
+ ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) |
+ ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11));
@@ -1979,9 +1979,9 @@
green >>= 12;
if (ATARIHW_PRESENT(EXTD_SHIFTER))
shifter_tt.color_reg[regno] =
- (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) |
- (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) |
- ((blue & 0xe) >> 1) | ((blue & 1) << 3);
+ ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) |
+ ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) |
+ ((blue & 0xe) >> 1) | ((blue & 1) << 3);
else
shifter_tt.color_reg[regno] =
((red & 0xe) << 7) |
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 355b612..1fc8de4 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1062,15 +1062,16 @@
INIT_LIST_HEAD(&info->modelist);
- if (pdev->dev.of_node) {
- ret = atmel_lcdfb_of_init(sinfo);
- if (ret)
- goto free_info;
- } else {
+ if (!pdev->dev.of_node) {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
}
+ ret = atmel_lcdfb_of_init(sinfo);
+ if (ret)
+ goto free_info;
+
+ ret = -ENODEV;
if (!sinfo->config)
goto free_info;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894a..2b00a9d 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 15a9ee7..b4980bc 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -469,7 +469,7 @@
return 0;
}
-static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
+static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var,
struct fb_info *info)
{
long freq;
@@ -478,9 +478,7 @@
unsigned maxclockidx = var->bits_per_pixel >> 3;
/* convert from ps to kHz */
- freq = PICOS2KHZ(var->pixclock);
-
- dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
+ freq = PICOS2KHZ(var->pixclock ? : 1);
maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx];
cinfo->multiplexing = 0;
@@ -488,11 +486,13 @@
/* If the frequency is greater than we can support, we might be able
* to use multiplexing for the video mode */
if (freq > maxclock) {
- dev_err(info->device,
- "Frequency greater than maxclock (%ld kHz)\n",
- maxclock);
- return -EINVAL;
+ var->pixclock = KHZ2PICOS(maxclock);
+
+ while ((freq = PICOS2KHZ(var->pixclock)) > maxclock)
+ var->pixclock++;
}
+ dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);
+
/*
* Additional constraint: 8bpp uses DAC clock doubling to allow maximum
* pixel clock
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 2df56bd..bd59e7b 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -64,10 +64,12 @@
#undef in_le32
#undef out_le32
#define in_8(addr) 0
-#define out_8(addr, val)
+#define out_8(addr, val) (void)(val)
#define in_le32(addr) 0
-#define out_le32(addr, val)
+#define out_le32(addr, val) (void)(val)
+#ifndef pgprot_cached_wthru
#define pgprot_cached_wthru(prot) (prot)
+#endif
#else
static void invalid_vram_cache(void __force *addr)
{
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index f102519..2618d3b 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -123,8 +123,8 @@
enums. */
static int logo_shown = FBCON_LOGO_CANSHOW;
/* console mappings */
-static int first_fb_vc;
-static int last_fb_vc = MAX_NR_CONSOLES - 1;
+static unsigned int first_fb_vc;
+static unsigned int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
static int primary_device = -1;
static int fbcon_has_console_bind;
@@ -472,10 +472,12 @@
options += 3;
if (*options)
first_fb_vc = simple_strtoul(options, &options, 10) - 1;
- if (first_fb_vc < 0)
+ if (first_fb_vc >= MAX_NR_CONSOLES)
first_fb_vc = 0;
if (*options++ == '-')
last_fb_vc = simple_strtoul(options, &options, 10) - 1;
+ if (last_fb_vc < first_fb_vc || last_fb_vc >= MAX_NR_CONSOLES)
+ last_fb_vc = MAX_NR_CONSOLES - 1;
fbcon_is_default = 0;
continue;
}
@@ -1717,8 +1719,6 @@
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- if (logo_shown >= 0)
- goto redraw_up;
switch (fb_scrollmode(p)) {
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, t, b - t - count,
@@ -1807,8 +1807,6 @@
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- if (logo_shown >= 0)
- goto redraw_down;
switch (fb_scrollmode(p)) {
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
@@ -2510,6 +2508,11 @@
if (charcount != 256 && charcount != 512)
return -EINVAL;
+ /* font bigger than screen resolution ? */
+ if (w > FBCON_SWAP(info->var.rotate, info->var.xres, info->var.yres) ||
+ h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
+ return -EINVAL;
+
/* Make sure drawing engine can handle the font */
if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
!(info->pixmap.blit_y & (1 << (font->height - 1))))
@@ -2771,6 +2774,34 @@
}
EXPORT_SYMBOL(fbcon_update_vcs);
+/* let fbcon check if it supports a new screen resolution */
+int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct vc_data *vc;
+ unsigned int i;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (!ops)
+ return 0;
+
+ /* prevent setting a screen size which is smaller than font size */
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ vc = vc_cons[i].d;
+ if (!vc || vc->vc_mode != KD_TEXT ||
+ registered_fb[con2fb_map[i]] != info)
+ continue;
+
+ if (vc->vc_font.width > FBCON_SWAP(var->rotate, var->xres, var->yres) ||
+ vc->vc_font.height > FBCON_SWAP(var->rotate, var->yres, var->xres))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fbcon_modechange_possible);
+
int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode)
{
@@ -3300,6 +3331,9 @@
console_lock();
+ deferred_takeover = false;
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
for_each_registered_fb(i)
fbcon_fb_registered(registered_fb[i]);
@@ -3317,8 +3351,6 @@
pr_info("fbcon: Taking over console\n");
dummycon_unregister_output_notifier(&fbcon_output_nb);
- deferred_takeover = false;
- logo_shown = FBCON_LOGO_DONTSHOW;
/* We may get called in atomic context */
schedule_work(&fbcon_deferred_takeover_work);
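fbcon_modechange_possible() compares the console font against the rotated resolution, so for 90 and 270 degree rotation xres and yres trade places. A small sketch with a helper that mirrors FBCON_SWAP's behaviour (toy mode geometry, purely illustrative):

#include <stdio.h>

/* 0 = upright, 1 = 90deg, 2 = upside down, 3 = 270deg (fb rotation codes) */
#define SWAP(rotate, upright, rotated) \
	(((rotate) == 0 || (rotate) == 2) ? (upright) : (rotated))

/* Reject a mode whose (possibly rotated) resolution cannot hold the font. */
static int mode_fits_font(int rotate, int xres, int yres,
			  int font_w, int font_h)
{
	if (font_w > SWAP(rotate, xres, yres) ||
	    font_h > SWAP(rotate, yres, xres))
		return 0;
	return 1;
}

int main(void)
{
	/* 8x16 font on a (deliberately narrow) 12x640 mode: fits upright,
	 * but once rotated by 90 degrees the 16-pixel height exceeds 12. */
	printf("upright: %d\n", mode_fits_font(0, 12, 640, 8, 16));  /* 1 */
	printf("rotated: %d\n", mode_fits_font(1, 12, 640, 8, 16));  /* 0 */
	return 0;
}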
diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 55d2bd0..6484346 100644
--- a/drivers/video/fbdev/core/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
@@ -214,9 +214,11 @@
static void fb_cvt_print_name(struct fb_cvt_data *cvt)
{
u32 pixcount, pixcount_mod;
- int cnt = 255, offset = 0, read = 0;
- u8 *buf = kzalloc(256, GFP_KERNEL);
+ int size = 256;
+ int off = 0;
+ u8 *buf;
+ buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return;
@@ -224,43 +226,30 @@
pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000;
pixcount_mod /= 1000;
- read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ",
- cvt->xres, cvt->yres, cvt->refresh);
- offset += read;
- cnt -= read;
+ off += scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ",
+ cvt->xres, cvt->yres, cvt->refresh);
- if (cvt->status)
- snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega "
- "Pixel Image\n", pixcount, pixcount_mod);
- else {
- if (pixcount) {
- read = snprintf(buf+offset, cnt, "%d", pixcount);
- cnt -= read;
- offset += read;
- }
+ if (cvt->status) {
+ off += scnprintf(buf + off, size - off,
+ "Not a CVT standard - %d.%03d Mega Pixel Image\n",
+ pixcount, pixcount_mod);
+ } else {
+ if (pixcount)
+ off += scnprintf(buf + off, size - off, "%d", pixcount);
- read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod);
- cnt -= read;
- offset += read;
+ off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod);
if (cvt->aspect_ratio == 0)
- read = snprintf(buf+offset, cnt, "3");
+ off += scnprintf(buf + off, size - off, "3");
else if (cvt->aspect_ratio == 3)
- read = snprintf(buf+offset, cnt, "4");
+ off += scnprintf(buf + off, size - off, "4");
else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4)
- read = snprintf(buf+offset, cnt, "9");
+ off += scnprintf(buf + off, size - off, "9");
else if (cvt->aspect_ratio == 2)
- read = snprintf(buf+offset, cnt, "A");
- else
- read = 0;
- cnt -= read;
- offset += read;
+ off += scnprintf(buf + off, size - off, "A");
- if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
- read = snprintf(buf+offset, cnt, "-R");
- cnt -= read;
- offset += read;
- }
+ if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
+ off += scnprintf(buf + off, size - off, "-R");
}
printk(KERN_INFO "%s\n", buf);
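The fbcvt rewrite replaces the manual snprintf/offset/cnt bookkeeping with an accumulated offset, where every call is told exactly how much room is left. scnprintf() is kernel-only, so the user-space sketch below approximates it with vsnprintf plus a clamp on the return value:

#include <stdarg.h>
#include <stdio.h>

/* User-space stand-in for the kernel's scnprintf(): never returns more
 * than the space actually consumed in the buffer. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	ret = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (ret < 0)
		return 0;
	return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
	char buf[32];
	int off = 0;

	/* Each call appends at buf + off and can never overrun the buffer. */
	off += scnprintf_like(buf + off, sizeof(buf) - off, "fbcvt: %dx%d", 1920, 1080);
	off += scnprintf_like(buf + off, sizeof(buf) - off, "@%d", 60);
	off += scnprintf_like(buf + off, sizeof(buf) - off, "-R");
	printf("%s (len %d)\n", buf, off);
	return 0;
}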
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 00939ca..d787a34 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -513,7 +513,7 @@
while (n && (n * (logo->width + 8) - 8 > xres))
--n;
- image.dx = (xres - n * (logo->width + 8) - 8) / 2;
+ image.dx = (xres - (n * (logo->width + 8) - 8)) / 2;
image.dy = y ?: (yres - logo->height) / 2;
} else {
image.dx = 0;
@@ -1019,6 +1019,16 @@
if (ret)
return ret;
+ /* verify that virtual resolution >= physical resolution */
+ if (var->xres_virtual < var->xres ||
+ var->yres_virtual < var->yres) {
+ pr_warn("WARNING: fbcon: Driver '%s' missed to adjust virtual screen size (%ux%u vs. %ux%u)\n",
+ info->fix.id,
+ var->xres_virtual, var->yres_virtual,
+ var->xres, var->yres);
+ return -EINVAL;
+ }
+
if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_NOW)
return 0;
@@ -1109,7 +1119,9 @@
return -EFAULT;
console_lock();
lock_fb_info(info);
- ret = fb_set_var(info, &var);
+ ret = fbcon_modechange_possible(info, &var);
+ if (!ret)
+ ret = fb_set_var(info, &var);
if (!ret)
fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
unlock_fb_info(info);
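The logo-centering fix is an operator-precedence correction: the occupied row width is n * (width + 8) - 8, with no gap after the last logo, and that whole quantity has to be subtracted from xres before halving. A worked check with three 80-pixel logos on an 800-pixel line:

#include <stdio.h>

int main(void)
{
	int xres = 800, n = 3, width = 80;
	int row = n * (width + 8) - 8;                               /* 256 pixels occupied */

	printf("row width = %d\n", row);
	printf("old dx = %d\n", (xres - n * (width + 8) - 8) / 2);   /* 264: 8 px off centre */
	printf("new dx = %d\n", (xres - (n * (width + 8) - 8)) / 2); /* 272 = (800 - 256) / 2 */
	return 0;
}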
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 3c309ab..40baa79 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1008,7 +1008,6 @@
struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
- resource_size_t pot_start, pot_end;
phys_addr_t paddr;
int ret;
@@ -1059,23 +1058,7 @@
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
- if (gen2vm) {
- pot_start = 0;
- pot_end = -1;
- } else {
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size) {
- pr_err("Resource not available or (0x%lx < 0x%lx)\n",
- (unsigned long) pci_resource_len(pdev, 0),
- (unsigned long) screen_fb_size);
- goto err1;
- }
-
- pot_end = pci_resource_end(pdev, 0);
- pot_start = pot_end - screen_fb_size + 1;
- }
-
- ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
+ ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1,
screen_fb_size, 0x100000, true);
if (ret != 0) {
pr_err("Unable to allocate framebuffer memory\n");
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 52cce0d..8fb4e01 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -400,7 +400,7 @@
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
- u32 bpp, base, dacspeed24, mem;
+ u32 bpp, base, dacspeed24, mem, freq;
u8 r7;
int i;
@@ -643,7 +643,12 @@
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
+ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
+ if (freq < I740_RFREQ_FIX) {
+ fb_dbg(info, "invalid pixclock\n");
+ freq = I740_RFREQ_FIX;
+ }
+ i740_calc_vclk(freq, par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
@@ -657,6 +662,9 @@
static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ if (!var->pixclock)
+ return -EINVAL;
+
switch (var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 570439b..daaa998 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1377,7 +1377,7 @@
.lowlevel = &matrox_G100
};
static struct video_board vbG200eW = {
- .maxvram = 0x800000,
+ .maxvram = 0x100000,
.maxdisplayable = 0x800000,
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c
index d7994a1..0b48965 100644
--- a/drivers/video/fbdev/nvidia/nv_i2c.c
+++ b/drivers/video/fbdev/nvidia/nv_i2c.c
@@ -86,7 +86,7 @@
{
int rc;
- strcpy(chan->adapter.name, name);
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = i2c_class;
chan->adapter.algo_data = &chan->algo;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index b4a1aef..777f6d6 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -251,6 +251,7 @@
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
+ of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 4b0793a..a2c7c5c 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -409,7 +409,7 @@
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+ return sysfs_emit(buf, "%d\n", errors);
}
static ssize_t dsicm_hw_revision_show(struct device *dev,
@@ -439,7 +439,7 @@
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+ return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3);
}
static ssize_t dsicm_store_ulps(struct device *dev,
@@ -487,7 +487,7 @@
t = ddata->ulps_enabled;
mutex_unlock(&ddata->lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
+ return sysfs_emit(buf, "%u\n", t);
}
static ssize_t dsicm_store_ulps_timeout(struct device *dev,
@@ -532,7 +532,7 @@
t = ddata->ulps_timeout;
mutex_unlock(&ddata->lock);
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
+ return sysfs_emit(buf, "%u\n", t);
}
static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 1293515..0cbc5b9 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -476,7 +476,7 @@
int i;
if (!ddata->has_cabc)
- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+ return sysfs_emit(buf, "%s\n", cabc_modes[0]);
for (i = 0, len = 0;
len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index bb85b21..9f6ef9e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -169,7 +169,7 @@
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
+ return sysfs_emit(buf, "%d\n", ddata->vmirror);
}
static ssize_t tpo_td043_vmirror_store(struct device *dev,
@@ -199,7 +199,7 @@
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
+ return sysfs_emit(buf, "%d\n", ddata->mode);
}
static ssize_t tpo_td043_mode_store(struct device *dev,
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 0642555..c12d46e 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -616,6 +616,11 @@
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 4279e13..9e9888e 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -381,7 +381,7 @@
struct pxa3xx_gcu_batch *buffer;
struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
- int words = count / 4;
+ size_t words = count / 4;
/* Does not need to be atomic. There's a lock in user space,
* but anyhow, this is just for statistics. */
@@ -650,6 +650,7 @@
for (i = 0; i < 8; i++) {
ret = pxa3xx_gcu_add_buffer(dev, priv);
if (ret) {
+ pxa3xx_gcu_free_buffers(dev, priv);
dev_err(dev, "failed to allocate DMA memory\n");
goto err_disable_clk;
}
@@ -666,15 +667,15 @@
SHARED_SIZE, irq);
return 0;
-err_free_dma:
- dma_free_coherent(dev, SHARED_SIZE,
- priv->shared, priv->shared_phys);
+err_disable_clk:
+ clk_disable_unprepare(priv->clk);
err_misc_deregister:
misc_deregister(&priv->misc_dev);
-err_disable_clk:
- clk_disable_unprepare(priv->clk);
+err_free_dma:
+ dma_free_coherent(dev, SHARED_SIZE,
+ priv->shared, priv->shared_phys);
return ret;
}
@@ -687,6 +688,7 @@
pxa3xx_gcu_wait_idle(priv);
misc_deregister(&priv->misc_dev);
dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys);
+ clk_disable_unprepare(priv->clk);
pxa3xx_gcu_free_buffers(dev, priv);
return 0;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 5c74253..a936455 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -902,6 +902,8 @@
value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index fde27fe..d6b2ce9 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -355,12 +355,12 @@
}
break;
case 400:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) {
if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth];
}
break;
case 512:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) {
if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth];
}
break;
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 0dbc6bf..092a1ca 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1047,7 +1047,7 @@
if (count + p > total_size)
count = total_size - p;
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1059,24 +1059,13 @@
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
dst = buffer;
- for (i = c >> 2; i--;) {
- *dst = fb_readl(src++);
- *dst = big_swap(*dst);
- dst++;
- }
- if (c & 3) {
- u8 *dst8 = (u8 *)dst;
- u8 __iomem *src8 = (u8 __iomem *)src;
+ for (i = (c + 3) >> 2; i--;) {
+ u32 val;
- for (i = c & 3; i--;) {
- if (i & 1) {
- *dst8++ = fb_readb(++src8);
- } else {
- *dst8++ = fb_readb(--src8);
- src8 += 2;
- }
- }
- src = (u32 __iomem *)src8;
+ val = fb_readl(src);
+ *dst = big_swap(val);
+ src++;
+ dst++;
}
if (copy_to_user(buf, buffer, c)) {
@@ -1130,7 +1119,7 @@
count = total_size - p;
}
- buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -1148,24 +1137,11 @@
break;
}
- for (i = c >> 2; i--;) {
- fb_writel(big_swap(*src), dst++);
+ for (i = (c + 3) >> 2; i--;) {
+ fb_writel(big_swap(*src), dst);
+ dst++;
src++;
}
- if (c & 3) {
- u8 *src8 = (u8 *)src;
- u8 __iomem *dst8 = (u8 __iomem *)dst;
-
- for (i = c & 3; i--;) {
- if (i & 1) {
- fb_writeb(*src8++, ++dst8);
- } else {
- fb_writeb(*src8++, --dst8);
- dst8 += 2;
- }
- }
- dst = (u32 __iomem *)dst8;
- }
*ppos += c;
buf += c;
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index bfac3ee..5fa3f1e 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -97,7 +97,6 @@
struct kref kref;
int fb_count;
bool virtualized; /* true when physical usb device not present */
- struct delayed_work free_framebuffer_work;
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
u8 *edid; /* null until we read edid from hw or get from sysfs */
@@ -137,6 +136,8 @@
static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
static void ufx_free_urb_list(struct ufx_data *dev);
+static DEFINE_MUTEX(disconnect_mutex);
+
/* reads a control register */
static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
{
@@ -1070,9 +1071,13 @@
if (user == 0 && !console)
return -EBUSY;
+ mutex_lock(&disconnect_mutex);
+
/* If the USB device is gone, we don't accept new opens */
- if (dev->virtualized)
+ if (dev->virtualized) {
+ mutex_unlock(&disconnect_mutex);
return -ENODEV;
+ }
dev->fb_count++;
@@ -1096,6 +1101,8 @@
pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
info->node, user, info, dev->fb_count);
+ mutex_unlock(&disconnect_mutex);
+
return 0;
}
@@ -1108,15 +1115,24 @@
{
struct ufx_data *dev = container_of(kref, struct ufx_data, kref);
- /* this function will wait for all in-flight urbs to complete */
- if (dev->urbs.count > 0)
- ufx_free_urb_list(dev);
-
- pr_debug("freeing ufx_data %p", dev);
-
kfree(dev);
}
+static void ufx_ops_destory(struct fb_info *info)
+{
+ struct ufx_data *dev = info->par;
+ int node = info->node;
+
+ /* Assume info structure is freed after this point */
+ framebuffer_release(info);
+
+ pr_debug("fb_info for /dev/fb%d has been freed", node);
+
+ /* release reference taken by kref_init in probe() */
+ kref_put(&dev->kref, ufx_free);
+}
+
+
static void ufx_release_urb_work(struct work_struct *work)
{
struct urb_node *unode = container_of(work, struct urb_node,
@@ -1125,14 +1141,9 @@
up(&unode->dev->urbs.limit_sem);
}
-static void ufx_free_framebuffer_work(struct work_struct *work)
+static void ufx_free_framebuffer(struct ufx_data *dev)
{
- struct ufx_data *dev = container_of(work, struct ufx_data,
- free_framebuffer_work.work);
struct fb_info *info = dev->info;
- int node = info->node;
-
- unregister_framebuffer(info);
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
@@ -1144,11 +1155,6 @@
dev->info = NULL;
- /* Assume info structure is freed after this point */
- framebuffer_release(info);
-
- pr_debug("fb_info for /dev/fb%d has been freed", node);
-
/* ref taken in probe() as part of registering framebfufer */
kref_put(&dev->kref, ufx_free);
}
@@ -1160,11 +1166,13 @@
{
struct ufx_data *dev = info->par;
+ mutex_lock(&disconnect_mutex);
+
dev->fb_count--;
/* We can't free fb_info here - fbmem will touch it when we return */
if (dev->virtualized && (dev->fb_count == 0))
- schedule_delayed_work(&dev->free_framebuffer_work, HZ);
+ ufx_free_framebuffer(dev);
if ((dev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
@@ -1177,6 +1185,8 @@
kref_put(&dev->kref, ufx_free);
+ mutex_unlock(&disconnect_mutex);
+
return 0;
}
@@ -1283,6 +1293,7 @@
.fb_blank = ufx_ops_blank,
.fb_check_var = ufx_ops_check_var,
.fb_set_par = ufx_ops_set_par,
+ .fb_destroy = ufx_ops_destory,
};
/* Assumes &info->lock held by caller
@@ -1656,6 +1667,7 @@
info->par = dev;
info->pseudo_palette = dev->pseudo_palette;
info->fbops = &ufx_ops;
+ INIT_LIST_HEAD(&info->modelist);
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
@@ -1663,11 +1675,6 @@
goto destroy_modedb;
}
- INIT_DELAYED_WORK(&dev->free_framebuffer_work,
- ufx_free_framebuffer_work);
-
- INIT_LIST_HEAD(&info->modelist);
-
retval = ufx_reg_read(dev, 0x3000, &id_rev);
check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
@@ -1740,8 +1747,12 @@
static void ufx_usb_disconnect(struct usb_interface *interface)
{
struct ufx_data *dev;
+ struct fb_info *info;
+
+ mutex_lock(&disconnect_mutex);
dev = usb_get_intfdata(interface);
+ info = dev->info;
pr_debug("USB disconnect starting\n");
@@ -1755,12 +1766,17 @@
/* if clients still have us open, will be freed on last close */
if (dev->fb_count == 0)
- schedule_delayed_work(&dev->free_framebuffer_work, 0);
+ ufx_free_framebuffer(dev);
- /* release reference taken by kref_init in probe() */
- kref_put(&dev->kref, ufx_free);
+ /* this function will wait for all in-flight urbs to complete */
+ if (dev->urbs.count > 0)
+ ufx_free_urb_list(dev);
- /* consider ufx_data freed */
+ pr_debug("freeing ufx_data %p", dev);
+
+ unregister_framebuffer(info);
+
+ mutex_unlock(&disconnect_mutex);
}
static struct usb_driver ufx_driver = {
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index c338f78..0ebdd28 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -370,6 +370,9 @@
/* pointer to all internal data */
struct sti_all_data *sti_data;
+
+ /* pa_path of this device */
+ char pa_path[24];
};
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 2658656..3feb6e4 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1041,6 +1041,48 @@
SETUP_FB(fb);
}
+#define ARTIST_VRAM_SIZE 0x000804
+#define ARTIST_VRAM_SRC 0x000808
+#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04
+#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00
+#define ARTIST_SRC_BM_ACCESS 0x018008
+#define ARTIST_FGCOLOR 0x018010
+#define ARTIST_BGCOLOR 0x018014
+#define ARTIST_BITMAP_OP 0x01801c
+
+static void
+stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+ if (rect->rop != ROP_COPY ||
+ (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32))
+ return cfb_fillrect(info, rect);
+
+ SETUP_HW(fb);
+
+ if (fb->info.var.bits_per_pixel == 32) {
+ WRITE_WORD(0xBBA0A000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
+ } else {
+ WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
+
+ NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
+ }
+
+ WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP);
+ WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS);
+ NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000);
+ NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color);
+ WRITE_WORD(0, fb, ARTIST_BGCOLOR);
+
+ NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy));
+ SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height));
+
+ SETUP_FB(fb);
+}
+
static void __init
stifb_init_display(struct stifb_info *fb)
{
@@ -1105,7 +1147,7 @@
.owner = THIS_MODULE,
.fb_setcolreg = stifb_setcolreg,
.fb_blank = stifb_blank,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = stifb_fillrect,
.fb_copyarea = stifb_copyarea,
.fb_imageblit = cfb_imageblit,
};
@@ -1257,7 +1299,7 @@
/* limit fbsize to max visible screen size */
if (fix->smem_len > yres*fix->line_length)
- fix->smem_len = yres*fix->line_length;
+ fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024);
fix->accel = FB_ACCEL_NONE;
@@ -1297,7 +1339,7 @@
goto out_err0;
}
info->screen_size = fix->smem_len;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
+ info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
@@ -1317,11 +1359,11 @@
goto out_err3;
}
+ /* save for primary gfx device detection & unregister_framebuffer() */
+ sti->info = info;
if (register_framebuffer(&fb->info) < 0)
goto out_err4;
- sti->info = info; /* save for unregister_framebuffer() */
-
fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index b9cdd02..d9eec1b 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1426,7 +1426,7 @@
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_rendered));
}
@@ -1434,7 +1434,7 @@
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_identical));
}
@@ -1442,7 +1442,7 @@
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->bytes_sent));
}
@@ -1450,7 +1450,7 @@
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
+ return sysfs_emit(buf, "%u\n",
atomic_read(&dlfb->cpu_kcycles_used));
}
@@ -1649,8 +1649,9 @@
const struct device_attribute *attr;
struct dlfb_data *dlfb;
struct fb_info *info;
- int retval = -ENOMEM;
+ int retval;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *out;
/* usb initialization */
dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1664,6 +1665,12 @@
dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
+ retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
+ if (retval) {
+ dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+ goto error;
+ }
+
dev_dbg(&intf->dev, "console enable=%d\n", console);
dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio);
dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);
@@ -1673,6 +1680,7 @@
if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
dev_err(&intf->dev,
"firmware not recognized, incompatible device?\n");
+ retval = -ENODEV;
goto error;
}
@@ -1686,8 +1694,10 @@
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error;
+ }
dlfb->info = info;
info->par = dlfb;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 7a959e5..c274ec5 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -504,6 +504,8 @@
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index d96ab28..4e641a7 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -770,12 +770,18 @@
fb_dealloc_cmap(&info->cmap);
kfree(info->pseudo_palette);
}
- if (remapped_fbuf != NULL)
+ if (remapped_fbuf != NULL) {
iounmap(remapped_fbuf);
- if (remapped_regs != NULL)
+ remapped_fbuf = NULL;
+ }
+ if (remapped_regs != NULL) {
iounmap(remapped_regs);
- if (remapped_base != NULL)
+ remapped_regs = NULL;
+ }
+ if (remapped_base != NULL) {
iounmap(remapped_base);
+ remapped_base = NULL;
+ }
if (info)
framebuffer_release(info);
return err;
@@ -795,8 +801,11 @@
fb_dealloc_cmap(&info->cmap);
iounmap(remapped_base);
+ remapped_base = NULL;
iounmap(remapped_regs);
+ remapped_regs = NULL;
iounmap(remapped_fbuf);
+ remapped_fbuf = NULL;
framebuffer_release(info);
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 73eb348..4ccfd30 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -356,8 +356,8 @@
goto err_vbg_core_exit;
}
- ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
- DEVICE_NAME, gdev);
+ ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME,
+ gdev);
if (ret) {
vbg_err("vboxguest: Error requesting irq: %d\n", ret);
goto err_vbg_core_exit;
@@ -367,7 +367,7 @@
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME, ret);
- goto err_vbg_core_exit;
+ goto err_free_irq;
}
ret = misc_register(&gdev->misc_device_user);
@@ -403,6 +403,8 @@
misc_deregister(&gdev->misc_device_user);
err_unregister_misc_device:
misc_deregister(&gdev->misc_device);
+err_free_irq:
+ free_irq(pci->irq, gdev);
err_vbg_core_exit:
vbg_core_exit(gdev);
err_disable_pcidev:
@@ -419,6 +421,7 @@
vbg_gdev = NULL;
mutex_unlock(&vbg_gdev_mutex);
+ free_irq(pci->irq, gdev);
device_remove_file(gdev->dev, &dev_attr_host_features);
device_remove_file(gdev->dev, &dev_attr_host_version);
misc_deregister(&gdev->misc_device_user);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 238383f..e8ef0c6 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -62,6 +62,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
@@ -543,6 +544,28 @@
.get_shm_region = vm_get_shm_region,
};
+#ifdef CONFIG_PM_SLEEP
+static int virtio_mmio_freeze(struct device *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+ return virtio_device_freeze(&vm_dev->vdev);
+}
+
+static int virtio_mmio_restore(struct device *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+ if (vm_dev->version == 1)
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+ return virtio_device_restore(&vm_dev->vdev);
+}
+
+static const struct dev_pm_ops virtio_mmio_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
+};
+#endif
static void virtio_mmio_release_dev(struct device *_d)
{
@@ -689,6 +712,7 @@
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
if (err) {
+ put_device(&vm_cmdline_parent);
pr_err("Failed to register parent device!\n");
return err;
}
@@ -786,6 +810,9 @@
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &virtio_mmio_pm_ops,
+#endif
},
};
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index b35bb2d..1e890ef 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -254,8 +254,7 @@
if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}
if (vp_dev->msix_enabled) {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 974d02b..6546d02 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -2092,16 +2092,20 @@
if (sl->reg_num.id == reg_num->id)
seq = i;
+ if (w1_reset_bus(sl->master))
+ goto error;
+
+ /* Put the device into chain DONE state */
+ w1_write_8(sl->master, W1_MATCH_ROM);
+ w1_write_block(sl->master, (u8 *)&rn, 8);
w1_write_8(sl->master, W1_42_CHAIN);
w1_write_8(sl->master, W1_42_CHAIN_DONE);
w1_write_8(sl->master, W1_42_CHAIN_DONE_INV);
- w1_read_block(sl->master, &ack, sizeof(ack));
/* check for acknowledgment */
ack = w1_read_8(sl->master);
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
-
}
/* Exit from CHAIN state */
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index e5dcb26..dcb3ffd 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -274,6 +274,8 @@
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 359302f..46c2a4b 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -227,8 +227,9 @@
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
pm_runtime_put_noidle(dev);
+ pm_runtime_disable(&pdev->dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 190d26e..2815f78 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -304,14 +304,12 @@
return ret;
}
-static int sp805_wdt_remove(struct amba_device *adev)
+static void sp805_wdt_remove(struct amba_device *adev)
{
struct sp805_wdt *wdt = amba_get_drvdata(adev);
watchdog_unregister_device(&wdt->wdd);
watchdog_set_drvdata(&wdt->wdd, NULL);
-
- return 0;
}
static int __maybe_unused sp805_wdt_suspend(struct device *dev)
diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c
index c137ad2..0ea554c 100644
--- a/drivers/watchdog/ts4800_wdt.c
+++ b/drivers/watchdog/ts4800_wdt.c
@@ -125,13 +125,16 @@
ret = of_property_read_u32_index(np, "syscon", 1, ®);
if (ret < 0) {
dev_err(dev, "no offset in syscon\n");
+ of_node_put(syscon_np);
return ret;
}
/* allocate memory for watchdog struct */
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt)
+ if (!wdt) {
+ of_node_put(syscon_np);
return -ENOMEM;
+ }
/* set regmap and offset to know where to write */
wdt->feed_offset = reg;
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 3065dd6..c60723f 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -462,6 +462,7 @@
return ret;
watchdog_set_nowayout(&wdat->wdd, nowayout);
+ watchdog_stop_on_reboot(&wdat->wdd);
return devm_watchdog_register_device(dev, &wdat->wdd);
}
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
index 25c053b..2c306de 100644
--- a/drivers/xen/features.c
+++ b/drivers/xen/features.c
@@ -29,6 +29,6 @@
if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
break;
for (j = 0; j < 32; j++)
- xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
+ xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
}
}
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 20d7d05..9c286b2 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -16,6 +16,7 @@
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <xen/interface/event_channel.h>
+#include <xen/grant_table.h>
struct gntdev_dmabuf_priv;
@@ -43,9 +44,10 @@
};
struct gntdev_grant_map {
+ atomic_t in_use;
struct mmu_interval_notifier notifier;
+ bool notifier_init;
struct list_head next;
- struct vm_area_struct *vma;
int index;
int count;
int flags;
@@ -56,6 +58,7 @@
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
struct gnttab_unmap_grant_ref *kunmap_ops;
+ bool *being_removed;
struct page **pages;
unsigned long pages_vm_start;
@@ -73,6 +76,11 @@
/* Needed to avoid allocation in gnttab_dma_free_pages(). */
xen_pfn_t *frames;
#endif
+
+ /* Number of live grants */
+ atomic_t live_grants;
+ /* Needed to avoid allocation in __unmap_grant_pages */
+ struct gntab_unmap_queue_data unmap_data;
};
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 54778aa..16acdda 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -60,10 +61,11 @@
MODULE_PARM_DESC(limit,
"Maximum number of grants that may be mapped by one mapping request");
+/* True in PV mode, false otherwise */
static int use_ptemod;
-static int unmap_grant_pages(struct gntdev_grant_map *map,
- int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
static struct miscdevice gntdev_miscdev;
@@ -120,6 +122,7 @@
kvfree(map->unmap_ops);
kvfree(map->kmap_ops);
kvfree(map->kunmap_ops);
+ kvfree(map->being_removed);
kfree(map);
}
@@ -140,12 +143,15 @@
add->kunmap_ops = kvcalloc(count,
sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->being_removed =
+ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
NULL == add->kunmap_ops ||
- NULL == add->pages)
+ NULL == add->pages ||
+ NULL == add->being_removed)
goto err;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
@@ -240,9 +246,39 @@
if (!refcount_dec_and_test(&map->users))
return;
- if (map->pages && !use_ptemod)
+ if (map->pages && !use_ptemod) {
+ /*
+ * Increment the reference count. This ensures that the
+ * subsequent call to unmap_grant_pages() will not wind up
+ * re-entering itself. It *can* wind up calling
+ * gntdev_put_map() recursively, but such calls will be with a
+ * reference count greater than 1, so they will return before
+ * this code is reached. The recursion depth is thus limited to
+ * 1. Do NOT use refcount_inc() here, as it will detect that
+ * the reference count is zero and WARN().
+ */
+ refcount_set(&map->users, 1);
+
+ /*
+ * Unmap the grants. This may or may not be asynchronous, so it
+ * is possible that the reference count is 1 on return, but it
+ * could also be greater than 1.
+ */
unmap_grant_pages(map, 0, map->count);
+ /* Check if the memory now needs to be freed */
+ if (!refcount_dec_and_test(&map->users))
+ return;
+
+ /*
+ * All pages have been returned to the hypervisor, so free the
+ * map.
+ */
+ }
+
+ if (use_ptemod && map->notifier_init)
+ mmu_interval_notifier_remove(&map->notifier);
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
@@ -255,21 +291,14 @@
static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
struct gntdev_grant_map *map = data;
- unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
- int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
+ unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT;
+ int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte |
+ (1 << _GNTMAP_guest_avail0);
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
- /*
- * Set the PTE as special to force get_user_pages_fast() fall
- * back to the slow path. If this is not supported as part of
- * the grant map, it will be done afterwards.
- */
- if (xen_feature(XENFEAT_gnttab_map_avail_bits))
- flags |= (1 << _GNTMAP_guest_avail0);
-
gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
@@ -278,16 +307,9 @@
return 0;
}
-#ifdef CONFIG_X86
-static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
-{
- set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
- return 0;
-}
-#endif
-
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
+ size_t alloced = 0;
int i, err = 0;
if (!use_ptemod) {
@@ -336,87 +358,130 @@
map->pages, map->count);
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status == GNTST_okay)
+ if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- else if (!err)
+ alloced++;
+ } else if (!err)
err = -EINVAL;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
if (use_ptemod) {
- if (map->kmap_ops[i].status == GNTST_okay)
+ if (map->kmap_ops[i].status == GNTST_okay) {
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
- else if (!err)
+ } else if (!err)
err = -EINVAL;
}
}
+ atomic_add(alloced, &map->live_grants);
return err;
}
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+static void __unmap_grant_pages_done(int result,
+ struct gntab_unmap_queue_data *data)
+{
+ unsigned int i;
+ struct gntdev_grant_map *map = data->data;
+ unsigned int offset = data->unmap_ops - map->unmap_ops;
+ int successful_unmaps = 0;
+ int live_grants;
+
+ for (i = 0; i < data->count; i++) {
+ if (map->unmap_ops[offset + i].status == GNTST_okay &&
+ map->unmap_ops[offset + i].handle != -1)
+ successful_unmaps++;
+
+ WARN_ON(map->unmap_ops[offset+i].status &&
+ map->unmap_ops[offset+i].handle != -1);
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
+ if (use_ptemod) {
+ if (map->kunmap_ops[offset + i].status == GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != -1)
+ successful_unmaps++;
+
+ WARN_ON(map->kunmap_ops[offset+i].status &&
+ map->kunmap_ops[offset+i].handle != -1);
+ pr_debug("kunmap handle=%u st=%d\n",
+ map->kunmap_ops[offset+i].handle,
+ map->kunmap_ops[offset+i].status);
+ map->kunmap_ops[offset+i].handle = -1;
+ }
+ }
+
+ /*
+ * Decrease the live-grant counter. This must happen after the loop to
+ * prevent premature reuse of the grants by gnttab_mmap().
+ */
+ live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
+ if (WARN_ON(live_grants < 0))
+ pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
+ __func__, live_grants, successful_unmaps);
+
+ /* Release reference taken by __unmap_grant_pages */
+ gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
int pages)
{
- int i, err = 0;
- struct gntab_unmap_queue_data unmap_data;
-
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
+
if (pgno >= offset && pgno < offset + pages) {
/* No need for kmap, pages are in lowmem */
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
- unmap_data.unmap_ops = map->unmap_ops + offset;
- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
- unmap_data.pages = map->pages + offset;
- unmap_data.count = pages;
+ map->unmap_data.unmap_ops = map->unmap_ops + offset;
+ map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.pages = map->pages + offset;
+ map->unmap_data.count = pages;
+ map->unmap_data.done = __unmap_grant_pages_done;
+ map->unmap_data.data = map;
+ refcount_inc(&map->users); /* to keep map alive during async call below */
- err = gnttab_unmap_refs_sync(&unmap_data);
- if (err)
- return err;
-
- for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
- pr_debug("unmap handle=%d st=%d\n",
- map->unmap_ops[offset+i].handle,
- map->unmap_ops[offset+i].status);
- map->unmap_ops[offset+i].handle = -1;
- }
- return err;
+ gnttab_unmap_refs_async(&map->unmap_data);
}
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
- int range, err = 0;
+ int range;
+
+ if (atomic_read(&map->live_grants) == 0)
+ return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants. Only unmap valid ranges.
*/
- while (pages && !err) {
- while (pages && map->unmap_ops[offset].handle == -1) {
+ while (pages) {
+ while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1)
+ if (map->being_removed[offset + range])
break;
+ map->being_removed[offset + range] = true;
range++;
}
- err = __unmap_grant_pages(map, offset, range);
+ if (range)
+ __unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
-
- return err;
}
/* ------------------------------------------------------------------ */
@@ -436,11 +501,7 @@
struct gntdev_priv *priv = file->private_data;
pr_debug("gntdev_vma_close %p\n", vma);
- if (use_ptemod) {
- WARN_ON(map->vma != vma);
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
+
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
}
@@ -468,31 +529,30 @@
struct gntdev_grant_map *map =
container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
- int err;
+ unsigned long map_start, map_end;
if (!mmu_notifier_range_blockable(range))
return false;
+ map_start = map->pages_vm_start;
+ map_end = map->pages_vm_start + (map->count << PAGE_SHIFT);
+
/*
* If the VMA is split or otherwise changed the notifier is not
* updated, but we don't want to process VA's outside the modified
* VMA. FIXME: It would be much more understandable to just prevent
* modifying the VMA in the first place.
*/
- if (map->vma->vm_start >= range->end ||
- map->vma->vm_end <= range->start)
+ if (map_start >= range->end || map_end <= range->start)
return true;
- mstart = max(range->start, map->vma->vm_start);
- mend = min(range->end, map->vma->vm_end);
+ mstart = max(range->start, map_start);
+ mend = min(range->end, map_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end,
- range->start, range->end, mstart, mend);
- err = unmap_grant_pages(map,
- (mstart - map->vma->vm_start) >> PAGE_SHIFT,
- (mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
+ map->index, map->count, map_start, map_end,
+ range->start, range->end, mstart, mend);
+ unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT,
+ (mend - mstart) >> PAGE_SHIFT);
return true;
}
@@ -972,14 +1032,15 @@
return -EINVAL;
pr_debug("map %d+%d at %lx (pgoff %lx)\n",
- index, count, vma->vm_start, vma->vm_pgoff);
+ index, count, vma->vm_start, vma->vm_pgoff);
mutex_lock(&priv->lock);
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (use_ptemod && map->vma)
+ if (!atomic_add_unless(&map->in_use, 1, 1))
goto unlock_out;
+
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
@@ -1000,15 +1061,16 @@
map->flags |= GNTMAP_readonly;
}
+ map->pages_vm_start = vma->vm_start;
+
if (use_ptemod) {
- map->vma = vma;
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
- if (err) {
- map->vma = NULL;
+ if (err)
goto out_unlock_put;
- }
+
+ map->notifier_init = true;
}
mutex_unlock(&priv->lock);
@@ -1025,7 +1087,6 @@
*/
mmu_interval_read_begin(&map->notifier);
- map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1043,23 +1104,6 @@
err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
- } else {
-#ifdef CONFIG_X86
- /*
- * If the PTEs were not made special by the grant map
- * hypercall, do so here.
- *
- * This is racy since the mapping is already visible
- * to userspace but userspace should be well-behaved
- * enough to not touch it until the mmap() call
- * returns.
- */
- if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
- apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- set_grant_ptes_as_special, NULL);
- }
-#endif
}
return 0;
@@ -1071,13 +1115,8 @@
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod) {
+ if (use_ptemod)
unmap_grant_pages(map, 0, map->count);
- if (map->vma) {
- mmu_interval_notifier_remove(&map->notifier);
- map->vma = NULL;
- }
- }
gntdev_put_map(priv, map);
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5c83d41..0a2d24d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -981,6 +981,9 @@
size_t size;
int i, ret;
+ if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
+ return -ENOMEM;
+
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index cdc6daa..9cf7085 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -228,7 +228,7 @@
err = device_register(dev);
if (err) {
- pcpu_release(dev);
+ put_device(dev);
return err;
}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 9db557b..804d8f4 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -137,7 +137,7 @@
if (ret) {
dev_warn(&pdev->dev, "Unable to set the evtchn callback "
"err=%d\n", ret);
- goto out;
+ goto irq_out;
}
}
@@ -145,13 +145,16 @@
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
if (ret)
- goto out;
+ goto irq_out;
ret = gnttab_init();
if (ret)
goto grant_out;
return 0;
grant_out:
gnttab_free_auto_xlat_frames();
+irq_out:
+ if (!xen_have_vector_callback)
+ free_irq(pdev->irq, pdev);
out:
pci_release_region(pdev, 0);
mem_out:
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index fe8df32..cd5f2f0 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -581,27 +581,30 @@
struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
- unsigned int i;
+ unsigned int i, off = 0;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < num; ) {
unsigned int requested;
int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
- PAGE_SIZE);
+ PAGE_SIZE) - off;
if (requested > nr_pages)
return -ENOSPC;
page_count = pin_user_pages_fast(
- (unsigned long) kbufs[i].uptr,
+ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
requested, FOLL_WRITE, pages);
- if (page_count < 0)
- return page_count;
+ if (page_count <= 0)
+ return page_count ? : -EFAULT;
*pinned += page_count;
nr_pages -= page_count;
pages += page_count;
+
+ off = (requested == page_count) ? 0 : off + page_count;
+ i += !off;
}
return 0;
@@ -677,10 +680,8 @@
}
rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
- if (rc < 0) {
- nr_pages = pinned;
+ if (rc < 0)
goto out;
- }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ unlock_pages(pages, pinned);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 5e53b48..097316a 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -190,13 +190,16 @@
};
static struct msi_msix_field_config {
- u16 enable_bit; /* bit for enabling MSI/MSI-X */
- unsigned int int_type; /* interrupt type for exclusiveness check */
+ u16 enable_bit; /* bit for enabling MSI/MSI-X */
+ u16 allowed_bits; /* bits allowed to be changed */
+ unsigned int int_type; /* interrupt type for exclusiveness check */
} msi_field_config = {
.enable_bit = PCI_MSI_FLAGS_ENABLE,
+ .allowed_bits = PCI_MSI_FLAGS_ENABLE,
.int_type = INTERRUPT_TYPE_MSI,
}, msix_field_config = {
.enable_bit = PCI_MSIX_FLAGS_ENABLE,
+ .allowed_bits = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL,
.int_type = INTERRUPT_TYPE_MSIX,
};
@@ -229,7 +232,7 @@
return 0;
if (!dev_data->allow_interrupt_control ||
- (new_value ^ old_value) & ~field_config->enable_bit)
+ (new_value ^ old_value) & ~field_config->allowed_bits)
return PCIBIOS_SET_FAILED;
if (new_value & field_config->enable_bit) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 597af45..0792fda 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -128,7 +128,7 @@
{
struct xenbus_file_priv *u = filp->private_data;
struct read_buffer *rb;
- unsigned i;
+ ssize_t i;
int ret;
mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
i = 0;
while (i < len) {
- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 34742c6..f17c4c0 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -261,7 +261,6 @@
return 0;
}
-EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
struct remap_pfn {
struct mm_struct *mm;