Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a65d5a9..911b3d2 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1398,7 +1398,7 @@
u8 phy_type;
int without_mii;
- phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+ phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
switch (phy_type) {
case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
@@ -1518,7 +1518,7 @@
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
@@ -2266,9 +2266,9 @@
{
/* ASF can be enabled from eeprom */
return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
- (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
- !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
- ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
+ (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
+ !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
+ ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
}
static int e100_up(struct nic *nic)
@@ -2924,7 +2924,7 @@
/* Wol magic packet can be enabled from eeprom */
if ((nic->mac >= mac_82558_D101_A4) &&
- (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
+ (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
nic->flags |= wol_magic;
device_set_wakeup_enable(&pdev->dev, true);
}
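All of the e100 hunks above apply one fix: the EEPROM shadow holds little-endian words exactly as written by the hardware, so every field extraction must go through le16_to_cpu() before shifting and masking, or the result is wrong on big-endian hosts. A minimal sketch of the idea, with hypothetical names not taken from the driver:

    /* The shadow is declared __le16 so sparse flags raw accesses;
     * le16_to_cpu() is a no-op on little-endian CPUs and a byteswap
     * on big-endian ones, so the extracted field is correct on both.
     */
    static u8 example_phy_type(const __le16 *eeprom, int idx)
    {
            /* buggy on big-endian: (eeprom[idx] >> 8) & 0x0f */
            return (le16_to_cpu(eeprom[idx]) >> 8) & 0x0f;
    }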
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 86493fe..a2ee28e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -542,8 +542,13 @@
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- e1000_down(adapter);
- e1000_up(adapter);
+
+ /* only run the task if not already down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ }
+
clear_bit(__E1000_RESETTING, &adapter->flags);
}
@@ -1433,10 +1438,15 @@
struct e1000_hw *hw = &adapter->hw;
int count = E1000_CHECK_RESET_COUNT;
- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
usleep_range(10000, 20000);
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ WARN_ON(count < 0);
+
+ /* signal that we're down so that the reset task will no longer run */
+ set_bit(__E1000_DOWN, &adapter->flags);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
e1000_down(adapter);
e1000_power_down_phy(adapter);
e1000_free_irq(adapter);
@@ -3140,8 +3150,9 @@
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
+ case e1000_82544: {
unsigned int pull_size;
- case e1000_82544:
+
/* Make sure we have room to chop off 4 bytes,
* and that the end alignment will work out to
* this hardware's requirements
@@ -3162,6 +3173,7 @@
}
len = skb_headlen(skb);
break;
+ }
default:
/* do nothing */
break;
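The last two e1000 hunks are structural rather than functional: pull_size was previously declared between the switch statement and the first case label, which is valid C but puts a declaration in a region that is jumped over, and newer compilers warn about it. Giving the case its own braced block scopes the variable cleanly; in outline:

    switch (hw->mac_type) {
    case e1000_82544: {             /* braces open a scope for the local */
            unsigned int pull_size;

            pull_size = min((unsigned int)4, skb->data_len);
            /* ... chop 4 bytes off the end of the skb ... */
            break;
    }                               /* pull_size goes out of scope here */
    default:
            break;
    }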
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2c1bab3..1fd4406 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -899,6 +899,8 @@
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6c51b1b..944abd5 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -185,13 +185,12 @@
/* board specific private data structure */
struct e1000_adapter {
+ struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
- struct delayed_work watchdog_task;
-
- struct workqueue_struct *e1000_workqueue;
+ struct work_struct watchdog_task;
const struct e1000_info *ei;
@@ -577,7 +576,6 @@
#define er32(reg) __er32(hw, E1000_##reg)
-s32 __ew32_prepare(struct e1000_hw *hw);
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index eff75bd..11fdc27 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -86,6 +86,12 @@
#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0
#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1
#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2
+#define E1000_DEV_ID_PCH_CMP_I219_LM10 0x0D4E
+#define E1000_DEV_ID_PCH_CMP_I219_V10 0x0D4F
+#define E1000_DEV_ID_PCH_CMP_I219_LM11 0x0D4C
+#define E1000_DEV_ID_PCH_CMP_I219_V11 0x0D4D
+#define E1000_DEV_ID_PCH_CMP_I219_LM12 0x0D53
+#define E1000_DEV_ID_PCH_CMP_I219_V12 0x0D55
#define E1000_REVISION_4 4
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a1fab77..58ff747 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -995,6 +995,8 @@
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+ u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u16 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
@@ -1048,7 +1050,17 @@
E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
- if (lat_enc > max_ltr_enc)
+ lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
+ (1U << (E1000_LTRV_SCALE_FACTOR *
+ ((lat_enc & E1000_LTRV_SCALE_MASK)
+ >> E1000_LTRV_SCALE_SHIFT)));
+
+ max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
+ (1U << (E1000_LTRV_SCALE_FACTOR *
+ ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
+ >> E1000_LTRV_SCALE_SHIFT)));
+
+ if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 1502895..e757896 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -274,8 +274,11 @@
/* Latency Tolerance Reporting */
#define E1000_LTRV 0x000F8
+#define E1000_LTRV_VALUE_MASK 0x000003FF
#define E1000_LTRV_SCALE_MAX 5
#define E1000_LTRV_SCALE_FACTOR 5
+#define E1000_LTRV_SCALE_SHIFT 10
+#define E1000_LTRV_SCALE_MASK 0x00001C00
#define E1000_LTRV_REQ_SHIFT 15
#define E1000_LTRV_NOSNOOP_SHIFT 16
#define E1000_LTRV_SEND (1 << 30)
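For reference, the ich8lan.c hunk above changes the LTR clamp to compare decoded latencies rather than raw register encodings. With the masks just defined, an LTR encoding is a 10-bit value (bits 9:0) plus a 3-bit scale (bits 12:10), and the decoded latency is value * 2^(5 * scale). Comparing raw encodings is wrong because a larger latency can have a numerically smaller encoding. A worked example (values and helper name hypothetical):

    /* lat_enc = 0x07FF: value = 1023, scale = 1 -> 1023 * 2^5  = 32736
     * max_enc = 0x0801: value = 1,    scale = 2 -> 1    * 2^10 = 1024
     * Raw:     0x07FF < 0x0801, so the old code never clamped here, even
     * though the request exceeds the platform maximum by roughly 32x.
     * Decoded: 32736 > 1024, so the new code clamps lat_enc to max_enc.
     */
    static u32 decode_ltr(u16 enc)
    {
            u16 value = enc & E1000_LTRV_VALUE_MASK;
            u16 scale = (enc & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;

            return value * (1U << (E1000_LTRV_SCALE_FACTOR * scale));
    }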
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d7d56e4..cbd83bb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -119,14 +119,12 @@
* has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
* and try again a number of times.
**/
-s32 __ew32_prepare(struct e1000_hw *hw)
+static void __ew32_prepare(struct e1000_hw *hw)
{
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
udelay(50);
-
- return i;
}
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
@@ -607,11 +605,11 @@
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, rx_ring->tail);
- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
+ if (unlikely(i != readl(rx_ring->tail))) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -624,11 +622,11 @@
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, tx_ring->tail);
- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
+ if (unlikely(i != readl(tx_ring->tail))) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
@@ -1780,8 +1778,7 @@
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1861,8 +1858,7 @@
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@@ -1907,8 +1903,7 @@
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4281,6 +4276,7 @@
napi_synchronize(&adapter->napi);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
@@ -4715,12 +4711,12 @@
pm_runtime_get_sync(&pdev->dev);
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (netif_device_present(netdev)) {
e1000e_down(adapter, true);
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", netdev->name);
}
napi_disable(&adapter->napi);
@@ -5152,11 +5148,25 @@
}
}
+/**
+ * e1000_watchdog - Timer Call-back
+ * @t: pointer to the timer_list embedded in our adapter structure
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+ struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
- watchdog_task.work);
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
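This hunk is the heart of the revert from the dedicated-workqueue watchdog back to a classic kernel timer. from_timer() is container_of() specialized for timers: given the timer_list pointer the timer core passes in, it recovers the structure that embeds it. A condensed view of the lifecycle, matching the fields added to struct e1000_adapter in e1000.h above:

    static void e1000_watchdog(struct timer_list *t)
    {
            struct e1000_adapter *adapter =
                    from_timer(adapter, t, watchdog_timer);

            /* timers fire in softirq context; defer the real work */
            schedule_work(&adapter->watchdog_task);
    }

    /* probe:    timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
     * "now":    mod_timer(&adapter->watchdog_timer, jiffies + 1);
     * periodic: mod_timer(..., round_jiffies(jiffies + 2 * HZ));
     * teardown: del_timer_sync() followed by cancel_work_sync()
     */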
@@ -5189,18 +5199,20 @@
pm_runtime_resume(netdev->dev.parent);
/* Checking if MAC is in DMoff state */
- pcim_state = er32(STATUS);
- while (pcim_state & E1000_STATUS_PCIM_STATE) {
- if (tries++ == dmoff_exit_timeout) {
- e_dbg("Error in exiting dmoff\n");
- break;
- }
- usleep_range(10000, 20000);
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
pcim_state = er32(STATUS);
+ while (pcim_state & E1000_STATUS_PCIM_STATE) {
+ if (tries++ == dmoff_exit_timeout) {
+ e_dbg("Error in exiting dmoff\n");
+ break;
+ }
+ usleep_range(10000, 20000);
+ pcim_state = er32(STATUS);
- /* Checking if MAC exited DMoff state */
- if (!(pcim_state & E1000_STATUS_PCIM_STATE))
- e1000_phy_hw_reset(&adapter->hw);
+ /* Checking if MAC exited DMoff state */
+ if (!(pcim_state & E1000_STATUS_PCIM_STATE))
+ e1000_phy_hw_reset(&adapter->hw);
+ }
}
/* update snapshot of PHY registers on LSC */
@@ -5277,6 +5289,10 @@
/* oops */
break;
}
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
}
/* enable transmits in the hardware, need to do this
@@ -5404,9 +5420,8 @@
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
- queue_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task,
- round_jiffies(2 * HZ));
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -5940,15 +5955,19 @@
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ rtnl_lock();
/* don't run the task if already down */
- if (test_bit(__E1000_DOWN, &adapter->state))
+ if (test_bit(__E1000_DOWN, &adapter->state)) {
+ rtnl_unlock();
return;
+ }
if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6298,10 +6317,14 @@
{
struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool present;
+ rtnl_lock();
+
+ present = netif_device_present(netdev);
netif_device_detach(netdev);
- if (netif_running(netdev)) {
+ if (present && netif_running(netdev)) {
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
@@ -6313,6 +6336,8 @@
e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
+ rtnl_unlock();
+
e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
@@ -6326,11 +6351,17 @@
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ /* Runtime suspend should only enable wakeup for link changes */
+ if (runtime)
+ wufc = E1000_WUFC_LNKC;
+ else if (device_may_wakeup(&pdev->dev))
+ wufc = adapter->wol;
+ else
+ wufc = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -6387,7 +6418,7 @@
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
@@ -6560,6 +6591,30 @@
__e1000e_disable_aspm(pdev, state, 1);
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ e1000e_set_interrupt_capability(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ rc = e1000_request_irq(adapter);
+ if (rc)
+ goto err_irq;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+err_irq:
+ rtnl_unlock();
+
+ return rc;
+}
+
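Note where this function lands: e1000e_pm_thaw() moves out of the #ifdef CONFIG_PM_SLEEP block (its old copy is deleted further down) because the PCI error-recovery callbacks are built unconditionally and now reuse the PM helpers; it also takes the RTNL lock around e1000e_up(). The error handlers below then reduce, in essence, to:

    /* e1000_io_error_detected(): detach and quiesce via the freeze helper */
    e1000e_pm_freeze(&pdev->dev);

    /* e1000_io_resume() path: re-request the IRQ, bring the NIC up,
     * and re-attach the net device */
    e1000e_pm_thaw(&pdev->dev);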
#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
@@ -6627,26 +6682,6 @@
}
#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- u32 err = e1000_request_irq(adapter);
-
- if (err)
- return err;
-
- e1000e_up(adapter);
- }
-
- netif_device_attach(netdev);
-
- return 0;
-}
-
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -6818,16 +6853,11 @@
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
+ e1000e_pm_freeze(&pdev->dev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (netif_running(netdev))
- e1000e_down(adapter, true);
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -6893,10 +6923,7 @@
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
+ e1000e_pm_thaw(&pdev->dev);
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
@@ -7259,21 +7286,11 @@
goto err_eeprom;
}
- adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
- e1000e_driver_name);
-
- if (!adapter->e1000_workqueue) {
- err = -ENOMEM;
- goto err_workqueue;
- }
-
- INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
- queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
- 0);
-
+ timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7367,9 +7384,6 @@
return 0;
err_register:
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
@@ -7387,6 +7401,7 @@
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -7407,26 +7422,22 @@
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- bool down = test_bit(__E1000_DOWN, &adapter->state);
e1000e_ptp_remove(adapter);
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
- if (!down)
- set_bit(__E1000_DOWN, &adapter->state);
+ set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
- cancel_delayed_work(&adapter->watchdog_task);
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
@@ -7435,9 +7446,6 @@
}
}
- /* Don't lie to e1000_close() down the road. */
- if (!down)
- clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
if (pci_dev_run_wake(pdev))
@@ -7567,6 +7575,12 @@
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index bb236fa..36b0163 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2230,6 +2230,7 @@
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2af9f63..e571c61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -129,6 +129,7 @@
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -150,11 +151,15 @@
__I40E_CLIENT_RESET,
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
+ __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
@@ -1151,7 +1156,7 @@
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 69a2daa..57a8328 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1211,7 +1211,7 @@
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
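The rename from I40E_AQC_SET_VSI_PROMISC_TX to I40E_AQC_SET_VSI_PROMISC_RX_ONLY reflects what the firmware actually does with bit 15: it limits unicast promiscuous mode to received traffic. The i40e_common.c hunks below set it only when the AdminQ API is at least 1.5; condensed (assuming the enable path):

    flags = I40E_AQC_SET_VSI_PROMISC_UNICAST;
    valid = I40E_AQC_SET_VSI_PROMISC_UNICAST;
    if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) {
            flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
            valid |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
    }
    cmd->promiscuous_flags = cpu_to_le16(flags);
    cmd->valid_flags = cpu_to_le16(valid);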
@@ -1893,8 +1893,10 @@
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
- I40E_PHY_TYPE_2_5GBASE_T = 0x30,
- I40E_PHY_TYPE_5GBASE_T = 0x31,
+ I40E_PHY_TYPE_2_5GBASE_T = 0x26,
+ I40E_PHY_TYPE_5GBASE_T = 0x27,
+ I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS = 0x30,
+ I40E_PHY_TYPE_5GBASE_T_LINK_STATUS = 0x31,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index e81530c..5706abb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -377,6 +377,7 @@
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
i40e_client_del_instance(pf);
+ return;
}
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 7560f06..6475f78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1156,8 +1156,8 @@
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_1000BASE_T:
- case I40E_PHY_TYPE_2_5GBASE_T:
- case I40E_PHY_TYPE_5GBASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
+ case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
case I40E_PHY_TYPE_10GBASE_T:
media = I40E_MEDIA_TYPE_BASET;
break;
@@ -1950,6 +1950,21 @@
}
/**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Return true if the current HW API version is greater than or equal to
+ * the provided version.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+ u16 min)
+{
+ return (aq->api_maj_ver > maj ||
+ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
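The i40e_is_aq_api_ver_ge() helper added above centralizes a version comparison that was previously open-coded, with slight variations, at every call site. Before and after, taken from the hunks below:

    /* before */
    if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
        (hw->aq.api_maj_ver > 1))
            flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;

    /* after: any (maj, min) lexicographically >= (1, 5) */
    if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
            flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;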
@@ -2074,18 +2089,16 @@
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc &&
- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1)))
- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2182,11 +2195,17 @@
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
+ if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
@@ -2571,9 +2590,16 @@
if (status)
return status;
- hw->phy.link_info.req_fec_info =
- abilities.fec_cfg_curr_mod_ext_info &
- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+ if (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_ENABLE_FEC_AUTO)
+ hw->phy.link_info.req_fec_info =
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+ else
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 41e1240..2cc4f63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -232,6 +232,8 @@
I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
#define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@
I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
+struct i40e_cp_veb_tc_stats {
+ u64 tc_rx_packets;
+ u64 tc_rx_bytes;
+ u64 tc_tx_packets;
+ u64 tc_tx_bytes;
+};
+
static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
- I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
- I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
- I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
- I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
};
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
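Why the copy-struct above exists: the ethtool stat tables are (name, offsetof) pairs resolved against a single flat object, while veb->tc_stats is a structure of per-TC arrays, so offsetof cannot name "element i of every field". The i40e_get_veb_tc_stats() helper added further down flattens one TC at a time, and the stats-fill loop becomes:

    struct i40e_cp_veb_tc_stats veb_tc =
            i40e_get_veb_tc_stats(&veb->tc_stats, i);

    i40e_add_ethtool_stats(&data, &veb_tc, i40e_gstrings_veb_tc_stats);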
@@ -722,7 +731,14 @@
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
} else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -730,12 +746,6 @@
} else {
ethtool_link_ksettings_add_link_mode(ks, advertising,
FEC_NONE);
- if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
- }
}
}
@@ -829,8 +839,8 @@
10000baseT_Full);
break;
case I40E_PHY_TYPE_10GBASE_T:
- case I40E_PHY_TYPE_5GBASE_T:
- case I40E_PHY_TYPE_2_5GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
+ case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -967,7 +977,7 @@
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev,
- "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+ "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
hw_link_info->phy_type);
}
@@ -1097,6 +1107,7 @@
/* Set flow control settings */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
@@ -1248,8 +1259,7 @@
if (ethtool_link_ksettings_test_link_mode(&safe_ks,
supported,
Autoneg) &&
- hw->phy.link_info.phy_type !=
- I40E_PHY_TYPE_10GBASE_T) {
+ hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
err = -EINVAL;
goto done;
@@ -1395,7 +1405,8 @@
memset(&config, 0, sizeof(config));
config.phy_type = abilities.phy_type;
- config.abilities = abilities.abilities;
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
@@ -1437,6 +1448,7 @@
struct i40e_hw *hw = &pf->hw;
i40e_status status = 0;
int err = 0;
+ u8 fec_cfg;
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
@@ -1448,18 +1460,16 @@
}
fecparam->fec = 0;
- if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
fecparam->fec |= ETHTOOL_FEC_AUTO;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_RS) ||
- (abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_ABILITY_RS))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS))
fecparam->fec |= ETHTOOL_FEC_RS;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_KR) ||
- (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR))
fecparam->fec |= ETHTOOL_FEC_BASER;
- if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ if (fec_cfg == 0)
fecparam->fec |= ETHTOOL_FEC_OFF;
if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
@@ -2213,6 +2223,29 @@
}
/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+ struct i40e_cp_veb_tc_stats veb_tc = {
+ .tc_rx_packets = tc->tc_rx_packets[i],
+ .tc_rx_bytes = tc->tc_rx_bytes[i],
+ .tc_tx_packets = tc->tc_tx_packets[i],
+ .tc_tx_bytes = tc->tc_tx_bytes[i],
+ };
+
+ return veb_tc;
+}
+
+/**
* i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
* @pf: the PF device structure
* @i: the priority value to copy
@@ -2296,8 +2329,16 @@
i40e_gstrings_veb_stats);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
- i40e_gstrings_veb_tc_stats);
+ if (veb_stats) {
+ struct i40e_cp_veb_tc_stats veb_tc =
+ i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+ i40e_add_ethtool_stats(&data, &veb_tc,
+ i40e_gstrings_veb_tc_stats);
+ } else {
+ i40e_add_ethtool_stats(&data, NULL,
+ i40e_gstrings_veb_tc_stats);
+ }
i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
@@ -4874,7 +4915,7 @@
enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- bool is_reset_needed;
+ u32 reset_needed = 0;
i40e_status status;
u32 i, j;
@@ -4919,9 +4960,11 @@
flags_complete:
changed_flags = orig_flags ^ new_flags;
- is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
- I40E_FLAG_DISABLE_FW_LLDP));
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
+ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
/* Before we finalize any flag changes, we need to perform some
* checks to ensure that the changes are supported and safe.
@@ -5038,12 +5081,16 @@
case I40E_AQ_RC_EEXIST:
dev_warn(&pf->pdev->dev,
"FW LLDP agent is already running\n");
- is_reset_needed = false;
+ reset_needed = 0;
break;
case I40E_AQ_RC_EPERM:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
+ case I40E_AQ_RC_EAGAIN:
+ dev_warn(&pf->pdev->dev,
+ "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+ return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %s, %s\n",
@@ -5067,8 +5114,8 @@
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
- if (is_reset_needed)
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
+ if (reset_needed)
+ i40e_do_reset(pf, reset_needed, true);
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6031223..21ab7d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -44,6 +44,8 @@
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -458,11 +460,15 @@
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
@@ -806,6 +812,8 @@
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -819,8 +827,11 @@
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
@@ -2536,8 +2547,7 @@
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
- dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
- vsi->netdev->name,
+ dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
}
}
@@ -2592,7 +2602,7 @@
return;
if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return;
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
return;
}
@@ -2610,7 +2620,6 @@
}
}
}
- clear_bit(__I40E_VF_DISABLE, pf->state);
}
/**
@@ -3534,14 +3543,14 @@
q_vector->rx.target_itr =
ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
- q_vector->rx.target_itr);
+ q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr =
ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
wr32(hw, I40E_PFINT_RATEN(vector - 1),
@@ -3646,11 +3655,11 @@
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
i40e_enable_misc_int_causes(pf);
@@ -3979,8 +3988,16 @@
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
- ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ } else {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
@@ -4386,11 +4403,10 @@
}
/**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
@@ -4399,7 +4415,7 @@
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q,
- false /*is xdp*/, enable);
+ false /*is xdp*/, true);
if (ret)
break;
@@ -4408,7 +4424,7 @@
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q + vsi->alloc_queue_pairs,
- true /*is xdp*/, enable);
+ true /*is xdp*/, true);
if (ret)
break;
}
@@ -4506,32 +4522,25 @@
}
/**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
* @vsi: the VSI being configured
- * @enable: start or stop the rings
**/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+ ret = i40e_control_wait_rx_q(pf, pf_q, true);
if (ret) {
dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ "VSI seid %d Rx ring %d enable timeout\n",
+ vsi->seid, pf_q);
break;
}
}
- /* Due to HW errata, on Rx disable only, the register can indicate done
- * before it really is. Needs 50ms to be sure
- */
- if (!enable)
- mdelay(50);
-
return ret;
}
@@ -4544,29 +4553,47 @@
int ret = 0;
/* do rx first for enable and last for disable */
- ret = i40e_vsi_control_rx(vsi, true);
+ ret = i40e_vsi_enable_rx(vsi);
if (ret)
return ret;
- ret = i40e_vsi_control_tx(vsi, true);
+ ret = i40e_vsi_enable_tx(vsi);
return ret;
}
+#define I40E_DISABLE_TX_GAP_MSEC 50
+
/**
* i40e_vsi_stop_rings - Stop a VSI's rings
* @vsi: the VSI being configured
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, err, q_end;
+
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- /* do rx first for enable and last for disable
- * Ignore return value, we need to shutdown whatever we can
- */
- i40e_vsi_control_tx(vsi, false);
- i40e_vsi_control_rx(vsi, false);
+ q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+ err = i40e_control_wait_rx_q(pf, pf_q, false);
+ if (err)
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d dissable timeout\n",
+ vsi->seid, pf_q);
+ }
+
+ msleep(I40E_DISABLE_TX_GAP_MSEC);
+ pf_q = vsi->base_queue;
+ for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+ i40e_vsi_wait_queues_disabled(vsi);
}
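The rewritten i40e_vsi_stop_rings() drops the old symmetric enable/disable helpers in favour of an explicit shutdown order; in outline:

    /* 1. i40e_pre_tx_queue_cfg(hw, q, false)  - request Tx disable, all queues
     * 2. i40e_control_wait_rx_q(pf, q, false) - disable each Rx queue and wait
     * 3. msleep(I40E_DISABLE_TX_GAP_MSEC)     - 50 ms gap before the final step
     * 4. wr32(hw, I40E_QTX_ENA(q), 0)         - clear every Tx queue enable bit
     * 5. i40e_vsi_wait_queues_disabled(vsi)   - confirm the hardware stopped
     */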
/**
@@ -6804,8 +6831,8 @@
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
if (i40e_enabled_xdp_vsi(vsi)) {
- /* Make sure that in-progress ndo_xdp_xmit
- * calls are completed.
+ /* Make sure that in-progress ndo_xdp_xmit and
+ * ndo_xsk_wakeup calls are completed.
*/
synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
@@ -6851,6 +6878,8 @@
}
if (vsi->num_queue_pairs <
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to create traffic channel, insufficient number of queues.\n");
return -EINVAL;
}
if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -7168,6 +7197,7 @@
ch->num_queue_pairs = qcnt;
if (!i40e_setup_channel(pf, vsi, ch)) {
ret = -EINVAL;
+ kfree(ch);
goto err_free;
}
ch->parent_vsi = vsi;
@@ -7592,6 +7622,8 @@
if (filter->flags >= ARRAY_SIZE(flag_table))
return I40E_ERR_CONFIG;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter);
@@ -7655,10 +7687,13 @@
return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
- if (filter->src_port || filter->src_ipv4 ||
+ if (filter->src_port ||
+ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EOPNOTSUPP;
+ memset(&cld_filter, 0, sizeof(cld_filter));
+
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -7682,7 +7717,7 @@
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
}
- } else if (filter->dst_ipv4 ||
+ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
!ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
cld_filter.element.flags =
cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
@@ -8269,6 +8304,8 @@
dev_driver_string(&pf->pdev->dev),
dev_name(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
} else {
err = -EINVAL;
@@ -8461,6 +8498,13 @@
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes PFs VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
dev_info(&pf->pdev->dev,
pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
"FW LLDP is disabled\n" :
@@ -9927,7 +9971,6 @@
int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 val;
int v;
@@ -10053,13 +10096,6 @@
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- /* make sure our flow control settings are restored */
- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
- if (ret)
- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
@@ -10815,10 +10851,10 @@
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
@@ -10852,7 +10888,7 @@
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
@@ -10870,7 +10906,7 @@
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
@@ -10883,7 +10919,7 @@
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
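These WRITE_ONCE() stores pair with the READ_ONCE() loads added earlier (in the stats paths and in i40e_enabled_xdp_vsi()): ring pointers may now legitimately be observed as NULL by lockless readers while a VSI is being torn down or rebuilt, so both sides use the annotated accessors and readers must tolerate NULL. The reader-side pattern, as in the netdev-stats hunk near the top of this file:

    struct i40e_ring *ring = READ_ONCE(vsi->rx_rings[i]);

    if (!ring)
            continue;       /* ring unpublished or being freed: skip it */
    /* ... then read ring->stats under the u64_stats seqcount ... */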
@@ -11396,7 +11432,7 @@
/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
i40e_flush(hw);
@@ -11742,6 +11778,8 @@
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
+ memset(&bw_data, 0, sizeof(bw_data));
+
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
@@ -11848,6 +11886,7 @@
{
int err = 0;
int size;
+ u16 pow;
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -11866,6 +11905,11 @@
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
+
+ /* find the next higher power-of-2 of num cpus */
+ pow = roundup_pow_of_two(num_online_cpus());
+ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
@@ -12526,8 +12570,12 @@
old_prog = xchg(&vsi->xdp_prog, prog);
- if (need_reset)
+ if (need_reset) {
+ if (!prog)
+ /* Wait until ndo_xsk_wakeup completes. */
+ synchronize_rcu();
i40e_reset_and_rebuild(pf, true, true);
+ }
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -12847,6 +12895,7 @@
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
+ .ndo_select_queue = i40e_lan_select_queue,
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
@@ -14672,12 +14721,16 @@
* in order to register the netdev
*/
v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
- if (v_idx < 0)
+ if (v_idx < 0) {
+ err = v_idx;
goto err_switch_setup;
+ }
pf->lan_vsi = v_idx;
vsi = pf->vsi[v_idx];
- if (!vsi)
+ if (!vsi) {
+ err = -EFAULT;
goto err_switch_setup;
+ }
vsi->alloc_queue_pairs = 1;
err = i40e_config_netdev(vsi);
if (err)
@@ -14736,7 +14789,6 @@
int err;
u32 val;
u32 i;
- u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15058,24 +15110,6 @@
}
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
- /* Make sure flow control is set according to current settings */
- err = i40e_set_fc(hw, &set_fc_aq_fail, true);
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_phy_cap\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on set_phy_config\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
- dev_dbg(&pf->pdev->dev,
- "Set fc with err %s aq_err %s on get_link_info\n",
- i40e_stat_str(hw, err),
- i40e_aq_str(hw, hw->aq.asq_last_status));
-
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -15132,6 +15166,8 @@
if (err) {
dev_info(&pdev->dev,
"setup of misc vector failed: %d\n", err);
+ i40e_cloud_filter_exit(pf);
+ i40e_fdir_teardown(pf);
goto err_vsis;
}
}
@@ -15328,6 +15364,14 @@
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ usleep_range(1000, 2000);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
@@ -15354,11 +15398,6 @@
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index e3f29dc..0698791 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1195,6 +1195,11 @@
rc->total_packets = 0;
}
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi[idx];
+}
+
/**
* i40e_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
@@ -1208,7 +1213,7 @@
struct i40e_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
- new_buff = &rx_ring->rx_bi[nta];
+ new_buff = i40e_rx_bi(rx_ring, nta);
/* update, and store next to alloc */
nta++;
@@ -1272,7 +1277,7 @@
ntc = rx_ring->next_to_clean;
/* fetch, update, and store next to clean */
- rx_buffer = &rx_ring->rx_bi[ntc++];
+ rx_buffer = i40e_rx_bi(rx_ring, ntc++);
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
@@ -1361,7 +1366,7 @@
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
if (!rx_bi->page)
continue;
@@ -1576,7 +1581,7 @@
return false;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
+ bi = i40e_rx_bi(rx_ring, ntu);
do {
if (!i40e_alloc_mapped_page(rx_ring, bi))
@@ -1598,7 +1603,7 @@
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
+ bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -1864,6 +1869,7 @@
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -1886,7 +1892,8 @@
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1897,7 +1904,7 @@
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
@@ -1956,17 +1963,25 @@
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- prefetchw(rx_buffer->page);
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
+ prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -2120,14 +2135,16 @@
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer)
+ struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2216,15 +2233,20 @@
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = I40E_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
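The reworked verdict handling funnels every failure through a single label so that a full XDP Tx ring or a failed redirect is recorded by the xdp_exception tracepoint instead of being silently absorbed; in outline:

    case XDP_TX:
            result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
            if (result == I40E_XDP_CONSUMED)        /* Tx ring full */
                    goto out_failure;
            break;
    case XDP_REDIRECT:
            if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
                    goto out_failure;
            result = I40E_XDP_REDIR;
            break;
    /* ... */
    out_failure:
            trace_xdp_exception(rx_ring->netdev, xdp_prog, act);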
@@ -2340,6 +2362,7 @@
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
@@ -2379,7 +2402,7 @@
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2419,7 +2442,7 @@
break;
}
- i40e_put_rx_buffer(rx_ring, rx_buffer);
+ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
@@ -3075,13 +3098,16 @@
l4_proto = ip.v4->protocol;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ int ret;
+
tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
- if (l4.hdr != exthdr)
- ipv6_skip_exthdr(skb, exthdr - skb->data,
- &l4_proto, &frag_off);
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
}
/* define outer transport */
@@ -3495,6 +3521,55 @@
return -1;
}
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 num_tx_queues)
+{
+ u32 jhash_initval_salt = 0xd631614b;
+ u32 hash;
+
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16)skb->protocol ^ skb->hash;
+
+ hash = jhash_1word(hash, jhash_initval_salt);
+
+ return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device __always_unused *sb_dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_hw *hw;
+ u16 qoffset;
+ u16 qcount;
+ u8 tclass;
+ u16 hash;
+ u8 prio;
+
+ /* is DCB enabled at all? */
+ if (vsi->tc_config.numtc == 1)
+ return netdev_pick_tx(netdev, skb, sb_dev);
+
+ prio = skb->priority;
+ hw = &vsi->back->hw;
+ tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+ /* sanity check */
+ if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+ tclass = 0;
+
+ /* select a queue assigned for the given TC */
+ qcount = vsi->tc_config.tc_info[tclass].qcount;
+ hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+ qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+ return qoffset + hash;
+}
+
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
* @xdp: data to transmit
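A worked example for the new i40e_lan_select_queue() above (numbers hypothetical): with DCB active, suppose skb->priority maps through the ETS priority table to traffic class 2, which was configured with qoffset = 16 and qcount = 8. Then:

    hash  = i40e_swdcb_skb_tx_hash(netdev, skb, 8);  /* uniform over [0, 8) */
    queue = 16 + hash;                               /* one of queues 16..23 */

so each flow keeps its ordering (the hash is stable per flow) while staying inside the queue range the hardware assigned to that traffic class.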
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 36d37f3..ba4ce80 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -481,6 +481,8 @@
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index b43ec94..666a251 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -253,11 +253,8 @@
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
/* Offset for 2.5G/5G PHY Types value to bit number conversion */
-#define I40E_PHY_TYPE_OFFSET2 (-10)
-#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
- I40E_PHY_TYPE_OFFSET2)
-#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
- I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3d24408..e561073 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,12 +55,7 @@
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->queues_enabled) {
- pfe.event_data.link_event.link_status = false;
- pfe.event_data.link_event.link_speed = 0;
- } else if (vf->link_forced) {
+ if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -70,7 +65,6 @@
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
-
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -143,6 +137,7 @@
**/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
+ struct i40e_pf *pf = vf->pf;
int i;
i40e_vc_notify_vf_reset(vf);
@@ -153,6 +148,11 @@
* ensure a reset.
*/
for (i = 0; i < 20; i++) {
+ /* If the PF is releasing its VFs, a VF reset is impossible,
+ * so bail out.
+ */
+ if (test_bit(__I40E_VFS_RELEASING, pf->state))
+ return;
if (i40e_reset_vf(vf, false))
return;
usleep_range(10000, 20000);
@@ -1335,7 +1335,8 @@
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is reset, false otherwise.
+ * Returns true if the VF is currently in reset, was reset successfully,
+ * or if resets are disabled; false otherwise.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1345,11 +1346,14 @@
u32 reg;
int i;
+ if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
+ return true;
+
/* If the VFs have been disabled, this means something else is
* resetting the VF, so we shouldn't continue.
*/
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
- return false;
+ return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1508,11 +1512,22 @@
if (!pf->vf)
return;
+
+ set_bit(__I40E_VFS_RELEASING, pf->state);
while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
i40e_notify_client_of_vf_enable(pf, 0);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Amortize wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
@@ -1528,15 +1543,6 @@
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
/* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
@@ -1565,6 +1571,7 @@
}
}
clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(__I40E_VFS_RELEASING, pf->state);
}
#ifdef CONFIG_PCI_IOV
@@ -1700,7 +1707,7 @@
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1709,7 +1716,7 @@
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
@@ -2323,6 +2330,22 @@
}
/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
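The validator introduced above relies on a bitmap property: if any bit at position I40E_MAX_VF_QUEUES or higher is set, the bitmap compares greater than or equal to BIT(I40E_MAX_VF_QUEUES), so a single range check rejects every out-of-range queue — which the old checks (a bare non-zero test in enable, and a count-style > I40E_MAX_VF_QUEUES comparison in disable) did not. A user-space sketch of the same predicate (the queue limit is a placeholder):

#include <stdbool.h>
#include <stdint.h>

#define MAX_VF_QUEUES	16	/* placeholder for the sketch */

static bool vqs_bitmaps_valid(uint32_t rx_queues, uint32_t tx_queues)
{
	const uint32_t limit = 1u << MAX_VF_QUEUES;

	/* reject an empty selection and any bit >= MAX_VF_QUEUES */
	if (!rx_queues && !tx_queues)
		return false;
	return rx_queues < limit && tx_queues < limit;
}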
@@ -2347,7 +2370,7 @@
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2373,8 +2396,6 @@
}
}
- vf->queues_enabled = true;
-
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2396,9 +2417,6 @@
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- /* Immediately mark queues as disabled */
- vf->queues_enabled = false;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -2409,9 +2427,7 @@
goto error_param;
}
- if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
- vqs->rx_queues > I40E_MAX_VF_QUEUES ||
- vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3962,20 +3978,16 @@
goto error_param;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
- * If the VF is indeed in reset, the vsi pointer has
- * to show on the newly loaded vsi under pf->vsi[id].
+ * Acquire the VSI pointer only after the VF has been
+ * properly initialized.
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- if (i > 0)
- vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
break;
- }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -3984,6 +3996,7 @@
ret = -EAGAIN;
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 7164b9b..f65cc0c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -99,7 +99,6 @@
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
- bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_mac;
u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index d07e1a8..a9ad788 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -9,6 +9,11 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi[idx];
+}
+
/**
* i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
* @vsi: Current VSI
@@ -207,21 +212,28 @@
xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ if (err)
+ goto out_failure;
+ rcu_read_unlock();
+ return I40E_XDP_REDIR;
+ }
+
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
- break;
- case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
/* fallthrough -- handle aborts by dropping packet */
case XDP_DROP:
@@ -321,7 +333,7 @@
bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
+ bi = i40e_rx_bi(rx_ring, ntu);
do {
if (!alloc(rx_ring, bi)) {
ok = false;
@@ -340,7 +352,7 @@
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
+ bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -402,7 +414,7 @@
{
struct i40e_rx_buffer *bi;
- bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -424,7 +436,8 @@
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *old_bi)
{
- struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
+ struct i40e_rx_buffer *new_bi = i40e_rx_bi(rx_ring,
+ rx_ring->next_to_alloc);
u16 nta = rx_ring->next_to_alloc;
/* update, and store next to alloc */
@@ -456,7 +469,7 @@
mask = rx_ring->xsk_umem->chunk_mask;
nta = rx_ring->next_to_alloc;
- bi = &rx_ring->rx_bi[nta];
+ bi = i40e_rx_bi(rx_ring, nta);
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
@@ -787,8 +800,12 @@
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_ring *ring;
+ if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return -EAGAIN;
+
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
@@ -820,7 +837,7 @@
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
if (!rx_bi->addr)
continue;
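Both the i40e_txrx.c and i40e_xsk.c hunks above reshape XDP verdict handling the same way: the expected XDP_REDIRECT case is hoisted ahead of the switch, and every failure path funnels through a single out_failure label so trace_xdp_exception() fires for each failed or aborted frame. A compilable sketch of that control flow with stubbed helpers (names are illustrative):

/* stubs so the sketch is self-contained */
static int do_redirect(void) { return 0; }
static int do_tx(void) { return 0; }
static void trace_exception(int act) { (void)act; }

enum verdict { V_PASS, V_TX, V_REDIRECT, V_ABORTED, V_DROP };

static int handle_verdict(enum verdict act)
{
	if (act == V_REDIRECT) {
		if (do_redirect())
			goto out_failure;
		return 1;	/* fast path: frame redirected */
	}

	switch (act) {
	case V_PASS:
		return 0;	/* hand the frame to the stack */
	case V_TX:
		if (do_tx())
			goto out_failure;
		return 0;
	default:		/* unknown verdict */
	case V_ABORTED:
out_failure:
		trace_exception(act);
		/* fall through -- aborts are dropped */
	case V_DROP:
		return -1;
	}
}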
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 29de3ae..81ca647 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -87,6 +87,10 @@
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
+ (IAVF_MAX_VF_VSI * \
+ sizeof(struct virtchnl_vsi_resource)))
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
@@ -130,6 +134,7 @@
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
+ bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};
@@ -306,6 +311,14 @@
bool netdev_registered;
bool link_up;
enum virtchnl_link_speed link_speed;
+ /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set
+ * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if
+ * this field is valid. This field should be used going forward and the
+ * enum virtchnl_link_speed above should be considered the legacy way of
+ * storing/communicating link speeds.
+ */
+ u32 link_speed_mbps;
+
enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_cap_flags & \
@@ -322,6 +335,8 @@
VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN)
+#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
@@ -415,4 +430,6 @@
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr);
#endif /* _IAVF_H_ */
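IAVF_VIRTCHNL_VF_RESOURCE_SIZE above precomputes the usual flexible-array allocation size: the fixed header plus room for the maximum number of trailing VSI entries, so the buffer can later be reused with memset() instead of being freed and reallocated. The same sizing in a generic sketch (illustrative struct and maximum):

#include <stdlib.h>

struct vsi_resource { int vsi_id; };

struct vf_resource {
	int num_vsis;
	struct vsi_resource vsi_res[];	/* flexible array member */
};

#define MAX_VF_VSI	2	/* assumed maximum for the sketch */
#define VF_RESOURCE_SIZE	(sizeof(struct vf_resource) + \
				 MAX_VF_VSI * sizeof(struct vsi_resource))

/* one allocation covers the header and all trailing entries */
static struct vf_resource *alloc_vf_res(void)
{
	return calloc(1, VF_RESOURCE_SIZE);
}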
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index dad3eec..758bef0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -278,7 +278,18 @@
ethtool_link_ksettings_zero_link_mode(cmd, supported);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = PORT_NONE;
- /* Set speed and duplex */
+ cmd->base.duplex = DUPLEX_FULL;
+
+ if (ADV_LINK_SUPPORT(adapter)) {
+ if (adapter->link_speed_mbps &&
+ adapter->link_speed_mbps < U32_MAX)
+ cmd->base.speed = adapter->link_speed_mbps;
+ else
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ return 0;
+ }
+
switch (adapter->link_speed) {
case IAVF_LINK_SPEED_40GB:
cmd->base.speed = SPEED_40000;
@@ -306,7 +317,6 @@
default:
break;
}
- cmd->base.duplex = DUPLEX_FULL;
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 821987d..bc46c26 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -143,6 +143,30 @@
}
/**
+ * iavf_lock_timeout - try to set bit but give up after timeout
+ * @adapter: board private structure
+ * @bit: bit to set
+ * @msecs: timeout in msecs
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int iavf_lock_timeout(struct iavf_adapter *adapter,
+ enum iavf_critical_section_t bit,
+ unsigned int msecs)
+{
+ unsigned int wait, delay = 10;
+
+ for (wait = 0; wait < msecs; wait += delay) {
+ if (!test_and_set_bit(bit, &adapter->crit_section))
+ return 0;
+
+ msleep(delay);
+ }
+
+ return -1;
+}
+
+/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
**/
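iavf_lock_timeout() above is a bounded take on the test_and_set_bit() spin used elsewhere in the driver: try the bit, sleep 10 ms between attempts, and give up once the budget is spent so a stuck holder cannot hang the caller forever. A user-space analogue under C11 atomics (a sketch, not the driver's API):

#include <stdatomic.h>
#include <unistd.h>

static int try_lock_timeout(atomic_flag *lock, unsigned int msecs)
{
	unsigned int waited = 0, delay = 10;

	while (waited < msecs) {
		if (!atomic_flag_test_and_set(lock))
			return 0;	/* flag was clear: acquired */
		usleep(delay * 1000);	/* stand-in for msleep() */
		waited += delay;
	}
	return -1;	/* still held after the timeout */
}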
@@ -743,9 +767,8 @@
*
* Returns ptr to the filter object or NULL when no memory available.
**/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
- const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
struct iavf_mac_filter *f;
@@ -762,6 +785,7 @@
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -1500,11 +1524,6 @@
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_map_rings_to_vectors(adapter);
-
- if (RSS_AQ(adapter))
- adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
- else
- err = iavf_init_rss(adapter);
err:
return err;
}
@@ -1757,17 +1776,17 @@
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
- int err = 0, bufsz;
+ int err;
WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
- bufsz = sizeof(struct virtchnl_vf_resource) +
- (IAVF_MAX_VF_VSI *
- sizeof(struct virtchnl_vsi_resource));
- adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
- if (!adapter->vf_res)
+ adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
+ GFP_KERNEL);
+ if (!adapter->vf_res) {
+ err = -ENOMEM;
goto err;
+ }
}
err = iavf_get_vf_config(adapter);
if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
@@ -1787,7 +1806,8 @@
goto err_alloc;
}
- if (iavf_process_config(adapter))
+ err = iavf_process_config(adapter);
+ if (err)
goto err_alloc;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -1845,11 +1865,9 @@
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
@@ -1864,8 +1882,10 @@
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
- if (!adapter->rss_key || !adapter->rss_lut)
+ if (!adapter->rss_key || !adapter->rss_lut) {
+ err = -ENOMEM;
goto err_mem;
+ }
if (RSS_AQ(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
else
@@ -1947,7 +1967,10 @@
iavf_send_api_ver(adapter);
}
} else {
- if (!iavf_process_aq_command(adapter) &&
+ /* An error will be returned if no commands were
+ * processed; use this opportunity to update stats
+ */
+ if (iavf_process_aq_command(adapter) &&
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
@@ -1962,7 +1985,6 @@
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- adapter->state = __IAVF_RESETTING;
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2037,7 +2059,7 @@
iavf_reset_interrupt_capability(adapter);
iavf_free_queues(adapter);
iavf_free_q_vectors(adapter);
- kfree(adapter->vf_res);
+ memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
@@ -2065,9 +2087,9 @@
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
- struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2078,6 +2100,10 @@
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return;
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) {
+ schedule_work(&adapter->reset_task);
+ return;
+ }
while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
@@ -2176,11 +2202,29 @@
goto reset_err;
}
+ if (RSS_AQ(adapter)) {
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+ } else {
+ err = iavf_init_rss(adapter);
+ if (err)
+ goto reset_err;
+ }
+
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* Delete the filter for the current MAC address; it may have
+ * been changed by the PF via an administratively set MAC.
+ * It will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
@@ -2274,6 +2318,8 @@
if (!event.msg_buf)
goto out;
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200))
+ goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2287,6 +2333,7 @@
if (pending != 0)
memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
} while (pending);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
@@ -2478,6 +2525,16 @@
{
int speed = 0, ret = 0;
+ if (ADV_LINK_SUPPORT(adapter)) {
+ if (adapter->link_speed_mbps < U32_MAX) {
+ speed = adapter->link_speed_mbps;
+ goto validate_bw;
+ } else {
+ dev_err(&adapter->pdev->dev, "Unknown link speed\n");
+ return -EINVAL;
+ }
+ }
+
switch (adapter->link_speed) {
case IAVF_LINK_SPEED_40GB:
speed = 40000;
@@ -2501,6 +2558,7 @@
break;
}
+validate_bw:
if (max_tx_rate > speed) {
dev_err(&adapter->pdev->dev,
"Invalid tx rate specified\n");
@@ -3052,9 +3110,6 @@
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
return iavf_configure_clsflower(adapter, cls_flower);
@@ -3078,6 +3133,11 @@
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ struct iavf_adapter *adapter = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
@@ -3570,6 +3630,10 @@
init_task.work);
struct iavf_hw *hw = &adapter->hw;
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) {
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
+ return;
+ }
switch (adapter->state) {
case __IAVF_STARTUP:
if (iavf_startup(adapter) < 0)
@@ -3582,14 +3646,14 @@
case __IAVF_INIT_GET_RESOURCES:
if (iavf_init_get_resources(adapter) < 0)
goto init_failed;
- return;
+ goto out;
default:
goto init_failed;
}
queue_delayed_work(iavf_wq, &adapter->init_task,
msecs_to_jiffies(30));
- return;
+ goto out;
init_failed:
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
@@ -3598,9 +3662,11 @@
iavf_shutdown_adminq(hw);
adapter->state = __IAVF_STARTUP;
queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- return;
+ goto out;
}
queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
+out:
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
@@ -3617,9 +3683,12 @@
if (netif_running(netdev))
iavf_close(netdev);
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
/* Prevent the watchdog from running. */
adapter->state = __IAVF_REMOVE;
adapter->aq_required = 0;
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
#ifdef CONFIG_PM
pci_save_state(pdev);
@@ -3739,6 +3808,7 @@
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
@@ -3746,7 +3816,6 @@
return err;
}
-#ifdef CONFIG_PM
/**
* iavf_suspend - Power management suspend routine
* @pdev: PCI device information struct
@@ -3754,11 +3823,10 @@
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused iavf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
- int retval = 0;
netif_device_detach(netdev);
@@ -3776,12 +3844,6 @@
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- pci_disable_device(pdev);
-
return 0;
}
@@ -3791,24 +3853,13 @@
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int iavf_resume(struct pci_dev *pdev)
+static int __maybe_unused iavf_resume(struct device *dev_d)
{
- struct iavf_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = to_pci_dev(dev_d);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
- return err;
- }
pci_set_master(pdev);
rtnl_lock();
@@ -3832,7 +3883,6 @@
return err;
}
-#endif /* CONFIG_PM */
/**
* iavf_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -3867,10 +3917,6 @@
err);
}
- /* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
- adapter->aq_required = 0;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -3878,6 +3924,13 @@
iavf_request_reset(adapter);
msleep(50);
}
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
+
+ /* Shut down all the garbage mashers on the detention level */
+ adapter->state = __IAVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
iavf_misc_irq_disable(adapter);
@@ -3900,8 +3953,6 @@
iounmap(hw->hw_addr);
pci_release_regions(pdev);
- iavf_free_all_tx_resources(adapter);
- iavf_free_all_rx_resources(adapter);
iavf_free_queues(adapter);
kfree(adapter->vf_res);
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -3934,16 +3985,15 @@
pci_disable_device(pdev);
}
+static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+
static struct pci_driver iavf_driver = {
- .name = iavf_driver_name,
- .id_table = iavf_pci_tbl,
- .probe = iavf_probe,
- .remove = iavf_remove,
-#ifdef CONFIG_PM
- .suspend = iavf_suspend,
- .resume = iavf_resume,
-#endif
- .shutdown = iavf_shutdown,
+ .name = iavf_driver_name,
+ .id_table = iavf_pci_tbl,
+ .probe = iavf_probe,
+ .remove = iavf_remove,
+ .driver.pm = &iavf_pm_ops,
+ .shutdown = iavf_shutdown,
};
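The suspend/resume rework above is the generic dev_pm_ops conversion: the callbacks now take a struct device, and the PCI core saves/restores config space and manages power states, which is why the explicit pci_save_state()/pci_set_power_state()/pci_enable_device_mem() calls disappear. A minimal sketch of the resulting pattern (demo_* names are placeholders):

#include <linux/pm.h>
#include <linux/pci.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
	/* quiesce the device; no pci_save_state() needed here */
	return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* config space was already restored by the PCI core */
	pci_set_master(pdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);
/* wired up as .driver.pm = &demo_pm_ops in the struct pci_driver */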
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index c46770e..4d471a6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -139,7 +139,8 @@
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
- VIRTCHNL_VF_OFFLOAD_ADQ;
+ VIRTCHNL_VF_OFFLOAD_ADQ |
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
@@ -564,6 +565,47 @@
}
/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Mark pending MAC filters as accepted based on the PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ f->is_new_mac = false;
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+ f->remove = false;
+
+ if (f->is_new_mac) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -918,6 +960,8 @@
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}
+#define IAVF_MAX_SPEED_STRLEN 13
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -927,37 +971,99 @@
static void iavf_print_link_message(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- char *speed = "Unknown ";
+ int link_speed_mbps;
+ char *speed;
if (!adapter->link_up) {
netdev_info(netdev, "NIC Link is Down\n");
return;
}
+ speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
+ if (!speed)
+ return;
+
+ if (ADV_LINK_SUPPORT(adapter)) {
+ link_speed_mbps = adapter->link_speed_mbps;
+ goto print_link_msg;
+ }
+
switch (adapter->link_speed) {
case IAVF_LINK_SPEED_40GB:
- speed = "40 G";
+ link_speed_mbps = SPEED_40000;
break;
case IAVF_LINK_SPEED_25GB:
- speed = "25 G";
+ link_speed_mbps = SPEED_25000;
break;
case IAVF_LINK_SPEED_20GB:
- speed = "20 G";
+ link_speed_mbps = SPEED_20000;
break;
case IAVF_LINK_SPEED_10GB:
- speed = "10 G";
+ link_speed_mbps = SPEED_10000;
break;
case IAVF_LINK_SPEED_1GB:
- speed = "1000 M";
+ link_speed_mbps = SPEED_1000;
break;
case IAVF_LINK_SPEED_100MB:
- speed = "100 M";
+ link_speed_mbps = SPEED_100;
break;
default:
+ link_speed_mbps = SPEED_UNKNOWN;
break;
}
- netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
+print_link_msg:
+ if (link_speed_mbps > SPEED_1000) {
+ if (link_speed_mbps == SPEED_2500)
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
+ else
+ /* convert to Gbps inline */
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
+ link_speed_mbps / 1000, "Gbps");
+ } else if (link_speed_mbps == SPEED_UNKNOWN) {
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
+ } else {
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
+ link_speed_mbps, "Mbps");
+ }
+
+ netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
+ kfree(speed);
+}
+
+/**
+ * iavf_get_vpe_link_status
+ * @adapter: adapter structure
+ * @vpe: virtchnl_pf_event structure
+ *
+ * Helper function for determining the link status
+ **/
+static bool
+iavf_get_vpe_link_status(struct iavf_adapter *adapter,
+ struct virtchnl_pf_event *vpe)
+{
+ if (ADV_LINK_SUPPORT(adapter))
+ return vpe->event_data.link_event_adv.link_status;
+ else
+ return vpe->event_data.link_event.link_status;
+}
+
+/**
+ * iavf_set_adapter_link_speed_from_vpe
+ * @adapter: adapter structure for which we are setting the link speed
+ * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
+ *
+ * Helper function for setting iavf_adapter link speed
+ **/
+static void
+iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
+ struct virtchnl_pf_event *vpe)
+{
+ if (ADV_LINK_SUPPORT(adapter))
+ adapter->link_speed_mbps =
+ vpe->event_data.link_event_adv.link_speed;
+ else
+ adapter->link_speed = vpe->event_data.link_event.link_speed;
}
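The print path above folds both the legacy enum and the new mbps field into one integer before formatting: 2500 gets a dedicated "2.5 Gbps" string, anything else above 1000 prints in whole Gbps, and SPEED_UNKNOWN (which is -1) prints as unknown. A stand-alone sketch of that formatting rule, assuming the same 13-byte bound:

#include <stdio.h>

#define SPEED_STRLEN	13	/* matches IAVF_MAX_SPEED_STRLEN */

static void format_speed(char *buf, int mbps)
{
	if (mbps < 0)	/* SPEED_UNKNOWN is -1 */
		snprintf(buf, SPEED_STRLEN, "Unknown Mbps");
	else if (mbps == 2500)
		snprintf(buf, SPEED_STRLEN, "2.5 Gbps");
	else if (mbps > 1000)
		snprintf(buf, SPEED_STRLEN, "%d Gbps", mbps / 1000);
	else
		snprintf(buf, SPEED_STRLEN, "%d Mbps", mbps);
}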
/**
@@ -1187,12 +1293,11 @@
if (v_opcode == VIRTCHNL_OP_EVENT) {
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
- bool link_up = vpe->event_data.link_event.link_status;
+ bool link_up = iavf_get_vpe_link_status(adapter, vpe);
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
- adapter->link_speed =
- vpe->event_data.link_event.link_speed;
+ iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
/* we've already got the right link status, bail */
if (adapter->link_up == link_up)
@@ -1252,6 +1357,7 @@
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
+ iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
@@ -1321,10 +1427,11 @@
}
}
switch (v_opcode) {
- case VIRTCHNL_OP_ADD_ETH_ADDR: {
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ if (!v_retval)
+ iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- }
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -1359,6 +1466,9 @@
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;
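The is_new_mac flag added in this series gives MAC filters a pending state: they are queued optimistically, then either committed (iavf_mac_add_ok() clears the flag) or rolled back (iavf_mac_add_reject() unlinks and frees them) once the PF answers VIRTCHNL_OP_ADD_ETH_ADDR. A minimal singly-linked sketch of that commit/rollback shape (illustrative types):

#include <stdbool.h>
#include <stdlib.h>

struct filter {
	struct filter *next;
	bool is_new;	/* pending until the PF confirms */
};

static void commit_all(struct filter *head)
{
	for (; head; head = head->next)
		head->is_new = false;
}

static void rollback_new(struct filter **head)
{
	while (*head) {
		struct filter *f = *head;

		if (f->is_new) {
			*head = f->next;	/* unlink and free */
			free(f);
		} else {
			head = &f->next;
		}
	}
}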
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 3a6b395..d68b8aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -436,6 +436,7 @@
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
+ enum ice_status status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
@@ -446,7 +447,12 @@
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- return ice_init_def_sw_recp(hw);
+ status = ice_init_def_sw_recp(hw);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
+ return status;
+ }
+ return 0;
}
/**
@@ -934,7 +940,7 @@
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay;
+ u32 cnt, reg = 0, grst_delay, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
@@ -956,13 +962,20 @@
return ICE_ERR_RESET_FAILED;
}
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
- GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
- if (reg == ICE_RESET_DONE_MASK) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset processes done. %d\n", cnt);
break;
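The reset poll above now waits on all of the PCIER/CORER/GLOBR/POR completion stages at once by masking the register and comparing the result against the full mask. The general idiom, with made-up bit positions:

#include <stdbool.h>
#include <stdint.h>

#define DONE_A		(1u << 0)	/* illustrative stage bits */
#define DONE_B		(1u << 3)
#define DONE_C		(1u << 4)
#define ALL_DONE	(DONE_A | DONE_B | DONE_C)

/* true only when every stage has completed */
static bool reset_done(uint32_t reg)
{
	return (reg & ALL_DONE) == ALL_DONE;
}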
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2353166..2e9c97b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -199,7 +199,9 @@
cq->rq.r.rq_bi[i].pa = 0;
cq->rq.r.rq_bi[i].size = 0;
}
+ cq->rq.r.rq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+ cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -245,7 +247,9 @@
cq->sq.r.sq_bi[i].pa = 0;
cq->sq.r.sq_bi[i].size = 0;
}
+ cq->sq.r.sq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -304,6 +308,28 @@
return 0;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ if ((qi)->ring.r.ring##_bi) \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size, \
+ (qi)->ring.r.ring##_bi[i].va, \
+ (qi)->ring.r.ring##_bi[i].pa); \
+ (qi)->ring.r.ring##_bi[i].va = NULL;\
+ (qi)->ring.r.ring##_bi[i].pa = 0;\
+ (qi)->ring.r.ring##_bi[i].size = 0;\
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
+
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@@ -357,6 +383,7 @@
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@@ -416,33 +443,13 @@
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
-do { \
- int i; \
- /* free descriptors */ \
- for (i = 0; i < (qi)->num_##ring##_entries; i++) \
- if ((qi)->ring.r.ring##_bi[i].pa) { \
- dmam_free_coherent(ice_hw_to_dev(hw), \
- (qi)->ring.r.ring##_bi[i].size,\
- (qi)->ring.r.ring##_bi[i].va,\
- (qi)->ring.r.ring##_bi[i].pa);\
- (qi)->ring.r.ring##_bi[i].va = NULL; \
- (qi)->ring.r.ring##_bi[i].pa = 0; \
- (qi)->ring.r.ring##_bi[i].size = 0; \
- } \
- /* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free DMA head */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
-} while (0)
-
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -948,7 +955,7 @@
if (ice_sq_done(hw, cq))
break;
- mdelay(1);
+ udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 44945c2..3b1d353 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -31,8 +31,9 @@
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue default settings */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
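The control-queue change trades one 1 ms delay per iteration for a 100 us delay and a larger iteration budget: the worst case is 10000 x 100 us = 1 s, as the new comment says, while completion is noticed up to ten times sooner. The loop shape, with user-space stand-ins for udelay() and the done test:

#include <stdbool.h>
#include <unistd.h>

#define SQ_CMD_TIMEOUT	10000	/* iterations */
#define SQ_CMD_USEC	100	/* delay per iteration */

static bool poll_sq_done(bool (*sq_done)(void))
{
	unsigned int total_delay = 0;

	do {
		if (sq_done())
			return true;
		usleep(SQ_CMD_USEC);	/* stand-in for udelay() */
		total_delay++;
	} while (total_delay < SQ_CMD_TIMEOUT);

	return false;	/* ~1 s elapsed without completion */
}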
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7e23034..fc9ff98 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2635,14 +2635,14 @@
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
vsi->tx_rings[0]->count, new_tx_cnt);
- tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
@@ -2667,14 +2667,14 @@
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0]->count, new_rx_cnt);
- rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
@@ -2712,7 +2712,7 @@
ice_down(vsi);
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_txq(vsi, i) {
ice_free_tx_ring(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
@@ -2720,7 +2720,7 @@
}
if (rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ice_free_rx_ring(vsi->rx_rings[i]);
/* copy the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
@@ -2744,7 +2744,7 @@
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_txq(vsi, i)
ice_free_tx_ring(&tx_rings[i]);
devm_kfree(&pf->pdev->dev, tx_rings);
}
@@ -2916,13 +2916,6 @@
else
return -EINVAL;
- /* Tell the OS link is going down, the link will go back up when fw
- * says it is ready asynchronously
- */
- ice_print_link_msg(vsi, false);
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
/* Set the FC mode and only restart AN if link is up */
status = ice_set_fc(pi, &aq_failures, link_up);
@@ -3368,10 +3361,17 @@
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
- int i;
+ int v_idx;
- ice_for_each_q_vector(vsi, i) {
- if (ice_set_q_coalesce(vsi, ec, i))
+ ice_for_each_q_vector(vsi, v_idx) {
+ /* In some cases, when DCB is configured, num_[rx|tx]q can be
+ * less than vsi->num_q_vectors. This check accounts for that
+ * so we don't report a false failure.
+ */
+ if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
+ goto set_complete;
+
+ if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
}
goto set_complete;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index cbd53b5..6cfe8eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1535,10 +1535,12 @@
es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->ref_count),
GFP_KERNEL);
+ if (!es->ref_count)
+ goto err;
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
- if (!es->ref_count)
+ if (!es->written)
goto err;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 152fbd5..f2bb83a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -34,6 +34,7 @@
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
@@ -273,8 +274,14 @@
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLNVM_ULD_POR_DONE_M BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
+#define GLNVM_ULD_PE_DONE_M BIT(10)
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 214cd6e..d0ccb7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2900,7 +2900,7 @@
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_unroll;
+ goto err_init_vsi_unroll;
}
/* Driver is mostly up */
@@ -2986,6 +2986,7 @@
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
@@ -3970,8 +3971,13 @@
}
ice_for_each_txq(vsi, i) {
- vsi->tx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_tx_ring(vsi->tx_rings[i]);
+ struct ice_ring *ring = vsi->tx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_tx_ring(ring);
if (err)
break;
}
@@ -3996,8 +4002,13 @@
}
ice_for_each_rxq(vsi, i) {
- vsi->rx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_rx_ring(vsi->rx_rings[i]);
+ struct ice_ring *ring = vsi->rx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_rx_ring(ring);
if (err)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 1acdd43..7ff2e07 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1279,6 +1279,9 @@
ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_NO_MEMORY;
+
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
*/
@@ -2266,6 +2269,7 @@
return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
fm_entry->fltr_info.vsi_handle == vsi_handle) ||
(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
(test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}
@@ -2338,14 +2342,12 @@
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
- struct ice_fltr_info *fi;
-
- fi = &fm_entry->fltr_info;
- if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
- vsi_list_head, fi);
+ vsi_list_head,
+ &fm_entry->fltr_info);
if (status)
return status;
}
@@ -2663,7 +2665,7 @@
&remove_list_head);
mutex_unlock(rule_lock);
if (status)
- return;
+ goto free_fltr_list;
switch (lkup) {
case ICE_SW_LKUP_MAC:
@@ -2686,6 +2688,7 @@
break;
}
+free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 33dd103..2b55efe 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2109,6 +2109,7 @@
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
+ struct ethhdr *eth;
unsigned int count;
int tso, csum;
@@ -2156,7 +2157,9 @@
goto out_drop;
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
vsi->type == ICE_VSI_PF &&
vsi->port_info->is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 6667d17..0b2e657 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -48,7 +48,7 @@
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
-#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 5000
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index b45797f..5e97fdc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -317,8 +317,9 @@
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
- /* disable VF qp mappings */
+ /* disable VF qp mappings and set VF disable state */
ice_dis_vf_mappings(&pf->vf[i]);
+ set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
}
@@ -383,13 +384,15 @@
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
- * in the case of VFR. If this is done for PFR, it can mess up VF
- * resets because the VF driver may already have started cleanup
- * by the time we get here.
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
*/
- if (!is_pfr)
- wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
+ if (!is_pfr) {
+ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+ }
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -1287,9 +1290,12 @@
if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
return;
- /* verify if the VF is in either init or active before proceeding */
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ /* Bail out if the VF is disabled, or is neither initialized nor
+ * active; otherwise proceed with notifications
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
return;
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
@@ -1869,8 +1875,8 @@
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
+ struct ice_eth_stats stats = { 0 };
struct ice_pf *pf = vf->pf;
- struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -1889,7 +1895,6 @@
goto error_param;
}
- memset(&stats, 0, sizeof(struct ice_eth_stats));
ice_update_eth_stats(vsi);
stats = vsi->eth_stats;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8a6ef35..438b42c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -530,7 +530,7 @@
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 3182b05..f809333 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -143,7 +143,8 @@
u32 speed;
u32 supported, advertising;
- status = rd32(E1000_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
@@ -181,7 +182,7 @@
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
- if (eth_flags->e100_base_fx) {
+ if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed7e667..158feb0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -940,6 +940,7 @@
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@@ -948,7 +949,13 @@
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -1687,14 +1694,15 @@
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
- struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
+ ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -2651,7 +2659,8 @@
}
input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
- input->filter.vlan_tci = match.key->vlan_priority;
+ input->filter.vlan_tci =
+ (__force __be16)match.key->vlan_priority;
}
}
@@ -3468,6 +3477,7 @@
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -4326,8 +4336,7 @@
else
mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
- if (hw->mac.type != e1000_i211)
- mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
+ mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
}
igb_vmm_control(adapter);
@@ -4657,6 +4666,8 @@
DMA_TO_DEVICE);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -6194,9 +6205,18 @@
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -8247,7 +8267,7 @@
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
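igb_reset_task() above gains the standard guard for deferred resets: take rtnl_lock so the state checks and the reinit cannot race ndo_open/ndo_stop, and bail early when the adapter is already down or mid-reset. The shape of that guard, with demo_* placeholders:

#include <linux/rtnetlink.h>
#include <linux/bitops.h>

enum { __DEMO_DOWN, __DEMO_RESETTING };

struct demo_adapter { unsigned long state; };

static void demo_reinit_locked(struct demo_adapter *adapter) { }

static void demo_reset_task(struct demo_adapter *adapter)
{
	rtnl_lock();
	/* bail if another path already owns the device state */
	if (test_bit(__DEMO_DOWN, &adapter->state) ||
	    test_bit(__DEMO_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}
	demo_reinit_locked(adapter);
	rtnl_unlock();
}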
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0f2b68f..77cb2ab 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -83,14 +83,14 @@
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct net_device *netdev,
struct sk_buff *skb,
- u32 status, u16 vlan)
+ u32 status, __le16 vlan)
{
u16 vid;
if (status & E1000_RXD_STAT_VP) {
if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
(status & E1000_RXDEXT_STATERR_LB))
- vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
else
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 7e16345..aec998c 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -504,7 +504,7 @@
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
- return 0;
+ return -EOPNOTSUPP;
}
/* forward declaration */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ac98f1d..cbcb861 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1670,12 +1670,18 @@
cmd->base.phy_address = hw->phy.addr;
/* advertising link modes */
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
if (hw->mac.autoneg == 1) {
@@ -1684,6 +1690,9 @@
Autoneg);
}
+ /* Set pause flow control settings */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
switch (hw->fc.requested_mode) {
case igc_fc_full:
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
@@ -1698,12 +1707,11 @@
Asym_Pause);
break;
default:
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Asym_Pause);
+ break;
}
- status = rd32(IGC_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(IGC_STATUS);
if (status & IGC_STATUS_LU) {
if (status & IGC_STATUS_SPEED_1000) {
@@ -1786,6 +1794,12 @@
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
+ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+ * We have to check this and convert it to ADVERTISE_2500_FULL
+ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+ */
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+ advertising |= ADVERTISE_2500_FULL;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
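The 2500baseT fix above works around a genuinely lossy step: converting ethtool link modes to the legacy u32 cannot represent 2500baseT_Full, so the driver re-adds the equivalent advertise bit by hand after the conversion. The general pattern when narrowing a wide capability set into a legacy word (the bit value here is an assumption):

#include <stdbool.h>
#include <stdint.h>

#define ADV_2500_FULL	(1u << 15)	/* assumed legacy bit */

/* re-add capabilities the narrowing step cannot carry over */
static uint32_t narrow_advertising(uint32_t legacy_adv, bool want_2500)
{
	if (want_2500)
		legacy_adv |= ADV_2500_FULL;
	return legacy_adv;
}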
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index c25f555..ed5d09c 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -219,9 +219,9 @@
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
- s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -229,7 +229,6 @@
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -IGC_ERR_NVM;
goto out;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 5eeb4c8..08adf10 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -647,7 +647,7 @@
}
out:
- return 0;
+ return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2488867..9ba05d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -256,6 +256,8 @@
DMA_TO_DEVICE);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -2222,21 +2224,23 @@
}
/**
- * igc_get_stats - Get System Network Statistics
+ * igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
* The statistics are updated here and also from the timer callback.
*/
-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+static void igc_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ spin_lock(&adapter->stats64_lock);
if (!test_bit(__IGC_RESETTING, &adapter->state))
igc_update_stats(adapter);
-
- /* only return the current stats */
- return &netdev->stats;
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
}
static netdev_features_t igc_fix_features(struct net_device *netdev,
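
Note on the stats hunk above: ndo_get_stats64 can be called concurrently with the watchdog path that also refreshes counters, so the driver serializes on stats64_lock and hands each caller a consistent snapshot. A userspace model of that snapshot-under-lock pattern, with a pthread mutex standing in for the spinlock; all names here are illustrative:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct link_stats { uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes; };

struct adapter_model {
    pthread_mutex_t stats_lock; /* stands in for adapter->stats64_lock */
    struct link_stats stats64;  /* aggregate maintained under the lock */
};

/* refresh and copy under one lock so readers never see a torn update */
static void get_stats64_model(struct adapter_model *a, struct link_stats *out)
{
    pthread_mutex_lock(&a->stats_lock);
    /* a real driver would pull fresh hardware counters into stats64 here */
    memcpy(out, &a->stats64, sizeof(*out));
    pthread_mutex_unlock(&a->stats_lock);
}
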
@@ -2689,6 +2693,7 @@
*/
static int igc_request_msix(struct igc_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
int i = 0, err = 0, vector = 0, free_vector = 0;
struct net_device *netdev = adapter->netdev;
@@ -2697,7 +2702,13 @@
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igc_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -3984,7 +3995,7 @@
.ndo_start_xmit = igc_xmit_frame,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
- .ndo_get_stats = igc_get_stats,
+ .ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
@@ -4308,8 +4319,8 @@
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 0bd1294..39c5e6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2243,7 +2243,7 @@
}
/* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
+ reg = hw->fc.pause_time * 0x00010001U;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ccd852a..d50c5b5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@
}
/* alloc the udl from per cpu ddp pool */
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 113f608..b14b164 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -575,6 +575,11 @@
return -EINVAL;
}
+ if (xs->props.mode != XFRM_MODE_TRANSPORT) {
+ netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ return -EINVAL;
+ }
+
if (ixgbe_ipsec_check_mgmt_ip(xs)) {
netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cc3196a..636e6e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -923,7 +923,7 @@
ring->queue_index = txr_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
/* update count and index */
txr_count--;
@@ -950,7 +950,7 @@
set_ring_xdp(ring);
/* assign ring to adapter */
- adapter->xdp_ring[xdp_idx] = ring;
+ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
/* update count and index */
xdp_count--;
@@ -993,7 +993,7 @@
ring->queue_index = rxr_idx;
/* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
+ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
/* update count and index */
rxr_count--;
@@ -1022,13 +1022,13 @@
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
- adapter->xdp_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
else
- adapter->tx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
}
ixgbe_for_each_ring(ring, q_vector->rx)
- adapter->rx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
napi_hash_del(&q_vector->napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 91b3780..8a894e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1827,7 +1827,8 @@
struct sk_buff *skb)
{
if (ring_uses_build_skb(rx_ring)) {
- unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+ unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+ unsigned long offset = (unsigned long)(skb->data) & mask;
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
@@ -1947,7 +1948,8 @@
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1958,7 +1960,7 @@
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
@@ -2023,11 +2025,18 @@
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -2057,9 +2066,10 @@
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
{
- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2254,7 +2264,8 @@
rx_buffer->page_offset ^= truesize;
#else
unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
rx_buffer->page_offset += truesize;
@@ -2294,6 +2305,7 @@
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2313,7 +2325,7 @@
*/
dma_rmb();
- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size,
+ &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2355,7 +2367,7 @@
break;
}
- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
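
The thread through these Rx hunks: the reuse decision compares the page's total refcount against pagecnt_bias (the references the driver itself holds), and that refcount is now sampled in ixgbe_get_rx_buffer(), before the frame is handed up, so a reference dropped asynchronously in between cannot make a still-shared page look exclusively owned. A small model of the arithmetic, with a plain int standing in for the page_count() snapshot:

#include <stdbool.h>
#include <stdio.h>

/* mirrors the driver's check: reuse only if nobody beyond the driver's
 * own accounting still holds the page
 */
static bool can_reuse_page(int refcount_snapshot, int pagecnt_bias)
{
    return (refcount_snapshot - pagecnt_bias) <= 1;
}

int main(void)
{
    printf("reuse: %d\n", can_reuse_page(4, 3)); /* 1: within the limit */
    printf("reuse: %d\n", can_reuse_page(5, 3)); /* 0: page still shared */
    return 0;
}
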
@@ -5239,7 +5251,7 @@
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
- u64 action;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5248,17 +5260,34 @@
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
- action = filter->action;
- if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
- action =
- (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
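
The mapping in the hunk above is easiest to see with numbers: a PF rule uses the ring's register index directly, while a VF rule indexes into that VF's pool, so with num_rx_queues_per_pool = 4 a rule for VF 2, ring 1 lands on absolute queue (2 - 1) * 4 + 1 = 5. A sketch of just that arithmetic; the constants are chosen for illustration:

#include <stdio.h>

/* absolute queue index for a VF-targeted FDIR rule (vf >= 1) */
static unsigned int fdir_abs_queue(unsigned int vf, unsigned int ring,
                                   unsigned int queues_per_pool)
{
    return (vf - 1) * queues_per_pool + ring;
}

int main(void)
{
    /* 4 queues per pool: VF 2, ring 1 -> absolute queue 5 */
    printf("%u\n", fdir_abs_queue(2, 1, 4));
    return 0;
}
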
@@ -7046,7 +7075,10 @@
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7067,15 +7099,20 @@
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
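
These READ_ONCE() loads pair with the WRITE_ONCE() stores in the ixgbe_lib.c hunks earlier: the watchdog's stats walk can race with ring teardown, so each pointer is read exactly once and a NULL result skips the slot instead of dereferencing a ring that is going away. A userspace model using C11 relaxed atomics in place of READ_ONCE/WRITE_ONCE; the names are illustrative:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct ring_model { uint64_t packets; };

#define NUM_RINGS 8
static _Atomic(struct ring_model *) rings[NUM_RINGS];

static uint64_t sum_packets(void)
{
    uint64_t total = 0;
    size_t i;

    for (i = 0; i < NUM_RINGS; i++) {
        /* single load: we see either the ring or NULL, never a torn value */
        struct ring_model *r =
            atomic_load_explicit(&rings[i], memory_order_relaxed);

        if (!r)
            continue; /* slot being torn down, skip it */
        total += r->packets;
    }
    return total;
}
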
@@ -8639,7 +8676,8 @@
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -9558,8 +9596,10 @@
ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
- if (!err)
- ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+ if (err)
+ goto err_out_w_lock;
+
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
@@ -10247,7 +10287,12 @@
/* If transitioning XDP modes reconfigure rings */
if (need_reset) {
- int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
+ int err;
+
+ if (!prog)
+ /* Wait until ndo_xsk_wakeup completes. */
+ synchronize_rcu();
+ err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);
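
The synchronize_rcu() in the hunk above closes a window where ndo_xsk_wakeup may still be executing on another CPU while the XDP rings are reconfigured. A kernel-style sketch of the general detach-then-wait ordering; active_prog, prog_use and prog_teardown are illustrative names, not this driver's symbols:

#include <linux/rcupdate.h>

struct prog;
static struct prog __rcu *active_prog;

/* reader side, e.g. a wakeup path */
static void prog_use(void (*use)(struct prog *))
{
    struct prog *p;

    rcu_read_lock();
    p = rcu_dereference(active_prog);
    if (p)
        use(p);
    rcu_read_unlock();
}

/* writer side: detach first, then wait out every in-flight reader */
static void prog_teardown(struct prog *old)
{
    RCU_INIT_POINTER(active_prog, NULL);
    synchronize_rcu(); /* after this, no CPU can still see 'old' */
    /* now safe to free 'old' or reconfigure the rings */
}
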
@@ -11163,6 +11208,7 @@
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 537dfff..47a9201 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
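
The point of moving the check in ixgbe_set_vf_lpe() is that max_frame arrives over the PF/VF mailbox, so it is untrusted input and must be bounds-checked before any hardware state (MAXFRS) is touched; the fix also adds the missing lower bound, since frames below ETH_MIN_MTU (68) are as invalid as oversized ones. A minimal model of the check; the constants mirror ETH_MIN_MTU and IXGBE_MAX_JUMBO_FRAME_SIZE as found upstream:

#include <stdbool.h>

#define MIN_FRAME 68   /* mirrors ETH_MIN_MTU */
#define MAX_FRAME 9728 /* mirrors IXGBE_MAX_JUMBO_FRAME_SIZE */

/* reject an out-of-range, VF-supplied frame size before programming hw */
static bool vf_frame_size_ok(unsigned int max_frame)
{
    return max_frame >= MIN_FRAME && max_frame <= MAX_FRAME;
}
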
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index d6feaac..b43be9f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -709,10 +709,14 @@
if (qid >= adapter->num_xdp_queues)
return -ENXIO;
- if (!adapter->xdp_ring[qid]->xsk_umem)
+ ring = adapter->xdp_ring[qid];
+
+ if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
+ return -ENETDOWN;
+
+ if (!ring->xsk_umem)
return -ENXIO;
- ring = adapter->xdp_ring[qid];
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 5170dd9..caaea2c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -272,6 +272,11 @@
return -EINVAL;
}
+ if (xs->props.mode != XFRM_MODE_TRANSPORT) {
+ netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ return -EINVAL;
+ }
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
struct rx_sa rsa;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 076f2da..be8e6d4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1079,11 +1079,14 @@
case XDP_TX:
xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ if (result == IXGBEVF_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fallthrough */
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
/* fallthrough -- handle aborts by dropping packet */
case XDP_DROP:
@@ -2081,11 +2084,6 @@
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;