Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 16
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 14
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 12
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 10
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 48
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 68
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 387
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 16
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 287
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 24
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 23
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 20
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 279
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 11
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 15
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 46
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 177
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 115
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 256
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 13
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 754
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 76
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 50
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 90
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 594
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 38
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 114
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 136
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/mbx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 24
62 files changed, 2445 insertions, 1472 deletions
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 25c6dfd500b4..2b7323d392dc 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
e100_enable_irq(nic);
}
@@ -2426,19 +2426,21 @@ err_clean_rx:
#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
-static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int e100_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct nic *nic = netdev_priv(netdev);
- return mii_ethtool_gset(&nic->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&nic->mii, cmd);
}
-static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int e100_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct nic *nic = netdev_priv(netdev);
int err;
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
- err = mii_ethtool_sset(&nic->mii, cmd);
+ err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
e100_exec_cb(nic, NULL, e100_configure);
return err;
@@ -2741,8 +2743,6 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static const struct ethtool_ops e100_ethtool_ops = {
- .get_settings = e100_get_settings,
- .set_settings = e100_set_settings,
.get_drvinfo = e100_get_drvinfo,
.get_regs_len = e100_get_regs_len,
.get_regs = e100_get_regs,
@@ -2763,6 +2763,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
.get_ethtool_stats = e100_get_ethtool_stats,
.get_sset_count = e100_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = e100_get_link_ksettings,
+ .set_link_ksettings = e100_set_link_ksettings,
};
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
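[Editor's note: the e100 hunks above follow the standard conversion away from the deprecated get_settings/set_settings ethtool hooks. MII-based drivers swap mii_ethtool_gset/mii_ethtool_sset for the ksettings helpers and register the callbacks under the new names. A minimal sketch of the pattern, with hypothetical "foo" names standing in for any such driver:]

/* Sketch only: hypothetical "foo" driver showing the conversion shape. */
static int foo_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct foo_priv *priv = netdev_priv(netdev);

	/* fills cmd (link modes, speed, duplex) from the MII registers */
	return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
}

static int foo_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct foo_priv *priv = netdev_priv(netdev);

	/* note the const: callers may not modify the requested settings */
	return mii_ethtool_set_link_ksettings(&priv->mii, cmd);
}

static const struct ethtool_ops foo_ethtool_ops = {
	/* .get_settings/.set_settings entries are dropped entirely; the
	 * ksettings pair supports link-mode masks wider than 32 bits.
	 */
	.get_link_ksettings = foo_get_link_ksettings,
	.set_link_ksettings = foo_set_link_ksettings,
};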
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 879cca47b021..a29b12e80855 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring);
int e1000e_setup_tx_resources(struct e1000_ring *ring);
void e1000e_free_rx_resources(struct e1000_ring *ring);
void e1000e_free_tx_resources(struct e1000_ring *ring);
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats);
+void e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index eccf1da9356b..2175cced402f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev), netdev->last_rx);
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
@@ -5920,12 +5920,11 @@ static void e1000_reset_task(struct work_struct *work)
*
* Returns the address of the device statistics structure.
**/
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+void e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- memset(stats, 0, sizeof(struct rtnl_link_stats64));
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
/* Fill out the OS statistics structure */
@@ -5958,7 +5957,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
/* Tx Dropped needs to be maintained elsewhere */
spin_unlock(&adapter->stats64_lock);
- return stats;
}
/**
@@ -6276,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
/* Quiesce the device without resetting the hardware */
e1000e_down(adapter, false);
e1000_free_irq(adapter);
+ e1000e_reset_interrupt_capability(adapter);
}
- e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
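[Editor's note: the e1000e change above is one instance of a tree-wide conversion also visible in fm10k and i40e below: ndo_get_stats64 handlers become void because the core zeroes the statistics structure before the call and never used the returned pointer. A sketch of the resulting callback shape, with hypothetical "foo" names:]

/* Sketch only: the new void ndo_get_stats64 shape. */
static void foo_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	/* no memset() and no return statement: the caller has already
	 * zeroed *stats, so only the counters the driver tracks are set
	 */
	stats->rx_packets = adapter->rx_packets;
	stats->rx_bytes   = adapter->rx_bytes;
	stats->tx_packets = adapter->tx_packets;
	stats->tx_bytes   = adapter->tx_bytes;
}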
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 4d19e46f7c55..52b979443cde 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -260,9 +260,7 @@ struct fm10k_intfc {
#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
-#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
-#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
+#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3))
int xcast_mode;
/* Tx fast path data */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index dd95ac4f4c64..62a6ad9b3eed 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -506,7 +506,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* if we somehow dropped the Tx enable we should reset */
- if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
ret_val = FM10K_ERR_RESET_REQUESTED;
goto out;
}
@@ -523,8 +523,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
/* interface cannot receive traffic without logical ports */
if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
- if (hw->mac.ops.request_lport_map)
- ret_val = hw->mac.ops.request_lport_map(hw);
+ if (mac->ops.request_lport_map)
+ ret_val = mac->ops.request_lport_map(hw);
goto out;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 5241e0873397..0c84fef750f4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -148,7 +148,7 @@ enum {
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};
-static void fm10k_add_stat_strings(char **p, const char *prefix,
+static void fm10k_add_stat_strings(u8 **p, const char *prefix,
const struct fm10k_stats stats[],
const unsigned int size)
{
@@ -164,32 +164,31 @@ static void fm10k_add_stat_strings(char **p, const char *prefix,
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- char *p = (char *)data;
unsigned int i;
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats,
FM10K_NETDEV_STATS_LEN);
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats,
FM10K_GLOBAL_STATS_LEN);
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats,
FM10K_MBX_STATS_LEN);
if (interface->hw.mac.type != fm10k_mac_vf)
- fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+ fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats,
FM10K_PF_STATS_LEN);
for (i = 0; i < interface->hw.mac.max_queues; i++) {
char prefix[ETH_GSTRING_LEN];
snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
- fm10k_add_stat_strings(&p, prefix,
+ fm10k_add_stat_strings(&data, prefix,
fm10k_gstrings_queue_stats,
FM10K_QUEUE_STATS_LEN);
snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
- fm10k_add_stat_strings(&p, prefix,
+ fm10k_add_stat_strings(&data, prefix,
fm10k_gstrings_queue_stats,
FM10K_QUEUE_STATS_LEN);
}
@@ -198,18 +197,16 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
static void fm10k_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
- char *p = (char *)data;
-
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *fm10k_gstrings_test,
+ memcpy(data, fm10k_gstrings_test,
FM10K_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
fm10k_get_stat_strings(dev, data);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(p, fm10k_prv_flags,
+ memcpy(data, fm10k_prv_flags,
FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
break;
}
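[Editor's note: the string-table rework above removes the "char *p = (char *)data;" shadow cursor: fm10k_add_stat_strings() now takes u8 ** and advances the caller's data pointer itself, so fm10k_get_stat_strings() needs no casts. A standalone, compilable sketch of that cursor-advancing pattern with hypothetical names:]

#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* Hypothetical helper: appends fixed-width strings and moves the
 * caller's cursor forward, mirroring the fm10k pattern above.
 */
static void add_stat_strings(unsigned char **p, const char *prefix,
			     const char *const names[], unsigned int size)
{
	for (unsigned int i = 0; i < size; i++) {
		snprintf((char *)*p, ETH_GSTRING_LEN, "%s%s",
			 prefix, names[i]);
		*p += ETH_GSTRING_LEN;	/* advance caller's cursor */
	}
}

int main(void)
{
	static const char *const names[] = { "packets", "bytes" };
	unsigned char data[4 * ETH_GSTRING_LEN] = { 0 };
	unsigned char *p = data;

	add_stat_strings(&p, "tx_queue_0_", names, 2);
	add_stat_strings(&p, "rx_queue_0_", names, 2);
	printf("%s\n%s\n", (char *)data, (char *)data + ETH_GSTRING_LEN);
	return 0;
}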
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5de937852436..5bb233a9614c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,7 +28,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.21.2-k"
+#define DRV_VERSION "0.21.7-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
@@ -251,6 +251,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
/**
* fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_buffer: buffer containing page to add
+ * @size: packet size from rx_desc
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
*
@@ -263,12 +264,12 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
* true if the buffer can be reused by the interface.
**/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
+ unsigned int size,
union fm10k_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
@@ -314,6 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
union fm10k_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ unsigned int size = le16_to_cpu(rx_desc->w.length);
struct fm10k_rx_buffer *rx_buffer;
struct page *page;
@@ -350,11 +352,11 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- FM10K_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
+ if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
fm10k_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -473,6 +475,8 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
fm10k_rx_checksum(rx_ring, rx_desc, skb);
+ FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
+
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb_record_rx_queue(skb, rx_ring->queue_index);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index c9dfa6564fcf..334088a101c3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -2011,9 +2011,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
* function can also be used to respond to an error as the connection
* resetting would also be a means of dealing with errors.
**/
-static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
- struct fm10k_mbx_info *mbx)
+static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
{
+ s32 err = 0;
const enum fm10k_mbx_state state = mbx->state;
switch (state) {
@@ -2026,6 +2027,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_OPEN:
/* flush any incomplete work */
fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
break;
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
@@ -2035,6 +2037,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
}
fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
}
/**
@@ -2115,7 +2119,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
case 0:
- fm10k_sm_mbx_process_reset(hw, mbx);
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
break;
case FM10K_SM_MBX_VERSION:
err = fm10k_sm_mbx_process_version_1(hw, mbx);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index bc5ef6eb3dd6..01db688cf539 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1118,8 +1118,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
* Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
* function replaces fm10k_get_stats for kernels which support it.
*/
-static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void fm10k_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_ring *ring;
@@ -1164,8 +1164,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
/* following stats updated by fm10k_service_task() */
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
-
- return stats;
}
int fm10k_setup_tc(struct net_device *dev, u8 tc)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b1a2f8437d59..e372a5823480 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1144,6 +1144,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
struct fm10k_hw *hw = &interface->hw;
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 eicr;
+ s32 err = 0;
/* unmask any set bits related to this interrupt */
eicr = fm10k_read_reg(hw, FM10K_EICR);
@@ -1159,12 +1160,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* service mailboxes */
if (fm10k_mbx_trylock(interface)) {
- mbx->ops.process(hw, mbx);
+ err = mbx->ops.process(hw, mbx);
/* handle VFLRE events */
fm10k_iov_event(interface);
fm10k_mbx_unlock(interface);
}
+ if (err == FM10K_ERR_RESET_REQUESTED)
+ interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 23fb319fd2a0..40ee0242a80a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -72,10 +72,6 @@ force_reset:
fm10k_write_flush(hw);
udelay(FM10K_RESET_TIMEOUT);
- /* Reset mailbox global interrupts */
- reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
- fm10k_write_reg(hw, FM10K_GMBX, reg);
-
/* Verify we made it out of reset */
reg = fm10k_read_reg(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ba8d30984bee..82d8040fa418 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -134,19 +134,6 @@
/* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
-/**
- * i40e_is_mac_710 - Return true if MAC is X710/XL710
- * @hw: ptr to the hardware info
- **/
-static inline bool i40e_is_mac_710(struct i40e_hw *hw)
-{
- if ((hw->mac.type == I40E_MAC_X710) ||
- (hw->mac.type == I40E_MAC_XL710))
- return true;
-
- return false;
-}
-
/* driver state flags */
enum i40e_state_t {
__I40E_TESTING,
@@ -361,6 +348,8 @@ struct i40e_pf {
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
+#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -480,6 +469,22 @@ struct i40e_mac_filter {
enum i40e_filter_state state;
};
+/* Wrapper structure to keep track of filters while we are preparing to send
+ * firmware commands. We cannot send firmware commands while holding a
+ * spinlock, since it might sleep. To avoid this, we wrap the added filters in
+ * a separate structure, which will track the state change and update the real
+ * filter while under lock. We can't simply hold the filters in a separate
+ * list, as this opens a window for a race condition when adding new MAC
+ * addresses to all VLANs, or when adding new VLANs to all MAC addresses.
+ */
+struct i40e_new_mac_filter {
+ struct hlist_node hlist;
+ struct i40e_mac_filter *f;
+
+ /* Track future changes to state separately */
+ enum i40e_filter_state state;
+};
+
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
@@ -762,6 +767,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
@@ -804,7 +810,6 @@ int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
@@ -834,9 +839,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
#ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *storage);
+void i40e_get_netdev_stats_struct(struct net_device *netdev,
+ struct rtnl_link_stats64 *storage);
int i40e_set_mac(struct net_device *netdev, void *p);
void i40e_set_rx_mode(struct net_device *netdev);
#endif
@@ -853,12 +857,12 @@ int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid);
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
- const u8 *macaddr);
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid);
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr);
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
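[Editor's note: the comment block introducing struct i40e_new_mac_filter above states the constraint driving much of this series: admin-queue (firmware) commands may sleep, so they cannot be issued while mac_filter_hash_lock is held. The flow this enables, condensed from the i40e_sync_vsi_filters() hunks later in this patch, with error handling omitted:]

struct i40e_new_mac_filter *new;
struct i40e_mac_filter *f;
struct hlist_node *h;
HLIST_HEAD(tmp_add_list);
int bkt;

/* 1) Snapshot NEW filters under the lock; GFP_ATOMIC because we
 *    cannot sleep here.
 */
spin_lock_bh(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
	if (f->state != I40E_FILTER_NEW)
		continue;
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	new->f = f;			/* wrapper points at the real filter */
	new->state = f->state;
	hlist_add_head(&new->hlist, &tmp_add_list);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);

/* 2) Issue the (possibly sleeping) AQ add commands outside the lock,
 *    recording each firmware result in new->state.
 */

/* 3) Re-take the lock and publish results without clobbering any
 *    state change that raced in between.
 */
spin_lock_bh(&vsi->mac_filter_hash_lock);
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
	if (new->f->state == I40E_FILTER_NEW)	/* only if still NEW */
		new->f->state = new->state;
	hlist_del(&new->hlist);
	kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);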
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b2101a51534c..451f48b7540a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -538,6 +538,8 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 7fe72abc0b4a..d570219efd9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -174,8 +174,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
if (!vsi)
return;
- memset(&params, 0, sizeof(params));
- i40e_client_get_params(vsi, &params);
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if (cdev->lan_info.pf == vsi->back) {
@@ -186,6 +184,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change routine\n");
continue;
}
+ memset(&params, 0, sizeof(params));
+ i40e_client_get_params(vsi, &params);
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state)) {
dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
@@ -201,41 +201,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
}
/**
- * i40e_notify_client_of_netdev_open - call the client open callback
- * @vsi: the VSI with netdev opened
- *
- * If there is a client to this netdev, call the client with open
- **/
-void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
-{
- struct i40e_client_instance *cdev;
- int ret = 0;
-
- if (!vsi)
- return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.netdev == vsi->netdev) {
- if (!cdev->client ||
- !cdev->client->ops || !cdev->client->ops->open) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance open routine\n");
- continue;
- }
- if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state))) {
- ret = cdev->client->ops->open(&cdev->lan_info,
- cdev->client);
- if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state);
- }
- }
- }
- mutex_unlock(&i40e_client_instance_mutex);
-}
-
-/**
* i40e_client_release_qvlist
* @ldev: pointer to L2 context.
*
@@ -545,9 +510,10 @@ void i40e_client_subtask(struct i40e_pf *pf)
continue;
if (!existing) {
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
+ pf->hw.bus.bus_id, pf->hw.bus.device,
+ pf->hw.bus.func);
}
mutex_lock(&i40e_client_instance_mutex);
@@ -596,8 +562,9 @@ int i40e_lan_add_device(struct i40e_pf *pf)
ldev->pf = pf;
INIT_LIST_HEAD(&ldev->list);
list_add(&ldev->list, &i40e_devices);
- dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
- pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
+ dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.bus_id,
+ pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients if
@@ -625,9 +592,9 @@ int i40e_lan_del_device(struct i40e_pf *pf)
mutex_lock(&i40e_device_mutex);
list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->pf == pf) {
- dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
- pf->hw.pf_id, pf->hw.bus.device,
- pf->hw.bus.func);
+ dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.bus_id,
+ pf->hw.bus.device, pf->hw.bus.func);
list_del(&ldev->list);
kfree(ldev);
ret = 0;
@@ -688,13 +655,11 @@ static int i40e_client_release(struct i40e_client *client)
* i40e_client_prepare - prepare client specific resources
* @client: pointer to the registered client
*
- * Return 0 on success or < 0 on error
**/
-static int i40e_client_prepare(struct i40e_client *client)
+static void i40e_client_prepare(struct i40e_client *client)
{
struct i40e_device *ldev;
struct i40e_pf *pf;
- int ret = 0;
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
@@ -704,7 +669,6 @@ static int i40e_client_prepare(struct i40e_client *client)
i40e_service_event_schedule(pf);
}
mutex_unlock(&i40e_device_mutex);
- return ret;
}
/**
@@ -961,13 +925,9 @@ int i40e_register_client(struct i40e_client *client)
set_bit(__I40E_CLIENT_REGISTERED, &client->state);
mutex_unlock(&i40e_client_mutex);
- if (i40e_client_prepare(client)) {
- ret = -EIO;
- goto out;
- }
+ i40e_client_prepare(client);
- pr_info("i40e: Registered client %s with return code %d\n",
- client->name, ret);
+ pr_info("i40e: Registered client %s\n", client->name);
out:
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 128735975caa..ece57d6a6e23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -300,7 +300,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len;
u8 *buf = (u8 *)buffer;
- u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
@@ -328,12 +327,18 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if (buf_len < len)
len = buf_len;
/* write the full 16-byte chunks */
- for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
- /* write whatever's left over without overrunning the buffer */
- if (i < len)
- i40e_debug(hw, mask, "\t0x%04X %*ph\n",
- i, len - i, buf + i);
+ if (hw->debug_mask & mask) {
+ char prefix[20];
+
+ snprintf(prefix, 20,
+ "i40e %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
}
}
@@ -1838,6 +1843,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
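[Editor's note: the i40e_debug_aq() hunk above trades a hand-rolled dump loop (full 16-byte chunks plus a special case for the remainder) for a single print_hex_dump() call, which handles the trailing partial line itself and tags every row with the PCI address. A sketch of the call with illustrative buffer contents:]

/* Sketch: the replacement call as used above. 41 is deliberately not
 * a multiple of 16, to show why the old remainder special-case goes.
 */
u8 buf[41];
char prefix[20];

snprintf(prefix, sizeof(prefix), "i40e %02x:%02x.%x: \t0x",
	 hw->bus.bus_id, hw->bus.device, hw->bus.func);

/* KERN_INFO level, offset-style prefix, 16 bytes per row, 1-byte
 * groups, no trailing ASCII column; the final 9-byte row is emitted
 * without any extra driver code.
 */
print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1,
	       buf, sizeof(buf), false);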
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index f1f41f12902f..267ad2588255 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -974,7 +974,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_dcbx_config *r_cfg =
&pf->hw.remote_dcbx_config;
int i, ret;
- u32 switch_id;
+ u16 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
@@ -986,7 +986,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
vsi = pf->vsi[pf->lan_vsi];
switch_id =
- vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+ le16_to_cpu(vsi->info.switch_id) &
+ I40E_AQ_VSI_SW_ID_MASK;
ret = i40e_aq_query_port_ets_config(&pf->hw,
switch_id,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index cc1465aac2ef..a22e26200bcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -803,9 +803,12 @@ static int i40e_set_settings(struct net_device *netdev,
if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
/* save the requested speeds */
hw->phy.link_info.requested_speeds = config.link_speed;
@@ -2072,7 +2075,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector;
u16 vector, intrl;
- intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+ intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -2116,6 +2119,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 intrl_reg;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
@@ -2127,8 +2131,9 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
- netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+ if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
+ INTRL_REG_TO_USEC(I40E_MAX_INTRL));
return -EINVAL;
}
@@ -2141,7 +2146,12 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+ intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
+ vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
+ if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
+ netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
+ vsi->int_rate_limit);
+ }
if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
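[Editor's note: the coalescing hunk above changes rx-usecs-high validation from a hard-coded bound to a round-trip through the register encoding, warning when the request gets quantized. The macros are not shown in this hunk; the compilable sketch below assumes the i40e rate-limit register holds the interval in 4 usec units plus an enable bit, so requests round down to a multiple of 4 -- the constants are hypothetical stand-ins:]

#include <stdio.h>

/* Hypothetical stand-ins for the driver macros; only the 4 usec
 * quantization behaviour matters for this example.
 */
#define INTRL_ENA	0x40u
#define I40E_MAX_INTRL	0x3Bu

static unsigned int intrl_usec_to_reg(unsigned int usec)
{
	return (usec >> 2) ? ((usec >> 2) | INTRL_ENA) : 0;
}

static unsigned int intrl_reg_to_usec(unsigned int reg)
{
	return (reg & ~INTRL_ENA) << 2;
}

int main(void)
{
	unsigned int requested = 42;	/* not a multiple of 4 */
	unsigned int reg = intrl_usec_to_reg(requested);
	unsigned int effective = intrl_reg_to_usec(reg);

	if (effective != requested)	/* mirrors the new netif_info() */
		printf("rate limit rounded down to %u usec\n", effective);
	printf("max is %u usec\n", intrl_reg_to_usec(I40E_MAX_INTRL));
	return 0;
}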
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ad4cf639430e..e8a8351c8ea9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_BUILD 27
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -77,7 +77,6 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -409,15 +408,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
**/
-#ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-#else
-static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+#ifndef I40E_FCOE
+static
#endif
+void i40e_get_netdev_stats_struct(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_ring *tx_ring, *rx_ring;
@@ -426,10 +421,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
int i;
if (test_bit(__I40E_DOWN, &vsi->state))
- return stats;
+ return;
if (!vsi->tx_rings)
- return stats;
+ return;
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -469,8 +464,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
-
- return stats;
}
/**
@@ -1260,7 +1253,9 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
struct hlist_head *tmp_del_list,
int vlan_filters)
{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new;
struct hlist_node *h;
int bkt, new_vlan;
@@ -1279,13 +1274,13 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
*/
/* Update the filters about to be added in place */
- hlist_for_each_entry(f, tmp_add_list, hlist) {
- if (vsi->info.pvid && f->vlan != vsi->info.pvid)
- f->vlan = vsi->info.pvid;
- else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
- f->vlan = 0;
- else if (!vlan_filters && f->vlan == 0)
- f->vlan = I40E_VLAN_ANY;
+ hlist_for_each_entry(new, tmp_add_list, hlist) {
+ if (pvid && new->f->vlan != pvid)
+ new->f->vlan = pvid;
+ else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
+ new->f->vlan = 0;
+ else if (!vlan_filters && new->f->vlan == 0)
+ new->f->vlan = I40E_VLAN_ANY;
}
/* Update the remaining active filters */
@@ -1295,12 +1290,12 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
* order to avoid duplicating code for adding the new filter
* then deleting the old filter.
*/
- if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
+ if ((pvid && f->vlan != pvid) ||
(vlan_filters && f->vlan == I40E_VLAN_ANY) ||
(!vlan_filters && f->vlan == 0)) {
/* Determine the new vlan we will be adding */
- if (vsi->info.pvid)
- new_vlan = vsi->info.pvid;
+ if (pvid)
+ new_vlan = pvid;
else if (vlan_filters)
new_vlan = 0;
else
@@ -1311,9 +1306,16 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
if (!add_head)
return -ENOMEM;
- /* Put the replacement filter into the add list */
- hash_del(&add_head->hlist);
- hlist_add_head(&add_head->hlist, tmp_add_list);
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
+ new->f = add_head;
+ new->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new->hlist, tmp_add_list);
/* Put the original filter into the delete list */
f->state = I40E_FILTER_REMOVE;
@@ -1440,23 +1442,25 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry().
**/
-static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
if (!f)
return;
+ /* If the filter was never added to firmware then we can just delete it
+ * directly and we don't want to set the status to remove or else an
+ * admin queue command will unnecessarily fire.
+ */
if ((f->state == I40E_FILTER_FAILED) ||
(f->state == I40E_FILTER_NEW)) {
- /* this one never got added by the FW. Just remove it,
- * no need to sync anything.
- */
hash_del(&f->hlist);
kfree(f);
} else {
f->state = I40E_FILTER_REMOVE;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
/**
@@ -1483,18 +1487,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
}
/**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
*
- * Goes through all the macvlan filters and adds a macvlan filter for each
+ * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
+ * go through all the macvlan filters and add a macvlan filter for each
* unique vlan that already exists. If a PVID has been assigned, instead only
* add the macaddr to that VLAN.
*
* Returns last filter added on success, else NULL
**/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
- const u8 *macaddr)
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr)
{
struct i40e_mac_filter *f, *add = NULL;
struct hlist_node *h;
@@ -1504,6 +1509,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
+ if (!i40e_is_vsi_in_vlan(vsi))
+ return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
+
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
@@ -1516,15 +1524,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
}
/**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * i40e_del_mac_filter - Remove a MAC filter from all VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be removed
*
- * Removes a given MAC address from a VSI, regardless of VLAN
+ * Removes a given MAC address from a VSI regardless of what VLAN it has been
+ * associated with.
*
* Returns 0 for success, or error
**/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1585,8 +1594,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
- i40e_put_mac_in_vlan(vsi, addr->sa_data);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+ i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
@@ -1762,14 +1771,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_mac_filter *f;
-
- if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, addr);
- else
- f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
- if (f)
+ if (i40e_add_mac_filter(vsi, addr))
return 0;
else
return -ENOMEM;
@@ -1788,10 +1791,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_del_mac_all_vlan(vsi, addr);
- else
- i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
+ i40e_del_mac_filter(vsi, addr);
return 0;
}
@@ -1829,16 +1829,15 @@ static void i40e_set_rx_mode(struct net_device *netdev)
}
/**
- * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
* @vsi: Pointer to VSI struct
* @from: Pointer to list which contains MAC filter entries - changes to
* those entries needs to be undone.
*
- * MAC filter entries from list were slated to be sent to firmware, either for
- * addition or deletion.
+ * MAC filter entries from this list were slated for deletion.
**/
-static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
- struct hlist_head *from)
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1853,6 +1852,53 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
}
/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries needs to be undone.
+ *
+ * MAC filter entries from this list were slated for addition.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
+{
+ struct i40e_new_mac_filter *new;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(new, h, from, hlist) {
+ /* We can simply free the wrapper structure */
+ hlist_del(&new->hlist);
+ kfree(new);
+ }
+}
+
+/**
+ * i40e_next_entry - Get the next non-broadcast filter from a list
+ * @next: pointer to filter in list
+ *
+ * Returns the next non-broadcast filter in the list. Required so that we
+ * ignore broadcast filters within the list, since these are not handled via
+ * the normal firmware update path.
+ */
+static
+struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
+{
+ while (next) {
+ next = hlist_entry(next->hlist.next,
+ typeof(struct i40e_new_mac_filter),
+ hlist);
+
+ /* keep going if we found a broadcast filter */
+ if (next && is_broadcast_ether_addr(next->f->macaddr))
+ continue;
+
+ break;
+ }
+
+ return next;
+}
+
+/**
* i40e_update_filter_state - Update filter state based on return data
* from firmware
* @count: Number of filters added
@@ -1865,7 +1911,7 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
static int
i40e_update_filter_state(int count,
struct i40e_aqc_add_macvlan_element_data *add_list,
- struct i40e_mac_filter *add_head)
+ struct i40e_new_mac_filter *add_head)
{
int retval = 0;
int i;
@@ -1884,9 +1930,9 @@ i40e_update_filter_state(int count,
retval++;
}
- add_head = hlist_entry(add_head->hlist.next,
- typeof(struct i40e_mac_filter),
- hlist);
+ add_head = i40e_next_filter(add_head);
+ if (!add_head)
+ break;
}
return retval;
@@ -1943,7 +1989,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_add_macvlan_element_data *list,
- struct i40e_mac_filter *add_head,
+ struct i40e_new_mac_filter *add_head,
int num_add, bool *promisc_changed)
{
struct i40e_hw *hw = &vsi->back->hw;
@@ -1971,10 +2017,12 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
* This function sets or clears the promiscuous broadcast flags for VLAN
* filters in order to properly receive broadcast frames. Assumes that only
* broadcast filters are passed.
+ *
+ * Returns status indicating success or failure;
**/
-static
-void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
- struct i40e_mac_filter *f)
+static i40e_status
+i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
@@ -1993,15 +2041,13 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
NULL);
}
- if (aq_ret) {
+ if (aq_ret)
dev_warn(&vsi->back->pdev->dev,
"Error %s setting broadcast promiscuous mode on %s\n",
i40e_aq_str(hw, hw->aq.asq_last_status),
vsi_name);
- f->state = I40E_FILTER_FAILED;
- } else if (enable) {
- f->state = I40E_FILTER_ACTIVE;
- }
+
+ return aq_ret;
}
/**
@@ -2015,7 +2061,8 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
struct hlist_head tmp_add_list, tmp_del_list;
- struct i40e_mac_filter *f, *add_head = NULL;
+ struct i40e_mac_filter *f;
+ struct i40e_new_mac_filter *new, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
unsigned int failed_filters = 0;
unsigned int vlan_filters = 0;
@@ -2069,8 +2116,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
continue;
}
if (f->state == I40E_FILTER_NEW) {
- hash_del(&f->hlist);
- hlist_add_head(&f->hlist, &tmp_add_list);
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto err_no_memory_locked;
+
+ /* Store pointer to the real filter */
+ new->f = f;
+ new->state = f->state;
+
+ /* Add it to the hash list */
+ hlist_add_head(&new->hlist, &tmp_add_list);
}
/* Count the number of active (current and new) VLAN
@@ -2105,7 +2161,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* handle broadcast filters by updating the broadcast
- * promiscuous flag instead of deleting a MAC filter.
+ * promiscuous flag and release filter list.
*/
if (is_broadcast_ether_addr(f->macaddr)) {
i40e_aqc_broadcast_filter(vsi, vsi_name, f);
@@ -2163,36 +2219,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
goto err_no_memory;
num_add = 0;
- hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
- f->state = I40E_FILTER_FAILED;
+ new->state = I40E_FILTER_FAILED;
continue;
}
/* handle broadcast filters by updating the broadcast
* promiscuous flag instead of adding a MAC filter.
*/
- if (is_broadcast_ether_addr(f->macaddr)) {
- u64 key = i40e_addr_to_hkey(f->macaddr);
- i40e_aqc_broadcast_filter(vsi, vsi_name, f);
-
- hlist_del(&f->hlist);
- hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ if (is_broadcast_ether_addr(new->f->macaddr)) {
+ if (i40e_aqc_broadcast_filter(vsi, vsi_name,
+ new->f))
+ new->state = I40E_FILTER_FAILED;
+ else
+ new->state = I40E_FILTER_ACTIVE;
continue;
}
/* add to add array */
if (num_add == 0)
- add_head = f;
+ add_head = new;
cmd_flags = 0;
- ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
- if (f->vlan == I40E_VLAN_ANY) {
+ ether_addr_copy(add_list[num_add].mac_addr,
+ new->f->macaddr);
+ if (new->f->vlan == I40E_VLAN_ANY) {
add_list[num_add].vlan_tag = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
} else {
add_list[num_add].vlan_tag =
- cpu_to_le16((u16)(f->vlan));
+ cpu_to_le16((u16)(new->f->vlan));
}
add_list[num_add].queue_number = 0;
/* set invalid match method for later detection */
@@ -2218,11 +2275,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* the VSI's list.
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
- u64 key = i40e_addr_to_hkey(f->macaddr);
-
- hlist_del(&f->hlist);
- hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+ /* Only update the state if we're still NEW */
+ if (new->f->state == I40E_FILTER_NEW)
+ new->f->state = new->state;
+ hlist_del(&new->hlist);
+ kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
@@ -2383,8 +2441,8 @@ err_no_memory:
/* Restore elements on the temporary add and delete lists */
spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
- i40e_undo_filter_entries(vsi, &tmp_del_list);
- i40e_undo_filter_entries(vsi, &tmp_add_list);
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi, &tmp_add_list);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -2574,12 +2632,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/**
* i40e_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be added (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be added
**/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
int err;
+ if (!vid || vsi->info.pvid)
+ return -EINVAL;
+
/* Locked once because all functions invoked below iterates list*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
err = i40e_add_vlan_all_mac(vsi, vid);
@@ -2622,10 +2683,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/**
* i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be removed (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be removed
**/
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
+ if (!vid || vsi->info.pvid)
+ return;
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_rm_vlan_all_mac(vsi, vid);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -3272,7 +3336,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
wr32(hw, I40E_PFINT_RATEN(vector - 1),
- INTRL_USEC_TO_REG(vsi->int_rate_limit));
+ i40e_intrl_usec_to_reg(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
@@ -4621,8 +4685,10 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
*/
if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
(!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
+ local_bh_disable();
if (napi_reschedule(&tx_ring->q_vector->napi))
tx_ring->tx_stats.tx_lost_interrupt++;
+ local_bh_enable();
}
}
@@ -5276,6 +5342,8 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
enum i40e_aq_link_speed new_speed;
char *speed = "Unknown";
char *fc = "Unknown";
+ char *fec = "";
+ char *an = "";
new_speed = vsi->back->hw.phy.link_info.link_speed;
@@ -5335,8 +5403,23 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
break;
}
- netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+ fec = ", FEC: None";
+ an = ", Autoneg: False";
+
+ if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = ", Autoneg: True";
+
+ if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = ", FEC: CL74 FC-FEC/BASE-R";
+ else if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_RS_ENA)
+ fec = ", FEC: CL108 RS-FEC";
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
+ speed, fec, an, fc);
}
/**
@@ -6271,7 +6354,16 @@ static void i40e_link_event(struct i40e_pf *pf)
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
status = i40e_get_link_status(&pf->hw, &new_link);
- if (status) {
+
+ /* On success, disable temp link polling */
+ if (status == I40E_SUCCESS) {
+ if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
+ pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
+ } else {
+ /* Enable link polling temporarily until i40e_get_link_status
+ * returns I40E_SUCCESS
+ */
+ pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status);
return;
@@ -6323,7 +6415,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+ if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
@@ -8688,7 +8781,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.func_caps.fd_filters_best_effort;
}
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4))) {
pf->flags |= I40E_FLAG_RESTART_AUTONEG;
@@ -8697,13 +8790,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
/* Disable FW LLDP if FW < v4.3 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4)))
pf->flags |= I40E_FLAG_STOP_FW_LLDP;
/* Use the FW Set LLDP MIB API if FW > v4.40 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
(pf->hw.aq.fw_maj_ver >= 5)))
pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
@@ -8734,16 +8827,17 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
#endif /* CONFIG_PCI_IOV */
if (pf->hw.mac.type == I40E_MAC_X722) {
- pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
- I40E_FLAG_128_QP_RSS_CAPABLE |
- I40E_FLAG_HW_ATR_EVICT_CAPABLE |
- I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
- I40E_FLAG_WB_ON_ITR_CAPABLE |
- I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
- I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_USE_SET_LLDP_MIB |
- I40E_FLAG_GENEVE_OFFLOAD_CAPABLE |
- I40E_FLAG_PTP_L4_CAPABLE;
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE
+ | I40E_FLAG_128_QP_RSS_CAPABLE
+ | I40E_FLAG_HW_ATR_EVICT_CAPABLE
+ | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE
+ | I40E_FLAG_WB_ON_ITR_CAPABLE
+ | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
+ | I40E_FLAG_NO_PCI_LINK_CHECK
+ | I40E_FLAG_USE_SET_LLDP_MIB
+ | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE
+ | I40E_FLAG_PTP_L4_CAPABLE
+ | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
((pf->hw.aq.api_maj_ver == 1) &&
(pf->hw.aq.api_min_ver > 4))) {
@@ -9345,7 +9439,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
@@ -9354,7 +9448,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
@@ -9373,7 +9467,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/
eth_broadcast_addr(broadcast);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
@@ -10679,7 +10773,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -10994,6 +11087,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
/* set up the locks for the AQ, do this only once in probe
@@ -11659,6 +11753,53 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
}
/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 mac_addr[ETH_ALEN];
+ u16 flags = 0;
+
+ /* Get current MAC address in case it's an LAA */
+ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
+ ether_addr_copy(mac_addr,
+ pf->vsi[pf->lan_vsi]->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the MAC address write command to be called first
+ * with one of these flags, and then called again with the
+ * multicast enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
+
+/**
* i40e_shutdown - PCI callback for shutting down
* @pdev: PCI device information struct
**/
@@ -11680,6 +11821,9 @@ static void i40e_shutdown(struct pci_dev *pdev)
cancel_work_sync(&pf->service_task);
i40e_fdir_teardown(pf);
+ if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
@@ -11711,6 +11855,9 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
+ if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 5b6feb7edeb1..fea81ed065db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -55,7 +55,7 @@ struct i40e_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
-} __packed;
+};
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40e_allocate_dma_mem_d(h, m, s, a)
@@ -64,17 +64,17 @@ struct i40e_dma_mem {
struct i40e_virt_mem {
void *va;
u32 size;
-} __packed;
+};
#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
-#define i40e_debug(h, m, s, ...) \
-do { \
- if (((m) & (h)->debug_mask)) \
- pr_info("i40e %02x.%x " s, \
- (h)->bus.device, (h)->bus.func, \
- ##__VA_ARGS__); \
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x:%02x.%x " s, \
+ (h)->bus.bus_id, (h)->bus.device, \
+ (h)->bus.func, ##__VA_ARGS__); \
} while (0)
typedef enum i40e_status_code i40e_status;
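With the new bus_id field the debug prefix now carries the full PCI address. A standalone sketch of the resulting format (bus/device/function values are hypothetical):

#include <stdio.h>

int main(void)
{
	/* values of the kind probe copies out of the pdev */
	unsigned int bus_id = 0x03, device = 0x00, func = 0x1;

	/* mirrors the pr_info() prefix in the reworked macro above */
	printf("i40e %02x:%02x.%x %s", bus_id, device, func,
	       "link state changed\n");
	return 0;
}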
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 9e49ffafce28..2caee35528fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -280,7 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- int i;
+ unsigned int i, cleared = 0;
/* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is
@@ -306,14 +306,25 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
time_is_before_jiffies(pf->latch_events[i] + HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
pf->latch_event_flags &= ~BIT(i);
- pf->rx_hwtstamp_cleared++;
- dev_warn(&pf->pdev->dev,
- "Clearing a missed Rx timestamp event for RXTIME[%d]\n",
- i);
+ cleared++;
}
}
spin_unlock_bh(&pf->ptp_rx_lock);
+
+ /* Log a debug message if more than 2 timestamps got dropped in the
+ * same check. We don't want to log all drops because they can occur
+ * in normal scenarios such as PTP frames on multicast addresses we
+ * aren't listening to. However, the administrator should know if this
+ * is the reason packets aren't receiving timestamps.
+ */
+ if (cleared > 2)
+ dev_dbg(&pf->pdev->dev,
+ "Dropped %d missed RXTIME timestamp events\n",
+ cleared);
+
+ /* Finally, update the rx_hwtstamp_cleared counter */
+ pf->rx_hwtstamp_cleared += cleared;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 352cf7cd2ef4..97d46058d71d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -432,7 +432,12 @@ unsupported_flow:
ret = -EINVAL;
}
- /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
+ /* The buffer allocated here will normally be freed by
+ * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
+ * completion. In the event of an error adding the buffer to the FDIR
+ * ring, it will immediately be freed. It may also be freed by
+ * i40e_clean_tx_ring() when closing the VSI.
+ */
return ret;
}
@@ -1013,14 +1018,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->skb) {
- dev_kfree_skb(rx_bi->skb);
- rx_bi->skb = NULL;
- }
if (!rx_bi->page)
continue;
@@ -1425,45 +1431,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
}
/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
@@ -1478,10 +1445,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
**/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- i40e_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -1513,19 +1476,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ return (page_to_nid(page) == numa_mem_id()) &&
+ !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ * @page: page address from rx_buffer
+ * @truesize: actual size of the buffer in this page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page. We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack. We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet. If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size). This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer. Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+ /* Is any reuse possible? */
+ if (unlikely(!i40e_page_is_reusable(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are the only owner of the page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Inc ref count on page before passing it up to the stack */
+ get_page(page);
+
+ return true;
}
/**
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1538,30 +1567,29 @@ static inline bool i40e_page_is_reserved(struct page *page)
**/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- union i40e_rx_desc *rx_desc,
+ unsigned int size,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
- u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
+ unsigned int pull_len;
+
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
/* will the data fit in the skb we allocated? if so, just
* copy it as it is pretty small anyway
*/
- if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+ if (size <= I40E_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!i40e_page_is_reserved(page)))
+ /* page is reusable, we can reuse buffer as-is */
+ if (likely(i40e_page_is_reusable(page)))
return true;
/* this page cannot be reused so discard it */
@@ -1569,34 +1597,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
return false;
}
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(i40e_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ /* we need the header to contain the greater of either
+ * ETH_HLEN or 60 bytes if the skb->len is less than
+ * 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+ /* align pull length to size of long to optimize
+ * memcpy performance
+ */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
- if (rx_buffer->page_offset > last_offset)
- return false;
-#endif
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- get_page(rx_buffer->page);
+add_tail_frag:
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
- return true;
+ return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
}
/**
@@ -1611,18 +1631,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
*/
static inline
struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
+ u64 local_status_error_len =
+ le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size =
+ (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
struct i40e_rx_buffer *rx_buffer;
- struct sk_buff *skb;
struct page *page;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
- skb = rx_buffer->skb;
-
if (likely(!skb)) {
void *page_addr = page_address(page) + rx_buffer->page_offset;
@@ -1646,19 +1669,17 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
* it now to avoid a possible cache miss
*/
prefetchw(skb->data);
- } else {
- rx_buffer->skb = NULL;
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
@@ -1700,7 +1721,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
#define staterrlen rx_desc->wb.qword1.status_error_len
if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
i40e_clean_programming_status(rx_ring, rx_desc);
- rx_ring->rx_bi[ntc].skb = skb;
return true;
}
/* if we are the last buffer then there is nothing else to do */
@@ -1708,8 +1728,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_bi[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1730,12 +1748,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
- struct sk_buff *skb;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1764,7 +1782,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+ skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
if (!skb)
break;
@@ -1783,8 +1801,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
continue;
}
- if (i40e_cleanup_headers(rx_ring, skb))
+ if (i40e_cleanup_headers(rx_ring, skb)) {
+ skb = NULL;
continue;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1809,11 +1829,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_packets++;
}
+ rx_ring->skb = skb;
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1841,14 +1864,14 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
-static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
- return !!(vsi->rx_rings[idx]->rx_itr_setting);
+ return vsi->rx_rings[idx]->rx_itr_setting;
}
-static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
- return !!(vsi->tx_rings[idx]->tx_itr_setting);
+ return vsi->tx_rings[idx]->tx_itr_setting;
}
/**
@@ -1874,8 +1897,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
- rx_itr_setting = get_rx_itr_enabled(vsi, idx);
- tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+ rx_itr_setting = get_rx_itr(vsi, idx);
+ tx_itr_setting = get_tx_itr(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
@@ -2251,14 +2274,16 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @skb: ptr to the skb we're sending
+ * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
{
+ struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
union {
struct iphdr *v4;
@@ -2271,6 +2296,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u16 gso_segs, gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2309,7 +2335,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from outer checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.udp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
}
/* reset pointers to inner headers */
@@ -2330,15 +2357,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* update GSO size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
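The bytecount update above is arithmetically equivalent to the computation this patch removes from i40e_tx_map(); a standalone check with made-up frame sizes:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 7240, hdr_len = 54, gso_size = 1448;
	unsigned int gso_segs = (skb_len - hdr_len + gso_size - 1) / gso_size;

	/* new: bytecount starts at skb->len in i40e_xmit_frame_ring(),
	 * then i40e_tso() adds one header per extra segment
	 */
	unsigned int new_count = skb_len + (gso_segs - 1) * hdr_len;

	/* old: "multiply data chunks by size of headers" in i40e_tx_map() */
	unsigned int old_count = skb_len - hdr_len + gso_segs * hdr_len;

	printf("segs=%u new=%u old=%u\n", gso_segs, new_count, old_count);
	return 0;
}

Both schemes print 7456 for this (hypothetical) 5-segment frame.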
@@ -2699,7 +2734,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 gso_segs;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
@@ -2708,15 +2742,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT;
}
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
-
- /* multiply data chunks by size of headers */
- first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
- first->gso_segs = gso_segs;
- first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2902,8 +2927,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
- if (__skb_linearize(skb))
- goto out_drop;
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2919,6 +2946,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
@@ -2926,16 +2959,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
@@ -2973,7 +3003,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e065321ce8ed..f80979025c01 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -52,7 +52,20 @@
*/
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
-#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+/**
+ * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
+ * @intrl: interrupt rate limit, in microseconds, to convert
+ *
+ * This function converts an interrupt rate limit in microseconds to the
+ * register format expected by the firmware when programming the rate
+ * limit, and sets the enable bit whenever the limit is non-zero.
+ */
+static inline u16 i40e_intrl_usec_to_reg(int intrl)
+{
+ if (intrl >> 2)
+ return ((intrl >> 2) | INTRL_ENA);
+ else
+ return 0;
+}
#define I40E_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */
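The register keeps the limit in 4-usec units with bit 6 as the enable flag, so the conversion truncates. A standalone round-trip check (the helper is copied from above; everything else is scaffolding):

#include <stdio.h>

#define INTRL_ENA			(1u << 6)	/* BIT(6) */
#define INTRL_REG_TO_USEC(intrl)	(((intrl) & ~INTRL_ENA) << 2)

static unsigned short i40e_intrl_usec_to_reg(int intrl)
{
	return (intrl >> 2) ? ((intrl >> 2) | INTRL_ENA) : 0;
}

int main(void)
{
	int usecs = 125;	/* I40E_INTRL_8K */
	unsigned short reg = i40e_intrl_usec_to_reg(usecs);

	/* 125 -> (125 >> 2) | 0x40 == 0x5f; reading back yields 124
	 * because of the 4-usec granularity
	 */
	printf("reg=0x%02x, back=%u usecs\n", reg, INTRL_REG_TO_USEC(reg));
	return 0;
}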
@@ -240,7 +253,6 @@ struct i40e_tx_buffer {
};
struct i40e_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
@@ -341,6 +353,14 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
+ struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must
+ * return before it sees the EOP for
+ * the current packet, we save that skb
+ * here and resume receiving this
+ * packet the next time
+ * i40e_clean_rx_ring_irq() is called
+ * for this ring.
+ */
} ____cacheline_internodealigned_in_smp;
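The new ring->skb field is a carry-over slot: when the poll budget runs out mid-packet, the partially assembled skb is parked on the ring and picked up by the next i40e_clean_rx_irq() call. A userspace analogue of the pattern (ring layout and fragment data invented for illustration):

#include <stdio.h>
#include <string.h>

struct ring {
	const char *frags[6];	/* descriptors; NULL marks "no more" */
	int next;		/* next_to_clean */
	char partial[64];	/* stands in for ring->skb */
};

static void poll(struct ring *r, int budget)
{
	while (budget-- && r->frags[r->next]) {
		strcat(r->partial, r->frags[r->next++]);
		if (strchr(r->partial, '\n')) {	/* EOP marker */
			printf("complete: %s", r->partial);
			r->partial[0] = '\0';
		}
	}
}

int main(void)
{
	struct ring r = { { "he", "llo", " wo", "rld\n", NULL }, 0, "" };

	poll(&r, 2);	/* budget hit mid-packet; partial state kept */
	poll(&r, 4);	/* resumes and completes the packet */
	return 0;
}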
enum i40e_latency_range {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index edc0abdf4783..939f9fdc8f85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -125,7 +125,6 @@ enum i40e_debug_mask {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@@ -185,6 +184,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -470,6 +470,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index a6198b727e24..cbbf8648307a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
- f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ?
- vf->port_vlan_id : -1);
+ f = i40e_add_mac_filter(vsi,
+ vf->default_lan_addr.addr);
if (!f)
dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
eth_broadcast_addr(broadcast);
- f = i40e_add_filter(vsi, broadcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1);
+ f = i40e_add_mac_filter(vsi, broadcast);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
@@ -1942,12 +1940,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f) {
- if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
- else
- f = i40e_add_filter(vsi, al->list[i].addr, -1);
- }
+ if (!f)
+ f = i40e_add_mac_filter(vsi, al->list[i].addr);
if (!f) {
dev_err(&pf->pdev->dev,
@@ -2012,7 +2006,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
- if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
+ if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2722,14 +2716,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr))
- i40e_del_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id ? vf->port_vlan_id : -1);
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
- i40e_del_filter(vsi, f->macaddr, f->vlan);
+ __i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index aa63b7fb993d..89dfdbca13db 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
hw->mac.type = I40E_MAC_X722_VF;
break;
case I40E_DEV_ID_VF:
@@ -305,7 +304,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
- u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
@@ -333,12 +331,18 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if (buf_len < len)
len = buf_len;
/* write the full 16-byte chunks */
- for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
- /* write whatever's left over without overrunning the buffer */
- if (i < len)
- i40e_debug(hw, mask, "\t0x%04X %*ph\n",
- i, len - i, buf + i);
+ if (hw->debug_mask & mask) {
+ char prefix[20];
+
+ snprintf(prefix, sizeof(prefix),
+ "i40evf %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
}
}
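With DUMP_PREFIX_OFFSET, rowsize 16 and groupsize 1, print_hex_dump() emits one line per 16 bytes of the AQ buffer, each carrying the prefix built above. A rough userspace approximation of that output shape (buffer contents and PCI address are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned char buf[20] = { 0x0c, 0x16, 0x01 };
	unsigned int len = sizeof(buf), i, j;

	for (i = 0; i < len; i += 16) {
		printf("i40evf %02x:%02x.%x: \t0x%08x:", 0x06, 0x00, 0x0, i);
		for (j = i; j < i + 16 && j < len; j++)
			printf(" %02x", buf[j]);
		printf("\n");
	}
	return 0;
}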
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 21dcaee1ad1d..d76393c95056 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -48,7 +48,6 @@
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index df67ef37b7f3..c91fcf43ccbc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -501,14 +501,15 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->skb) {
- dev_kfree_skb(rx_bi->skb);
- rx_bi->skb = NULL;
- }
if (!rx_bi->page)
continue;
@@ -903,45 +904,6 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
}
/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* i40e_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
@@ -956,10 +918,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
**/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- i40e_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -991,19 +949,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ return (page_to_nid(page) == numa_mem_id()) &&
+ !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ * @page: page address from rx_buffer
+ * @truesize: actual size of the buffer in this page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page. We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack. We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet. If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size). This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer. Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+ /* Is any reuse possible? */
+ if (unlikely(!i40e_page_is_reusable(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are the only owner of the page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Inc ref count on page before passing it up to the stack */
+ get_page(page);
+
+ return true;
}
/**
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1016,30 +1040,29 @@ static inline bool i40e_page_is_reserved(struct page *page)
**/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- union i40e_rx_desc *rx_desc,
+ unsigned int size,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
- u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
+ unsigned int pull_len;
+
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
/* will the data fit in the skb we allocated? if so, just
* copy it as it is pretty small anyway
*/
- if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+ if (size <= I40E_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!i40e_page_is_reserved(page)))
+ /* page is reusable, we can reuse buffer as-is */
+ if (likely(i40e_page_is_reusable(page)))
return true;
/* this page cannot be reused so discard it */
@@ -1047,34 +1070,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
return false;
}
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(i40e_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ /* we need the header to contain the greater of either
+ * ETH_HLEN or 60 bytes if the skb->len is less than
+ * 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+ /* align pull length to size of long to optimize
+ * memcpy performance
+ */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
- if (rx_buffer->page_offset > last_offset)
- return false;
-#endif
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- get_page(rx_buffer->page);
+add_tail_frag:
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
- return true;
+ return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
}
/**
@@ -1089,18 +1104,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
*/
static inline
struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
+ u64 local_status_error_len =
+ le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size =
+ (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
struct i40e_rx_buffer *rx_buffer;
- struct sk_buff *skb;
struct page *page;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
- skb = rx_buffer->skb;
-
if (likely(!skb)) {
void *page_addr = page_address(page) + rx_buffer->page_offset;
@@ -1124,19 +1142,17 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
* it now to avoid a possible cache miss
*/
prefetchw(skb->data);
- } else {
- rx_buffer->skb = NULL;
}
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
@@ -1180,8 +1196,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_bi[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1202,12 +1216,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
- struct sk_buff *skb;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1236,7 +1250,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+ skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
if (!skb)
break;
@@ -1255,8 +1269,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
continue;
}
- if (i40e_cleanup_headers(rx_ring, skb))
+ if (i40e_cleanup_headers(rx_ring, skb)) {
+ skb = NULL;
continue;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1273,11 +1289,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_packets++;
}
+ rx_ring->skb = skb;
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1305,18 +1324,18 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
-static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
- return !!(adapter->rx_rings[idx].rx_itr_setting);
+ return adapter->rx_rings[idx].rx_itr_setting;
}
-static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
- return !!(adapter->tx_rings[idx].tx_itr_setting);
+ return adapter->tx_rings[idx].tx_itr_setting;
}
/**
@@ -1342,8 +1361,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
- rx_itr_setting = get_rx_itr_enabled(vsi, idx);
- tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+ rx_itr_setting = get_rx_itr(vsi, idx);
+ tx_itr_setting = get_tx_itr(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
@@ -1549,14 +1568,16 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @skb: ptr to the skb we're sending
+ * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
{
+ struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
union {
struct iphdr *v4;
@@ -1569,6 +1590,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u16 gso_segs, gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1607,7 +1629,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from outer checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.udp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1628,15 +1651,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* update GSO size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
@@ -1949,7 +1980,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 gso_segs;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
@@ -1958,15 +1988,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT;
}
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
-
- /* multiply data chunks by size of headers */
- first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
- first->gso_segs = gso_segs;
- first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2151,8 +2172,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
- if (__skb_linearize(skb))
- goto out_drop;
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2168,6 +2191,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
/* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
@@ -2175,16 +2204,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
@@ -2211,7 +2237,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index a5fc789f78eb..8274ba68bd32 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -239,7 +239,6 @@ struct i40e_tx_buffer {
};
struct i40e_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
@@ -340,6 +339,14 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
+ struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must
+ * return before it sees the EOP for
+ * the current packet, we save that skb
+ * here and resume receiving this
+ * packet the next time
+ * i40evf_clean_rx_ring_irq() is called
+ * for this ring.
+ */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c85e8a31c072..16bb88084bb9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -100,7 +100,6 @@ enum i40e_debug_mask {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@@ -159,6 +158,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -443,6 +443,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index fc374f833aa9..d38a2b2aea2b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,6 +81,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fffe4cf2c20b..00c42d803276 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -195,6 +195,7 @@ struct i40evf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
+ u32 client_pending;
struct msix_entry *msix_entries;
u32 flags;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c0fc53361800..f35dcaac5bb7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_BUILD 27
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
/* required last entry */
{0, }
};
@@ -2154,6 +2153,11 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ /* We explicitly don't free resources here because the hardware is
+ * still active and can DMA into memory. Resources are cleared in
+ * i40evf_virtchnl_completion() after we get confirmation from the PF
+ * driver that the rings have been stopped.
+ */
return 0;
}
@@ -2727,6 +2731,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
@@ -2871,7 +2876,8 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_request_reset(adapter);
msleep(50);
}
-
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
i40evf_misc_irq_disable(adapter);
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 2059a8e88908..bee58af390e1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -999,6 +999,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ adapter->client_pending &=
+ ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ break;
case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct i40e_virtchnl_rss_hena *vrh =
(struct i40e_virtchnl_rss_hena *)msg;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a61447fd778e..ee443985581f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
+ /* Make sure the PHY is in a good state. Several people have reported
+ * firmware leaving the PHY's page select register set to something
+ * other than the default of zero, which causes the PHY ID read to
+ * access something other than the intended register.
+ */
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 8aa798737d4d..07d48f2e3369 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
ret_val = igb_pool_flash_update_done_i210(hw);
if (ret_val)
- hw_dbg("Flash update complete\n");
- else
hw_dbg("Flash update time out\n");
+ else
+ hw_dbg("Flash update complete\n");
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5010e2232c50..5eff82678f0b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -792,15 +792,13 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
- if (hw->mac.type == e1000_i350) {
+ if (hw->mac.type == e1000_i350)
lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
- + lan_offset, 1, &nvm_data);
- } else {
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
- 1, &nvm_data);
- }
+ else
+ lan_offset = 0;
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
+ 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
@@ -808,8 +806,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
hw->fc.requested_mode = e1000_fc_none;
- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
- NVM_WORD0F_ASM_DIR)
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
hw->fc.requested_mode = e1000_fc_tx_pause;
else
hw->fc.requested_mode = e1000_fc_full;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 5b54254aed4f..2788a5409023 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_id;
+ /* ensure PHY page selection to fix misconfigured i210 */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index d84afdd83e53..58adbf234e07 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -320,7 +320,7 @@
#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */
#define E1000_MANC 0x05820 /* Management Control - RW */
#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1515abaa5ac9..be456bae8169 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+static void igb_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
@@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev), netdev->last_rx);
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
@@ -3275,7 +3275,9 @@ static int __igb_close(struct net_device *netdev, bool suspending)
int igb_close(struct net_device *netdev)
{
- return __igb_close(netdev, false);
+ if (netif_device_present(netdev))
+ return __igb_close(netdev, false);
+ return 0;
}
/**
@@ -3394,7 +3396,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
tdba & 0x00000000ffffffffULL);
wr32(E1000_TDBAH(reg_idx), tdba >> 32);
- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+ ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
wr32(E1000_TDH(reg_idx), 0);
writel(0, ring->tail);
@@ -3733,7 +3735,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
wr32(E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
@@ -5402,8 +5404,8 @@ static void igb_reset_task(struct work_struct *work)
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
**/
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void igb_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -5411,8 +5413,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
igb_update_stats(adapter, &adapter->stats64);
memcpy(stats, &adapter->stats64, sizeof(*stats));
spin_unlock(&adapter->stats64_lock);
-
- return stats;
}
/**
@@ -7564,6 +7564,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev))
@@ -7572,6 +7573,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_ptp_suspend(adapter);
igb_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -7690,16 +7692,15 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- if (netdev->flags & IFF_UP) {
- rtnl_lock();
+ rtnl_lock();
+ if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
- rtnl_unlock();
- if (err)
- return err;
- }
- netif_device_attach(netdev);
- return 0;
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return err;
}
static int igb_runtime_idle(struct device *dev)
@@ -7898,6 +7899,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+	/* In case of a PCI error, the adapter loses its HW address
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
igb_reset(adapter);
wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
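The io_addr reassignment is the heart of this recovery fix. The driver keeps two copies of the BAR mapping: hw->hw_addr, which the register accessors use and which is zeroed when a surprise removal is detected, and adapter->io_addr, which always retains the original ioremap() result. Without the reassignment, every register write after igb_io_slot_reset() would keep taking the removed-adapter path. A sketch of the pattern, with hypothetical names standing in for the igb structures:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical model of the two-pointer scheme: rw_addr is cleared on a
 * detected surprise removal, while mapped_addr always holds the original
 * ioremap() result and is used to re-arm rw_addr after a slot reset.
 */
struct nic {
	volatile unsigned char *rw_addr;	/* like hw->hw_addr */
	volatile unsigned char *mapped_addr;	/* like adapter->io_addr */
};

static void on_pci_error(struct nic *n)
{
	n->rw_addr = NULL;		/* register access now fails fast */
}

static void on_slot_reset(struct nic *n)
{
	n->rw_addr = n->mapped_addr;	/* re-assign before the HW reset */
}

int main(void)
{
	static unsigned char bar0[4096];
	struct nic n = { bar0, bar0 };

	on_pci_error(&n);
	on_slot_reset(&n);
	printf("restored: %s\n", n.rw_addr == n.mapped_addr ? "yes" : "no");
	return 0;
}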
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5826b1ddedcf..fbd220d137b3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__IXGB_DOWN, &adapter->flags))
ixgb_irq_enable(adapter);
}
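napi_complete_done() reports how much work the poll actually did, information napi_complete() throws away; the core can use it for busy-poll and interrupt-deferral accounting. The contract the handler has to keep, modeled in user space (ring depth and budget are arbitrary):

#include <stdbool.h>
#include <stdio.h>

/* User-space model of the NAPI contract: the poll handler reports how
 * much work it did, and only when work_done < budget does it complete
 * and re-enable interrupts. Names here are illustrative.
 */
static int pending = 37;	/* packets waiting in the ring */

static int poll(int budget, bool *irq_enabled)
{
	int work_done = pending < budget ? pending : budget;

	pending -= work_done;
	if (work_done < budget)
		*irq_enabled = true;	/* napi_complete_done() + irq enable */
	return work_done;
}

int main(void)
{
	bool irq = false;

	while (!irq)
		printf("polled %d\n", poll(16, &irq));
	return 0;
}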
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ef81c3d8c295..a2cc43d28888 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,9 +55,6 @@
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BP_EXTENDED_STATS
-#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -94,6 +91,14 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBE_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#else
+#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#endif
+
/*
* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
* reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
@@ -107,6 +112,9 @@
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IXGBE_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
enum ixgbe_tx_flags {
/* cmd_type flags */
IXGBE_TX_FLAGS_HW_VLAN = 0x01,
@@ -159,6 +167,7 @@ enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
IXGBEVF_XCAST_MODE_MULTI,
IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
};
struct vf_macvlans {
@@ -194,17 +203,17 @@ struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
-#ifdef BP_EXTENDED_STATS
- u64 yields;
- u64 misses;
- u64 cleaned;
-#endif /* BP_EXTENDED_STATS */
};
struct ixgbe_tx_queue_stats {
@@ -225,15 +234,20 @@ struct ixgbe_rx_queue_stats {
#define IXGBE_TS_HDR_LEN 8
enum ixgbe_ring_state_t {
+ __IXGBE_RX_3K_BUFFER,
+ __IXGBE_RX_BUILD_SKB_ENABLED,
+ __IXGBE_RX_RSC_ENABLED,
+ __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+ __IXGBE_RX_FCOE,
__IXGBE_TX_FDIR_INIT_DONE,
__IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
- __IXGBE_RX_RSC_ENABLED,
- __IXGBE_RX_CSUM_UDP_ZERO_ERR,
- __IXGBE_RX_FCOE,
};
+#define ring_uses_build_skb(ring) \
+ test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
struct ixgbe_fwd_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
@@ -343,19 +357,20 @@ struct ixgbe_ring_feature {
*/
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
-#ifdef IXGBE_FCOE
- if (test_bit(__IXGBE_RX_FCOE, &ring->state))
- return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
- IXGBE_RXBUFFER_3K;
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ return IXGBE_RXBUFFER_3K;
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_build_skb(ring))
+ return IXGBE_MAX_FRAME_BUILD_SKB;
#endif
return IXGBE_RXBUFFER_2K;
}
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
-#ifdef IXGBE_FCOE
- if (test_bit(__IXGBE_RX_FCOE, &ring->state))
- return (PAGE_SIZE < 8192) ? 1 : 0;
+#if (PAGE_SIZE < 8192)
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ return 1;
#endif
return 0;
}
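Restated, the new sizing policy is: 3K receive buffers whenever __IXGBE_RX_3K_BUFFER is set, the padded build_skb frame size when build_skb is in use on small-page systems, and 2K otherwise. A user-space model of that decision, with the PAGE_SIZE preprocessor branch collapsed into a flag and the padded size approximated by a constant:

#include <stdbool.h>
#include <stdio.h>

#define RXBUFFER_2K		2048
#define RXBUFFER_3K		3072
#define MAX_FRAME_BUILD_SKB	1536	/* stand-in for the padded 2K case */

static unsigned int rx_bufsz(bool wants_3k, bool build_skb, bool small_page)
{
	if (wants_3k)
		return RXBUFFER_3K;
	if (small_page && build_skb)
		return MAX_FRAME_BUILD_SKB;
	return RXBUFFER_2K;
}

int main(void)
{
	printf("%u %u %u\n",
	       rx_bufsz(true, false, true),
	       rx_bufsz(false, true, true),
	       rx_bufsz(false, false, true));
	return 0;
}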
@@ -398,127 +413,10 @@ struct ixgbe_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
-#ifdef CONFIG_NET_RX_BUSY_POLL
- atomic_t state;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum ixgbe_qv_state_t {
- IXGBE_QV_STATE_IDLE = 0,
- IXGBE_QV_STATE_NAPI,
- IXGBE_QV_STATE_POLL,
- IXGBE_QV_STATE_DISABLE
-};
-
-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
-{
- /* reset state to idle */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* called from the device poll routine to get ownership of a q_vector */
-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
-{
- int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
- IXGBE_QV_STATE_NAPI);
-#ifdef BP_EXTENDED_STATS
- if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->tx.ring->stats.yields++;
-#endif
-
- return rc == IXGBE_QV_STATE_IDLE;
-}
-
-/* returns true is someone tried to get the qv while napi had it */
-static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
-{
- WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
-
- /* flush any outstanding Rx frames */
- if (q_vector->napi.gro_list)
- napi_gro_flush(&q_vector->napi, false);
-
- /* reset state to idle */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* called from ixgbe_low_latency_poll() */
-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
-{
- int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
- IXGBE_QV_STATE_POLL);
-#ifdef BP_EXTENDED_STATS
- if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->rx.ring->stats.yields++;
-#endif
- return rc == IXGBE_QV_STATE_IDLE;
-}
-
-/* returns true if someone tried to get the qv while it was locked */
-static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
-{
- WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
-
- /* reset state to idle */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
-{
- return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
-}
-
-/* false if QV is currently owned */
-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
-{
- int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
- IXGBE_QV_STATE_DISABLE);
-
- return rc == IXGBE_QV_STATE_IDLE;
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
-{
-}
-
-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
-{
- return true;
-}
-
-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
-{
- return false;
-}
-
-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
-{
- return true;
-}
-
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
#ifdef CONFIG_IXGBE_HWMON
#define IXGBE_HWMON_TYPE_LOC 0
@@ -661,6 +559,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
+#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
+#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
+#define IXGBE_FLAG2_RX_LEGACY BIT(16)
/* Tx fast path data */
int num_tx_queues;
@@ -862,6 +763,7 @@ enum ixgbe_boards {
board_X550,
board_X550EM_x,
board_x550em_a,
+ board_x550em_a_fw,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -870,8 +772,9 @@ extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
+extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
-extern const struct dcbnl_rtnl_ops dcbnl_ops;
+extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
extern char ixgbe_driver_name[];
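The three new flags2 bits follow the file's existing BIT(n) convention, keeping capability (EEE_CAPABLE), current state (EEE_ENABLED) and the user-selectable receive mode (RX_LEGACY) independently testable. A minimal sketch of the idiom, reusing the bit positions from above:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define FLAG_EEE_CAPABLE	BIT(14)
#define FLAG_EEE_ENABLED	BIT(15)
#define FLAG_RX_LEGACY		BIT(16)

int main(void)
{
	unsigned int flags2 = FLAG_EEE_CAPABLE;

	flags2 |= FLAG_EEE_ENABLED;	/* enable one feature */
	flags2 &= ~FLAG_RX_LEGACY;	/* ensure another is off */
	printf("eee on: %d, legacy rx: %d\n",
	       !!(flags2 & FLAG_EEE_ENABLED), !!(flags2 & FLAG_RX_LEGACY));
	return 0;
}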
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 805ab319e578..523f9d05a810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
case ixgbe_phy_tn:
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e00aaeb91827..30535e6b68f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 8832df3eba25..c38d50c1fcf7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -100,6 +100,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
supported = true;
break;
default:
@@ -348,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_SPARC
+#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
u32 regval;
@@ -3382,6 +3384,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_100_FULL;
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ }
+ break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
@@ -3578,7 +3587,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
u32 i;
u8 sum = 0;
@@ -3593,43 +3602,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
}
/**
- * ixgbe_host_interface_command - Issue command to manageability block
+ * ixgbe_hic_unlocked - Issue command to manageability block unlocked
* @hw: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
+ * @buffer: command to write and where the return status will be placed
* @length: length of buffer, must be multiple of 4 bytes
* @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
*
- * Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * Communicates with the manageability block. Returns 0 on success,
+ * else IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails or
+ * times out waiting for completion.
+ *
+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+ * by the caller.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
- u32 length, u32 timeout,
- bool return_data)
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ u32 timeout)
{
- u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u32 hicr, i, bi, fwsts;
- u16 buf_len, dword_len;
- union {
- struct ixgbe_hic_hdr hdr;
- u32 u32arr[1];
- } *bp = buffer;
- s32 status;
+ u32 hicr, i, fwsts;
+ u16 dword_len;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
- /* Take management host interface semaphore */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
- if (status)
- return status;
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3639,15 +3634,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto rel_out;
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- status = IXGBE_ERR_INVALID_ARGUMENT;
- goto rel_out;
+ return IXGBE_ERR_INVALID_ARGUMENT;
}
dword_len = length >> 2;
@@ -3657,7 +3650,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(bp->u32arr[i]));
+ i, cpu_to_le32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3671,11 +3664,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
/* Check command successful completion. */
if ((timeout && i == timeout) ||
- !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
- hw_dbg(hw, "Command has failed with no status valid.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto rel_out;
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ return 0;
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ *
+ * Communicates with the manageability block. On success return 0
+ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ u32 length, u32 timeout,
+ bool return_data)
+{
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ union {
+ struct ixgbe_hic_hdr hdr;
+ u32 u32arr[1];
+ } *bp = buffer;
+ u16 buf_len, dword_len;
+ s32 status;
+ u32 bi;
+
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
+ if (status)
+ goto rel_out;
if (!return_data)
goto rel_out;
@@ -3722,6 +3758,8 @@ rel_out:
* @min: driver version minor number
* @build: driver version build number
* @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
@@ -3729,7 +3767,8 @@ rel_out:
* semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
+ u8 build, u8 sub, __always_unused u16 len,
+ __always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
int i;
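Factoring ixgbe_host_interface_command() into a thin locked wrapper around ixgbe_hic_unlocked() is the standard shape for letting some callers batch several commands under a single semaphore hold while one-shot callers stay convenient. A user-space model with a pthread mutex standing in for the IXGBE_GSSR_SW_MNG_SM semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mng_sem = PTHREAD_MUTEX_INITIALIZER;

/* Does the real work; caller must already hold mng_sem. */
static int hic_unlocked(int cmd)
{
	printf("issue command %d\n", cmd);
	return 0;
}

/* Convenience wrapper for single commands: take lock, call worker. */
static int hic(int cmd)
{
	int rc;

	pthread_mutex_lock(&mng_sem);
	rc = hic_unlocked(cmd);
	pthread_mutex_unlock(&mng_sem);
	return rc;
}

int main(void)
{
	hic(1);				/* one-shot caller */

	pthread_mutex_lock(&mng_sem);	/* multi-command caller */
	hic_unlocked(2);
	hic_unlocked(3);
	pthread_mutex_unlock(&mng_sem);
	return 0;
}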
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 5b3e3c65927e..e083732adf64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -111,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 ver);
+ u8 build, u8 ver, u16 len, const char *str);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index b8fc3cfec831..78c52375acc6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return err ? 1 : 0;
}
-const struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = {
.ieee_getets = ixgbe_dcbnl_ieee_getets,
.ieee_setets = ixgbe_dcbnl_ieee_setets,
.ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index fd192bf29b26..a7574c7b12af 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
+static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
+
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
SUPPORTED_10000baseKX4_Full | \
@@ -197,15 +204,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
SUPPORTED_1000baseKX_Full :
SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
- SUPPORTED_1000baseKX_Full :
- SUPPORTED_1000baseT_Full;
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_10_FULL)
+ ecmd->supported |= SUPPORTED_10baseT_Full;
/* default advertised speed if phy.autoneg_advertised isn't set */
ecmd->advertising = ecmd->supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
ecmd->advertising = 0;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -237,6 +246,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_tn:
case ixgbe_phy_aq:
case ixgbe_phy_x550em_ext_t:
+ case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -337,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case IXGBE_LINK_SPEED_10GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_10000);
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_5000);
+ break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_2500);
break;
@@ -346,6 +359,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case IXGBE_LINK_SPEED_100_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_100);
break;
+ case IXGBE_LINK_SPEED_10_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ break;
default:
break;
}
@@ -394,6 +410,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
if (ecmd->advertising & ADVERTISED_100baseT_Full)
advertised |= IXGBE_LINK_SPEED_100_FULL;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_10_FULL;
+
if (old == advertised)
return err;
/* this sets the link speed and restarts auto-neg */
@@ -989,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
static void ixgbe_get_ringparam(struct net_device *netdev,
@@ -1128,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IXGBE_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1170,12 +1193,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = 0;
- data[i+1] = 0;
- data[i+2] = 0;
- i += 3;
-#endif
continue;
}
@@ -1185,12 +1202,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i+1] = ring->stats.misses;
- data[i+2] = ring->stats.cleaned;
- i += 3;
-#endif
}
for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->rx_ring[j];
@@ -1198,12 +1209,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = 0;
- data[i+1] = 0;
- data[i+2] = 0;
- i += 3;
-#endif
continue;
}
@@ -1213,12 +1218,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i+1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i+1] = ring->stats.misses;
- data[i+2] = ring->stats.cleaned;
- i += 3;
-#endif
}
for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
@@ -1255,28 +1254,12 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_bp_napi_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_bp_poll_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
sprintf(p, "tx_pb_%u_pxon", i);
@@ -1292,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ixgbe_priv_flags_strings,
+ IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
}
}
@@ -1896,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
- while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
+ while (rx_desc->wb.upper.length) {
/* check Rx buffer */
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
@@ -1918,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
/* unmap buffer on Tx side */
tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -3173,6 +3168,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ if (hw->phy.type == ixgbe_phy_fw)
+ return -ENXIO;
+
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
@@ -3218,6 +3216,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
if (ee->len == 0)
return -EINVAL;
+ if (hw->phy.type == ixgbe_phy_fw)
+ return -ENXIO;
+
for (i = ee->offset; i < ee->offset + ee->len; i++) {
/* I2C reads can take long time */
if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
@@ -3237,6 +3238,167 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
return 0;
}
+static const struct {
+ ixgbe_link_speed mac_speed;
+ u32 supported;
+} ixgbe_ls_map[] = {
+ { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
+ { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
+ { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
+ { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+};
+
+static const struct {
+ u32 lp_advertised;
+ u32 mac_speed;
+} ixgbe_lp_map[] = {
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
+	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full },
+};
+
+static int
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 rc;
+ u16 i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
+ if (rc)
+ return rc;
+
+ edata->lp_advertised = 0;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
+ if (info[0] & ixgbe_lp_map[i].lp_advertised)
+ edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ }
+
+ edata->supported = 0;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+ if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
+ edata->supported |= ixgbe_ls_map[i].supported;
+ }
+
+ edata->advertised = 0;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+ if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
+ edata->advertised |= ixgbe_ls_map[i].supported;
+ }
+
+ edata->eee_enabled = !!edata->advertised;
+ edata->tx_lpi_enabled = edata->eee_enabled;
+ if (edata->advertised & edata->lp_advertised)
+ edata->eee_active = true;
+
+ return 0;
+}
+
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
+ return -EOPNOTSUPP;
+
+ if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
+ return ixgbe_get_eee_fw(adapter, edata);
+
+ return -EOPNOTSUPP;
+}
+
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_data;
+ s32 ret_val;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
+ return -EOPNOTSUPP;
+
+ memset(&eee_data, 0, sizeof(struct ethtool_eee));
+
+ ret_val = ixgbe_get_eee(netdev, &eee_data);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_data.eee_enabled && !edata->eee_enabled) {
+ if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ e_err(drv, "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
+ e_err(drv,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_data.advertised != edata->advertised) {
+ e_err(drv,
+ "Setting EEE advertised speeds is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if (eee_data.eee_enabled != edata->eee_enabled) {
+ if (edata->eee_enabled) {
+ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+ hw->phy.eee_speeds_advertised =
+ hw->phy.eee_speeds_supported;
+ } else {
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+ hw->phy.eee_speeds_advertised = 0;
+ }
+
+ /* reset link */
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
+ return 0;
+}
+
+static u32 ixgbe_get_priv_flags(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
+ if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
+ flags2 |= IXGBE_FLAG2_RX_LEGACY;
+
+ if (flags2 != adapter->flags2) {
+ adapter->flags2 = flags2;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -3269,8 +3431,12 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
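The private-flags pair translates between the driver's internal flags2 word and the flat bitmask ethtool exposes, one string per bit; ethtool --set-priv-flags <dev> legacy-rx on would flip the single flag defined above. The translation itself, modeled in plain C:

#include <stdio.h>

#define FLAG2_RX_LEGACY		(1u << 16)	/* internal flags2 bit */
#define PRIV_FLAGS_LEGACY_RX	(1u << 0)	/* bit 0 <-> "legacy-rx" */

/* Map internal state to the ethtool-visible bitmask and back. */
static unsigned int get_priv_flags(unsigned int flags2)
{
	return (flags2 & FLAG2_RX_LEGACY) ? PRIV_FLAGS_LEGACY_RX : 0;
}

static unsigned int set_priv_flags(unsigned int flags2, unsigned int priv)
{
	flags2 &= ~FLAG2_RX_LEGACY;
	if (priv & PRIV_FLAGS_LEGACY_RX)
		flags2 |= FLAG2_RX_LEGACY;
	return flags2;
}

int main(void)
{
	unsigned int flags2 = set_priv_flags(0, PRIV_FLAGS_LEGACY_RX);

	printf("priv flags: 0x%x\n", get_priv_flags(flags2));
	return 0;
}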
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 15ab337fd7ad..1b8be7d813bd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -308,6 +308,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_rss(adapter);
}
+#define IXGBE_RSS_64Q_MASK 0x3F
#define IXGBE_RSS_16Q_MASK 0xF
#define IXGBE_RSS_8Q_MASK 0x7
#define IXGBE_RSS_4Q_MASK 0x3
@@ -604,6 +605,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
**/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_ring_feature *f;
u16 rss_i;
@@ -612,7 +614,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
rss_i = f->limit;
f->indices = rss_i;
- f->mask = IXGBE_RSS_16Q_MASK;
+
+ if (hw->mac.type < ixgbe_mac_X550)
+ f->mask = IXGBE_RSS_16Q_MASK;
+ else
+ f->mask = IXGBE_RSS_64Q_MASK;
/* disable ATR by default, it will be configured below */
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -847,11 +853,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- /* initialize busy poll */
- atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
-
-#endif
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
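Widening f->mask from IXGBE_RSS_16Q_MASK to IXGBE_RSS_64Q_MASK lets X550-class parts spread RSS across up to 64 queues. Very roughly, the mask bounds which low-order index bits may select a ring; a toy model of masking a hash down to a bounded queue index (the hash value is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define RSS_16Q_MASK	0x0F
#define RSS_64Q_MASK	0x3F

/* Fold a 32-bit RSS hash down to a queue index bounded by the mask. */
static unsigned int queue_for_hash(uint32_t hash, unsigned int mask)
{
	return hash & mask;
}

int main(void)
{
	uint32_t hash = 0x9e3779b9;	/* arbitrary example hash */

	printf("pre-X550 queue: %u, X550 queue: %u\n",
	       queue_for_hash(hash, RSS_16Q_MASK),
	       queue_for_hash(hash, RSS_64Q_MASK));
	return 0;
}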
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1e2f39ebd824..060cdce8058f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.4.0-k"
+#define DRV_VERSION "5.0.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
+ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
/* required last entry */
{0, }
};
@@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
@@ -607,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state "
- "trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
+ "trans_start\n");
+ pr_info("%-15s %016lX %016lX\n",
netdev->name,
netdev->state,
- dev_trans_start(netdev),
- netdev->last_rx);
+ dev_trans_start(netdev));
}
/* Print Registers */
@@ -942,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
- struct ixgbe_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* tx_buffer must be completely set up in the transmit path */
-}
-
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1195,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */
@@ -1549,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
}
}
+static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
+}
+
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
@@ -1567,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
/*
* if mapping failed free memory back to system since
@@ -1583,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ixgbe_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
return true;
}
@@ -1598,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ u16 bufsz;
/* nothing to do */
if (!cleaned_count)
@@ -1607,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
+ bufsz = ixgbe_rx_bufsz(rx_ring);
+
do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+
/*
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
@@ -1626,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
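Zeroing wb.upper.length at refill is what makes the new completion test work: hardware writes a non-zero packet length into the descriptor on writeback, so a zero length now means "not done", replacing the DD status-bit check. The handshake, modeled in user space:

#include <stdint.h>
#include <stdio.h>

struct rx_desc { uint16_t length; };	/* just the writeback length */

static int clean(struct rx_desc *ring, int n)
{
	int done = 0;

	for (int i = 0; i < n && ring[i].length; i++) {
		done++;			/* process packet of ring[i].length */
		ring[i].length = 0;	/* refill: re-arm the "done" signal */
	}
	return done;
}

int main(void)
{
	struct rx_desc ring[4] = { {60}, {1514}, {0}, {0} };

	printf("cleaned %d descriptors\n", clean(ring, 4));
	return 0;
}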
@@ -1717,11 +1713,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
- skb_mark_napi_id(skb, &q_vector->napi);
- if (ixgbe_qv_busy_polling(q_vector))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -1833,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
{
/* if the page was released unmap it, else just sync our portion */
if (unlikely(IXGBE_CB(skb)->page_released)) {
- dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
+ dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
} else {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
frag->page_offset,
- ixgbe_rx_bufsz(rx_ring),
+ skb_frag_size(frag),
DMA_FROM_DEVICE);
}
- IXGBE_CB(skb)->dma = 0;
}
/**
@@ -1881,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
}
/* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
+ if (!skb_headlen(skb))
ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
@@ -1916,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and unnecessary copy of skb.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -1931,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbe_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ return false;
+#else
+ /* The last offset is a bit aggressive in that we assume the
+ * worst case of FCoE being enabled and using a 3K buffer.
+ * However this should have minimal impact as the 1K extra is
+ * still less than one buffer in size.
+ */
+#define IXGBE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
+ if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
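pagecnt_bias exists to keep atomic page refcount operations off the hot path: the driver charges itself a large batch of references once with page_ref_add(page, USHRT_MAX) and then pays them out by decrementing its private bias counter, so a page remains reusable while refcount minus bias is exactly one. A simplified model of that accounting (plain ints stand in for the atomic refcount):

#include <stdio.h>

/* Simplified model: 'refs' stands in for the atomic page refcount and
 * 'bias' for the driver-private pagecnt_bias. The page can be reused as
 * long as nobody but the driver holds a reference (refs - bias == 1).
 */
struct buf { int refs; int bias; };

static int can_reuse(struct buf *b)
{
	if (b->refs - b->bias > 1)
		return 0;		/* the stack still owns a fragment */
	if (b->bias == 0) {		/* batch is drained: restock it */
		b->refs += 65535;
		b->bias = 65535;
	}
	return 1;
}

int main(void)
{
	struct buf b = { .refs = 2, .bias = 1 };

	b.bias--;			/* hand one fragment to the stack */
	printf("reuse now: %d\n", can_reuse(&b));
	b.refs--;			/* stack frees its fragment */
	printf("reuse after free: %d\n", can_reuse(&b));
	return 0;
}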
/**
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -1946,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/
-static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
- ixgbe_rx_bufsz(rx_ring);
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
#endif
-
- if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ixgbe_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, ixgbe_rx_pg_order(rx_ring));
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(ixgbe_page_is_reserved(page)))
- return false;
-
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
- return false;
#endif
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- page_ref_inc(page);
-
- return true;
}
-static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc)
+static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff **skb,
+ const unsigned int size)
{
struct ixgbe_rx_buffer *rx_buffer;
- struct sk_buff *skb;
- struct page *page;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ prefetchw(rx_buffer->page);
+ *skb = rx_buffer->skb;
- skb = rx_buffer->skb;
+ /* Delay unmapping of the first packet. It carries the header
+ * information, HW may still access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
+ if (!*skb)
+ goto skip_sync;
+ } else {
+ if (*skb)
+ ixgbe_dma_sync_frag(rx_ring, *skb);
+ }
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+skip_sync:
+ rx_buffer->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ return rx_buffer;
+}
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi,
- IXGBE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- return NULL;
+static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ struct sk_buff *skb)
+{
+ if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ /* the page has been released from the ring */
+ IXGBE_CB(skb)->page_released = true;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
}
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+}
- /*
- * Delay unmapping of the first packet. It carries the
- * header information, HW may still access the header
- * after the writeback. Only unmap it when EOP is
- * reached
- */
- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
- goto dma_sync;
+static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
- IXGBE_CB(skb)->dma = rx_buffer->dma;
- } else {
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
- ixgbe_dma_sync_frag(rx_ring, skb);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
-dma_sync:
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
- rx_buffer->skb = NULL;
- }
+ if (size > IXGBE_RX_HDR_SIZE) {
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
- /* pull page into skb */
- if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ixgbe_reuse_rx_page(rx_ring, rx_buffer);
- } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
- /* the page has been released from the ring */
- IXGBE_CB(skb)->page_released = true;
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset,
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ rx_buffer->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
+ return skb;
+}
+
+static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+ skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IXGBE_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* record DMA address if this is the start of a chain of buffers */
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
+
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
return skb;
}
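ixgbe_build_skb() can wrap an sk_buff around the buffer with no copy only because the buffer was laid out for it at allocation time: IXGBE_SKB_PAD of headroom before the packet and room for struct skb_shared_info after it, all inside the half page that becomes the skb's truesize. A model of that arithmetic on a small-page system (constants approximate the real ones; the header comment earlier in ixgbe.h quotes the same 320-byte shared-info figure):

#include <stdio.h>

#define SKB_PAD		64	/* stand-in for NET_SKB_PAD + NET_IP_ALIGN */
#define SHARED_INFO	320	/* approximate sizeof(struct skb_shared_info) */
#define HALF_PAGE	2048	/* truesize when PAGE_SIZE < 8192 */

int main(void)
{
	unsigned int size = 1514;	/* received frame */
	unsigned int used = SKB_PAD + size + SHARED_INFO;

	/* headroom + data + shared info must fit in the half page that
	 * becomes the skb's truesize, otherwise build_skb can't be used
	 */
	printf("used %u of %u bytes: %s\n", used, HALF_PAGE,
	       used <= HALF_PAGE ? "fits" : "needs 3K buffer");
	return 0;
}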
@@ -2115,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -2124,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.status_error)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -2134,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
/* retrieve a buffer from the ring */
- skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+ if (skb)
+ ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = ixgbe_build_skb(rx_ring, rx_buffer,
+ rx_desc, size);
+ else
+ skb = ixgbe_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
cleaned_count++;
/* place incomplete frames back on ring for completion */
@@ -2198,40 +2270,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbe_low_latency_recv(struct napi_struct *napi)
-{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *ring;
- int found = 0;
-
- if (test_bit(__IXGBE_DOWN, &adapter->state))
- return LL_FLUSH_FAILED;
-
- if (!ixgbe_qv_lock_poll(q_vector))
- return LL_FLUSH_BUSY;
-
- ixgbe_for_each_ring(ring, q_vector->rx) {
- found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
- if (found)
- ring->stats.cleaned += found;
- else
- ring->stats.misses++;
-#endif
- if (found)
- break;
- }
-
- ixgbe_qv_unlock_poll(q_vector);
-
- return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* ixgbe_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -2447,6 +2485,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
+ s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
@@ -2485,6 +2524,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
return;
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ rc = hw->phy.ops.check_overtemp(hw);
+ if (rc != IXGBE_ERR_OVERTEMP)
+ return;
+ break;
default:
if (adapter->hw.mac.type >= ixgbe_mac_X540)
return;
@@ -2531,6 +2576,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
return;
}
return;
+ case ixgbe_mac_x550em_a:
+ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ IXGBE_EICR_GPI_SDP0_X550EM_a);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP0_X550EM_a);
+ }
+ return;
+ case ixgbe_mac_X550:
case ixgbe_mac_X540:
if (!(eicr & IXGBE_EICR_TS))
return;
@@ -2856,8 +2913,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
- /* Exit if we are called by netpoll or busy polling is active */
- if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
+ /* Exit if we are called by netpoll */
+ if (budget <= 0)
return budget;
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -2876,7 +2933,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
- ixgbe_qv_unlock_napi(q_vector);
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -3214,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct ixgbe_tx_buffer) * ring->count);
+
/* enable queue */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
@@ -3384,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+ srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
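Note: the SRRCTL BSIZEPKT field counts in 1 KB granules, hence the right shift above. A minimal illustration, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10:

	u32 enc_3k = IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* 3072 >> 10 = 3 */
	u32 enc_2k = IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* 2048 >> 10 = 2 */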
@@ -3685,6 +3748,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
@@ -3717,8 +3781,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
*/
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
+#if (PAGE_SIZE < 8192)
+ } else {
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+
+ /* Limit the maximum frame size so we don't overrun the skb */
+ if (ring_uses_build_skb(ring) &&
+ !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+ IXGBE_RXDCTL_RLPML_EN;
+#endif
}
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct ixgbe_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IXGBE_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* enable receive descriptor ring */
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
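Clearing descriptor 0's written-back length pairs with the reworked clean loop earlier in this patch, which treats a non-zero length as "descriptor complete" instead of testing the DD bit; a stale length would otherwise be read as a completed descriptor. The consumer side, as seen at the top of ixgbe_clean_rx_irq():

	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);

	if (!size)		/* not written back yet */
		break;

	/* order the length read before reading the rest of the descriptor */
	dma_rmb();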
@@ -3855,10 +3938,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
+
+ clear_ring_rsc_enabled(rx_ring);
+ clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+ clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_ring_rsc_enabled(rx_ring);
- else
- clear_ring_rsc_enabled(rx_ring);
+
+ if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+

+ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ continue;
+
+ set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+ if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+#endif
}
}
@@ -4559,23 +4662,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
- ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_enable(&adapter->q_vector[q_idx]->napi);
- }
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
- pr_info("QV %d locked\n", q_idx);
- usleep_range(1000, 20000);
- }
- }
}
static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
@@ -4879,45 +4975,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
**/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
+ u16 i = rx_ring->next_to_clean;
+ struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
-
+ while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
if (IXGBE_CB(skb)->page_released)
- dma_unmap_page(dev,
- IXGBE_CB(skb)->dma,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
dev_kfree_skb(skb);
- rx_buffer->skb = NULL;
}
- if (!rx_buffer->page)
- continue;
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
- rx_buffer->page = NULL;
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
}
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5294,6 +5392,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
+ if (adapter->hw.phy.type == ixgbe_phy_fw)
+ ixgbe_watchdog_link_is_down(adapter);
ixgbe_down(adapter);
/*
* If SR-IOV enabled then wait a bit before bringing the adapter
@@ -5384,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
**/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
- struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned long size;
- u16 i;
+ u16 i = tx_ring->next_to_clean;
+ struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- /* ring already cleared, nothing to do */
- if (!tx_ring->tx_buffer_info)
- return;
+ while (i != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
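Both reworked ring-cleanup loops rely on the same wrap idiom: advance the index and buffer pointer together, and reset both at the end of the ring. A minimal helper-style sketch of that idiom (demo_advance is illustrative, not part of the patch):

	static inline void demo_advance(u16 *i, u16 count,
					struct ixgbe_tx_buffer **buf,
					struct ixgbe_tx_buffer *base)
	{
		(*buf)++;
		(*i)++;
		if (unlikely(*i == count)) {	/* wrap at end of ring */
			*i = 0;
			*buf = base;
		}
	}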
@@ -5554,6 +5683,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
+ * @adapter: board private structure
+ */
+static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ if (!hw->phy.eee_speeds_supported)
+ break;
+ adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
+ if (!hw->phy.eee_speeds_advertised)
+ break;
+ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+ break;
+ default:
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+ break;
+ }
+}
+
+/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
@@ -5717,6 +5871,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_x550em_a:
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ break;
+ default:
+ break;
+ }
/* fall through */
case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
@@ -5730,6 +5892,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* Fall Through */
case ixgbe_mac_X550:
+ if (hw->mac.type == ixgbe_mac_X550)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -5816,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
if (tx_ring->q_vector)
ring_node = tx_ring->q_vector->numa_node;
- tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
+ tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -5900,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
if (rx_ring->q_vector)
ring_node = rx_ring->q_vector->numa_node;
- rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -6200,7 +6364,8 @@ int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_close_suspend(adapter);
+ if (netif_device_present(netdev))
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
@@ -6245,14 +6410,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- rtnl_unlock();
- if (err)
- return err;
-
- netif_device_attach(netdev);
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
- return 0;
+ return err;
}
#endif /* CONFIG_PM */
@@ -6267,14 +6430,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
- rtnl_lock();
if (netif_running(netdev))
ixgbe_close_suspend(adapter);
- rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -6808,6 +6971,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case IXGBE_LINK_SPEED_100_FULL:
speed_str = "100 Mbps";
break;
+ case IXGBE_LINK_SPEED_10_FULL:
+ speed_str = "10 Mbps";
+ break;
default:
speed_str = "unknown speed";
break;
@@ -7615,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
tx_ring->next_to_use = i;
}
@@ -8111,8 +8291,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
}
#endif
-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+
+static void ixgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
@@ -8150,13 +8331,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
}
rcu_read_unlock();
+
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
stats->rx_length_errors = netdev->stats.rx_length_errors;
stats->rx_crc_errors = netdev->stats.rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- return stats;
}
#ifdef CONFIG_IXGBE_DCB
@@ -9290,9 +9471,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = ixgbe_low_latency_recv,
-#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
@@ -9596,6 +9774,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
+ ixgbe_set_eee_capable(adapter);
if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -9673,7 +9852,7 @@ skip_sriov:
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
- netdev->dcbnl_ops = &dcbnl_ops;
+ netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
@@ -9833,8 +10012,9 @@ skip_sriov:
* since os does not support feature
*/
if (hw->mac.ops.set_fw_drv_ver)
- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
- 0xFF);
+ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
+ sizeof(ixgbe_driver_version) - 1,
+ ixgbe_driver_version);
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -10082,7 +10262,7 @@ skip_bad_vf_detection:
}
if (netif_running(netdev))
- ixgbe_down(adapter);
+ ixgbe_close_suspend(adapter);
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
@@ -10152,10 +10332,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
}
#endif
+ rtnl_lock();
if (netif_running(netdev))
- ixgbe_up(adapter);
+ ixgbe_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
}
static const struct pci_error_handlers ixgbe_err_handler = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 01c2667c0f92..811cb4f64a5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -74,6 +74,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 3b8362085f57..e55b2602f371 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
u8 csum_byte;
u8 high_bits;
@@ -452,10 +452,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msleep(100);
- hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl);
- if (!(ctrl & MDIO_CTRL1_RESET)) {
- udelay(2);
- break;
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ MDIO_MMD_PMAPMD, &ctrl);
+ if (status)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ udelay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_PHYXS, &ctrl);
+ if (status)
+ return status;
+
+ if (!(ctrl & MDIO_CTRL1_RESET)) {
+ udelay(2);
+ break;
+ }
}
}
@@ -751,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
-
- /*
- * Clear autoneg_advertised and set new values based on input link
+ /* Clear autoneg_advertised and set new values based on input link
* speed.
*/
hw->phy.autoneg_advertised = 0;
@@ -761,12 +776,21 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
if (speed & IXGBE_LINK_SPEED_100_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed & IXGBE_LINK_SPEED_10_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
+
/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);
@@ -960,40 +984,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status;
-
- status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
- MDIO_MMD_VEND1,
- firmware_version);
-
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status;
-
- status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
- MDIO_MMD_VEND1,
- firmware_version);
-
- return status;
-}
-
-/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
@@ -1738,6 +1728,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ max_retry = 3;
if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
max_retry = IXGBE_SFP_DETECT_RETRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index ecf05f838fc5..5aa2c3cf7aec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -168,10 +168,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 1efb404431e9..ef0635e0918c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -858,14 +858,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -879,8 +879,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7e5d9850e4b2..044cb44747cf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -512,6 +512,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
/*
* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
@@ -934,7 +935,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
IXGBE_VT_MSGINFO_SHIFT;
int err;
- if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
+ index > 0) {
e_warn(drv,
"VF %d requested MACVLAN filter but is administratively denied\n",
vf);
@@ -978,6 +980,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_10:
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
@@ -1002,6 +1005,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_20:
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
break;
default:
return -1;
@@ -1041,8 +1045,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
return -EPERM;
/* verify the PF is supporting the correct API */
- if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
/* This mailbox command is supported (required) only for 82599 and x540
* VFs which support up to 4 RSS queues. Therefore we will compress the
@@ -1068,8 +1077,13 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
return -EPERM;
/* verify the PF is supporting the correct API */
- if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));
@@ -1081,11 +1095,16 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
{
struct ixgbe_hw *hw = &adapter->hw;
int xcast_mode = msgbuf[1];
- u32 vmolr, disable, enable;
+ u32 vmolr, fctrl, disable, enable;
/* verify the PF is supporting the correct APIs */
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_12:
+ /* promiscuous mode was introduced in API version 1.3 */
+ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ /* Fall through */
+ case ixgbe_mbox_api_13:
break;
default:
return -EOPNOTSUPP;
@@ -1101,17 +1120,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = 0;
break;
case IXGBEVF_XCAST_MODE_MULTI:
- disable = IXGBE_VMOLR_MPE;
+ disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
break;
case IXGBEVF_XCAST_MODE_ALLMULTI:
- disable = 0;
+ disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
break;
+ case IXGBEVF_XCAST_MODE_PROMISC:
+ if (hw->mac.type <= ixgbe_mac_82599EB)
+ return -EOPNOTSUPP;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ if (!(fctrl & IXGBE_FCTRL_UPE)) {
+ /* VF promisc requires PF in promisc */
+ e_warn(drv,
+ "Enabling VF promisc requires PF in promisc\n");
+ return -EPERM;
+ }
+
+ disable = 0;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ break;
default:
return -EOPNOTSUPP;
}
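After the switch selects the disable/enable masks, the handler applies them to the per-VF VMOLR register. A hedged sketch of that continuation (the read/modify/write itself falls outside this hunk):

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);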
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index cf21273db201..1d07f2ead914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -92,6 +92,8 @@
#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
@@ -1499,6 +1501,8 @@ enum {
#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
/* VMOLR bitmasks */
+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */
+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */
#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
@@ -1914,6 +1918,7 @@ enum {
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINKS_SPEED_10_X550EM_A 0
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
@@ -2619,6 +2624,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
@@ -2644,6 +2650,59 @@ enum ixgbe_fdir_pballoc_type {
#define FW_INT_PHY_REQ_LEN 10
#define FW_INT_PHY_REQ_READ 0
#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 BIT(0)
+#define FW_PHY_ACT_LINK_SPEED_100 BIT(1)
+#define FW_PHY_ACT_LINK_SPEED_1G BIT(2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G BIT(3)
+#define FW_PHY_ACT_LINK_SPEED_5G BIT(4)
+#define FW_PHY_ACT_LINK_SPEED_10G BIT(5)
+#define FW_PHY_ACT_LINK_SPEED_20G BIT(6)
+#define FW_PHY_ACT_LINK_SPEED_25G BIT(7)
+#define FW_PHY_ACT_LINK_SPEED_40G BIT(8)
+#define FW_PHY_ACT_LINK_SPEED_50G BIT(9)
+#define FW_PHY_ACT_LINK_SPEED_100G BIT(10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP BIT(18)
+#define FW_PHY_ACT_SETUP_LINK_HP BIT(19)
+#define FW_PHY_ACT_SETUP_LINK_EEE BIT(20)
+#define FW_PHY_ACT_SETUP_LINK_AN BIT(22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE BIT(19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE BIT(24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP BIT(25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX BIT(28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX BIT(29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE BIT(6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE BIT(5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE BIT(4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE BIT(3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE BIT(2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE BIT(1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
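These bits compose the 32-bit request words passed to the new PHY activity interface. A minimal sketch of building data[0] for FW_PHY_ACT_SETUP_LINK, matching how ixgbe_setup_fw_link() uses them later in this patch:

	u32 setup0 = 0;

	/* pause mode occupies bits 17:16 */
	setup0 |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
		  FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
	/* advertised speeds occupy the low bits */
	setup0 |= FW_PHY_ACT_LINK_SPEED_1G | FW_PHY_ACT_LINK_SPEED_100;
	/* request high power and autonegotiation */
	setup0 |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;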
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2686,6 +2745,16 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
union ixgbe_hic_hdr2 hdr;
@@ -2734,6 +2803,19 @@ struct ixgbe_hic_internal_phy_resp {
__be32 read_data;
};
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2849,6 +2931,7 @@ typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_10_FULL 0x0002
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
@@ -3064,6 +3147,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
ixgbe_phy_sgmii,
+ ixgbe_phy_fw,
ixgbe_phy_generic
};
@@ -3362,7 +3446,8 @@ struct ixgbe_mac_operations {
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
@@ -3392,7 +3477,6 @@ struct ixgbe_phy_operations {
s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
@@ -3478,6 +3562,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e2ff823ee202..84a467a8ed3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -780,8 +780,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
ixgbe_link_speed speed;
bool link_up;
- /*
- * Link should be up in order for the blink bit in the LED control
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+ /* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
* This will be reversed when we stop the blinking.
*/
@@ -814,6 +816,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
u32 macc_reg;
u32 ledctl_reg;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -913,7 +918,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = {
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
.set_phy_power = &ixgbe_set_copper_phy_power,
- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 11fb433eb924..200f847fd8f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -63,6 +63,18 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
+static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* Start with X540 invariants, since they are so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ phy->ops.set_phy_power = NULL;
+
+ return 0;
+}
+
/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
* @hw: pointer to hardware structure
**/
@@ -402,6 +414,204 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
}
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ u16 retries = FW_PHY_ACT_RETRIES;
+ s32 rc;
+ u32 i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = cpu_to_le16(activity);
+ for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i)
+ hic.cmd.data[i] = cpu_to_be32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (rc)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = be32_to_cpu(hic.rsp.data[i]);
+ return 0;
+ }
+ usleep_range(20, 30);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
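A typical call pattern for the new helper, mirroring ixgbe_check_overtemp_fw() later in this patch:

	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	s32 rc;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
	if (!rc && (info[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP))
		rc = IXGBE_ERR_OVERTEMP;	/* PHY reports overtemperature */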
+
+static const struct {
+ u16 fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ u16 phy_speeds;
+ u16 phy_id_lo;
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.id)
+ return 0;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ return 0;
+}
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shut down a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
+/**
+ * ixgbe_setup_fw_link - Set up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return 0;
+}
+
+/**
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
* @hw: pointer to hardware structure
*
@@ -624,41 +834,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
-/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
- * command assuming that the semaphore is already obtained.
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the hostif.
- **/
-static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
-{
- s32 status;
- struct ixgbe_hic_read_shadow_ram buffer;
-
- buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
- buffer.hdr.req.buf_lenh = 0;
- buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
- buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
- /* convert offset from words to bytes */
- buffer.address = cpu_to_be32(offset * 2);
- /* one word */
- buffer.length = cpu_to_be16(sizeof(u16));
-
- status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
- IXGBE_HI_COMMAND_TIMEOUT, false);
- if (status)
- return status;
-
- *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- FW_NVM_DATA_OFFSET);
-
- return 0;
-}
-
/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
@@ -670,6 +845,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
@@ -677,7 +853,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u32 i;
/* Take semaphore for the entire operation. */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status) {
hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
return status;
@@ -698,10 +874,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
- status = ixgbe_host_interface_command(hw, &buffer,
- sizeof(buffer),
- IXGBE_HI_COMMAND_TIMEOUT,
- false);
+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
if (status) {
hw_dbg(hw, "Host interface command failed\n");
goto out;
@@ -725,7 +899,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
}
out:
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, mask);
return status;
}
@@ -896,15 +1070,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
**/
static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status = 0;
+ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
+ struct ixgbe_hic_read_shadow_ram buffer;
+ s32 status;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32(offset * 2);
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
+ if (!status) {
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
}
+ hw->mac.ops.release_swfw_sync(hw, mask);
return status;
}
@@ -1768,6 +1959,125 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
return rc;
}
+/**
+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ * @speed: link speed to advertise
+ * @autoneg_wait: true when waiting for autoneg to complete
+ */
+static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval, flx_val;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control per IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ */
+static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up)
+ goto out;
+
+ /* Check if auto-negotiation has completed */
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Negotiate the flow control */
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
+
+out:
+ if (!status) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
/** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1780,6 +2090,17 @@ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw)
mac->ops.setup_fc = NULL;
mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
break;
+ case ixgbe_media_type_copper:
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ break;
+ }
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+ break;
case ixgbe_media_type_backplane:
mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
@@ -1827,7 +2148,7 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
- return;
+ break;
case ixgbe_media_type_backplane:
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
@@ -1870,6 +2191,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
/* CS4227 SFP must not enable auto-negotiation */
@@ -2108,8 +2435,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return status;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
@@ -2189,12 +2514,11 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY for X550EM_x.
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- if (hw->mac.type != ixgbe_mac_X550EM_x)
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return 0;
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
@@ -2356,6 +2680,62 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
return 0;
}
+/**
+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends the driver version number to the firmware through the
+ * manageability block. Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the
+ * semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if
+ * the command fails.
+ **/
+static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info2 fw_cmd;
+ s32 ret_val;
+ int i;
+
+ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ memcpy(fw_cmd.driver_string, driver_ver, len);
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status !=
+ FW_CEM_RESP_STATUS_SUCCESS)
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return 0;
+ }
+
+ return ret_val;
+}
+
/** ixgbe_get_lcd_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
@@ -2655,6 +3035,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+{
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
+ if (rc)
+ return rc;
+ memset(store, 0, sizeof(store));
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
+ if (rc)
+ return rc;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+{
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+ if (rc)
+ return rc;
+
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
+ }
+ return 0;
+}
+
+/**
* ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
* @hw: pointer to hardware structure
*
@@ -2740,6 +3164,10 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
+ break;
default:
break;
}
@@ -2777,6 +3205,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
media_type = ixgbe_media_type_copper;
break;
default:
@@ -2844,6 +3274,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
default:
break;
}
@@ -3275,7 +3712,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.clear_vfta = &ixgbe_clear_vfta_generic, \
.set_vfta = &ixgbe_set_vfta_generic, \
.fc_enable = &ixgbe_fc_enable_generic, \
- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \
.init_uta_tables = &ixgbe_init_uta_tables_generic, \
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
@@ -3355,6 +3792,27 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
+static struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
+ X550_COMMON_MAC
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .fc_autoneg = ixgbe_fc_autoneg,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
+};
+
#define X550_COMMON_EEP \
.read = &ixgbe_read_ee_hostif_X550, \
.read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
@@ -3384,12 +3842,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
- .set_phy_power = NULL, \
- .check_overtemp = &ixgbe_tn_check_overtemp, \
- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
+ .set_phy_power = NULL,
static const struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
+ .check_overtemp = &ixgbe_tn_check_overtemp,
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
@@ -3398,6 +3855,7 @@ static const struct ixgbe_phy_operations phy_ops_X550 = {
static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
+ .check_overtemp = &ixgbe_tn_check_overtemp,
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_generic,
@@ -3406,6 +3864,7 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
static const struct ixgbe_phy_operations phy_ops_x550em_a = {
X550_COMMON_PHY
+ .check_overtemp = &ixgbe_tn_check_overtemp,
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_x550a,
@@ -3414,6 +3873,17 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = {
+ X550_COMMON_PHY
+ .check_overtemp = ixgbe_check_overtemp_fw,
+ .init = ixgbe_init_phy_ops_X550em,
+ .identify = ixgbe_identify_phy_fw,
+ .read_reg = NULL,
+ .write_reg = NULL,
+ .read_reg_mdi = NULL,
+ .write_reg_mdi = NULL,
+};
+
static const struct ixgbe_link_operations link_ops_x550em_x = {
.read_link = &ixgbe_read_i2c_combined_generic,
.read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -3463,3 +3933,13 @@ const struct ixgbe_info ixgbe_x550em_a_info = {
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_x550em_a,
};
+
+const struct ixgbe_info ixgbe_x550em_a_fw_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = ixgbe_get_invariants_X550_a_fw,
+ .mac_ops = &mac_ops_x550em_a_fw,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a_fw,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
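The new info structure only takes effect once probe can map a device ID to it; in ixgbe the PCI ID table's driver_data indexes a board enum that selects one of these ixgbe_info blocks. A sketch of that wiring, with the enum value board_x550em_a_fw assumed rather than taken from this hunk:

/* Sketch only: board_x550em_a_fw is an assumed enum constant. */
static const struct ixgbe_info *example_info_tbl[] = {
	[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	const struct ixgbe_info *ii = example_info_tbl[ent->driver_data];

	/* probe would copy ii->mac_ops, ii->phy_ops, etc. into hw */
	return ii ? 0 : -EINVAL;
}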
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 508e72c5f1c2..1f6c0ecd50bb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -432,11 +432,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
if (!ring) {
data[i++] = 0;
data[i++] = 0;
-#ifdef BP_EXTENDED_STATS
- data[i++] = 0;
- data[i++] = 0;
- data[i++] = 0;
-#endif
continue;
}
@@ -446,12 +441,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i + 1] = ring->stats.misses;
- data[i + 2] = ring->stats.cleaned;
- i += 3;
-#endif
}
/* populate Rx queue data */
@@ -460,11 +449,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
if (!ring) {
data[i++] = 0;
data[i++] = 0;
-#ifdef BP_EXTENDED_STATS
- data[i++] = 0;
- data[i++] = 0;
- data[i++] = 0;
-#endif
continue;
}
@@ -474,12 +458,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
data[i + 1] = ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef BP_EXTENDED_STATS
- data[i] = ring->stats.yields;
- data[i + 1] = ring->stats.misses;
- data[i + 2] = ring->stats.cleaned;
- i += 3;
-#endif
}
}
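The per-ring counters above are read under the u64_stats retry loop so a 32-bit reader never observes a torn 64-bit value. A minimal sketch of the pattern, using the ring fields defined in ixgbevf.h:

/* Sketch only: snapshot a ring's packet/byte counters consistently. */
static void example_read_ring_stats(struct ixgbevf_ring *ring,
				    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}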
@@ -507,28 +485,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_bp_napi_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_bp_poll_yield", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_misses", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bp_cleaned", i);
- p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
}
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 5639fbe294d0..a8cbc2dda0dd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -37,11 +37,6 @@
#include "vf.h"
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#define BP_EXTENDED_STATS
-#endif
-
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
@@ -73,11 +68,6 @@ struct ixgbevf_rx_buffer {
struct ixgbevf_stats {
u64 packets;
u64 bytes;
-#ifdef BP_EXTENDED_STATS
- u64 yields;
- u64 misses;
- u64 cleaned;
-#endif
};
struct ixgbevf_tx_queue_stats {
@@ -217,109 +207,6 @@ struct ixgbevf_q_vector {
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
-{
- spin_lock_init(&q_vector->lock);
- q_vector->state = IXGBEVF_QV_STATE_IDLE;
-}
-
-/* called from the device poll routine to get ownership of a q_vector */
-static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
-{
- int rc = true;
-
- spin_lock_bh(&q_vector->lock);
- if (q_vector->state & IXGBEVF_QV_LOCKED) {
- WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
- q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
- rc = false;
-#ifdef BP_EXTENDED_STATS
- q_vector->tx.ring->stats.yields++;
-#endif
- } else {
- /* we don't care if someone yielded */
- q_vector->state = IXGBEVF_QV_STATE_NAPI;
- }
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* returns true is someone tried to get the qv while napi had it */
-static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
-{
- int rc = false;
-
- spin_lock_bh(&q_vector->lock);
- WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
- IXGBEVF_QV_STATE_NAPI_YIELD));
-
- if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
- rc = true;
- /* reset state to idle, unless QV is disabled */
- q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* called from ixgbevf_low_latency_poll() */
-static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
-{
- int rc = true;
-
- spin_lock_bh(&q_vector->lock);
- if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
- q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
- rc = false;
-#ifdef BP_EXTENDED_STATS
- q_vector->rx.ring->stats.yields++;
-#endif
- } else {
- /* preserve yield marks */
- q_vector->state |= IXGBEVF_QV_STATE_POLL;
- }
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* returns true if someone tried to get the qv while it was locked */
-static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
-{
- int rc = false;
-
- spin_lock_bh(&q_vector->lock);
- WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
-
- if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
- rc = true;
- /* reset state to idle, unless QV is disabled */
- q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
-{
- WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
- return q_vector->state & IXGBEVF_QV_USER_PEND;
-}
-
-/* false if QV is currently owned */
-static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
-{
- int rc = true;
-
- spin_lock_bh(&q_vector->lock);
- if (q_vector->state & IXGBEVF_QV_OWNED)
- rc = false;
- q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
-}
-
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* microsecond values for various ITR rates shifted by 2 to fit itr register
* with the first 3 bits reserved 0
*/
@@ -464,6 +351,7 @@ enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
IXGBEVF_XCAST_MODE_MULTI,
IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
};
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 6d4bef5803f2..80bab261a0ec 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,16 +457,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb)
{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- skb_mark_napi_id(skb, &q_vector->napi);
-
- if (ixgbevf_qv_busy_polling(q_vector)) {
- netif_receive_skb(skb);
- /* exit early if we busy polled */
- return;
- }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
napi_gro_receive(&q_vector->napi, skb);
}
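With the driver-private busy-poll locking gone, the core busy-poll loop drives the driver's ordinary NAPI poll routine via the napi_id recorded at registration time, so no per-driver hook remains. A sketch of the only registration still needed (the weight of 64 is assumed):

/* Sketch only: core busy polling keys off the napi context itself. */
static void example_register_napi(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_q_vector *q_vector)
{
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
}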
@@ -1031,10 +1021,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
return budget;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!ixgbevf_qv_lock_napi(q_vector))
- return budget;
-#endif
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling
@@ -1052,10 +1038,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
- ixgbevf_qv_unlock_napi(q_vector);
-#endif
-
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -1090,40 +1072,6 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
-{
- struct ixgbevf_q_vector *q_vector =
- container_of(napi, struct ixgbevf_q_vector, napi);
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *ring;
- int found = 0;
-
- if (test_bit(__IXGBEVF_DOWN, &adapter->state))
- return LL_FLUSH_FAILED;
-
- if (!ixgbevf_qv_lock_poll(q_vector))
- return LL_FLUSH_BUSY;
-
- ixgbevf_for_each_ring(ring, q_vector->rx) {
- found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
- if (found)
- ring->stats.cleaned += found;
- else
- ring->stats.misses++;
-#endif
- if (found)
- break;
- }
-
- ixgbevf_qv_unlock_poll(q_vector);
-
- return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -1930,6 +1878,13 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
- xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
- (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
- IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
+ /* request the most inclusive mode we need */
+ if (flags & IFF_PROMISC)
+ xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
+ else if (flags & IFF_ALLMULTI)
+ xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
+ else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
+ xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+ else
+ xcast_mode = IXGBEVF_XCAST_MODE_NONE;
+
spin_lock_bh(&adapter->mbx_lock);
hw->mac.ops.update_xcast_mode(hw, xcast_mode);
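Requesting IXGBEVF_XCAST_MODE_PROMISC can fail with -EOPNOTSUPP when the PF only speaks mailbox API 1.2 (see the vf.c hunk below). A hypothetical caller-side fallback, not present in this patch:

/* Sketch only: degrade to allmulti if the PF predates API 1.3. */
static s32 example_request_promisc(struct ixgbe_hw *hw)
{
	s32 err;

	err = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC);
	if (err == -EOPNOTSUPP)
		err = hw->mac.ops.update_xcast_mode(hw,
						    IXGBEVF_XCAST_MODE_ALLMULTI);
	return err;
}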
@@ -1950,9 +1908,6 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
-#ifdef CONFIG_NET_RX_BUSY_POLL
- ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
-#endif
napi_enable(&q_vector->napi);
}
}
@@ -1966,12 +1921,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
- pr_info("QV %d locked\n", q_idx);
- usleep_range(1000, 20000);
- }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
@@ -2071,7 +2020,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_12,
+ int api[] = { ixgbe_mbox_api_13,
+ ixgbe_mbox_api_12,
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
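The array is ordered newest-first and terminated by ixgbe_mbox_api_unknown; negotiation walks it until the PF accepts a version. A hedged sketch of that loop — the negotiate hook name is an assumption, not taken from this hunk:

/* Sketch only: try each mailbox API, newest first, until one sticks. */
static void example_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	static const int api[] = { ixgbe_mbox_api_13,
				   ixgbe_mbox_api_12,
				   ixgbe_mbox_api_11,
				   ixgbe_mbox_api_10,
				   ixgbe_mbox_api_unknown };
	int i = 0;

	while (api[i] != ixgbe_mbox_api_unknown) {
		/* negotiate_api_version is an assumed hook name */
		if (!hw->mac.ops.negotiate_api_version(hw, api[i]))
			break;
		i++;
	}
}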
@@ -2373,6 +2323,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
switch (hw->api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
default:
@@ -3228,6 +3179,21 @@ err_setup_reset:
}
/**
+ * ixgbevf_close_suspend - actions necessary to both suspend and close flows
+ * @adapter: the private adapter struct
+ *
+ * This function should contain the necessary work common to both suspending
+ * and closing of the device.
+ */
+static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+}
+
+/**
* ixgbevf_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -3242,11 +3208,8 @@ int ixgbevf_close(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- ixgbevf_down(adapter);
- ixgbevf_free_irq(adapter);
-
- ixgbevf_free_all_tx_resources(adapter);
- ixgbevf_free_all_rx_resources(adapter);
+ if (netif_device_present(netdev))
+ ixgbevf_close_suspend(adapter);
return 0;
}
@@ -3268,6 +3231,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
+ rtnl_lock();
+
if (netif_running(dev))
ixgbevf_close(dev);
@@ -3276,6 +3241,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
if (netif_running(dev))
ixgbevf_open(dev);
+
+ rtnl_unlock();
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
@@ -3796,17 +3763,14 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
- if (netif_running(netdev)) {
- rtnl_lock();
- ixgbevf_down(adapter);
- ixgbevf_free_irq(adapter);
- ixgbevf_free_all_tx_resources(adapter);
- ixgbevf_free_all_rx_resources(adapter);
- ixgbevf_clear_interrupt_scheme(adapter);
- rtnl_unlock();
- }
+ if (netif_running(netdev))
+ ixgbevf_close_suspend(adapter);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -3838,6 +3802,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
+
+ adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
@@ -3869,8 +3835,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
-static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void ixgbevf_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
unsigned int start;
@@ -3903,8 +3869,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
-
- return stats;
}
#define IXGBEVF_MAX_MAC_HDR_LEN 127
@@ -3953,9 +3917,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = ixgbevf_busy_poll_recv,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
@@ -4102,6 +4063,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4244,7 +4206,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(netdev))
- ixgbevf_down(adapter);
+ ixgbevf_close_suspend(adapter);
if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
pci_disable_device(pdev);
@@ -4272,6 +4234,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+ adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
@@ -4292,12 +4255,13 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev))
- ixgbevf_up(adapter);
+ ixgbevf_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
}
/* PCI Error Recovery (ERS) */
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 340cdd469455..bc0442acae78 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -84,6 +84,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d46ba1dabcb7..8a5db9d7219d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -330,9 +330,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* Thus return an error if API doesn't support RETA querying or querying
* is not supported for this device type.
*/
- if (hw->api_version != ixgbe_mbox_api_12 ||
- hw->mac.type >= ixgbe_mac_X550_vf)
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ if (hw->mac.type < ixgbe_mac_X550_vf)
+ break;
+ default:
return -EOPNOTSUPP;
+ }
msgbuf[0] = IXGBE_VF_GET_RETA;
@@ -391,9 +396,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* Thus return an error if API doesn't support RSS Random Key retrieval
* or if the operation is not supported for this device type.
*/
- if (hw->api_version != ixgbe_mbox_api_12 ||
- hw->mac.type >= ixgbe_mac_X550_vf)
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_12:
+ if (hw->mac.type < ixgbe_mac_X550_vf)
+ break;
+ default:
return -EOPNOTSUPP;
+ }
msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
@@ -545,6 +555,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
switch (hw->api_version) {
case ixgbe_mbox_api_12:
+ /* promiscuous mode was introduced with mailbox API 1.3 */
+ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ /* fall through */
+ case ixgbe_mbox_api_13:
break;
default:
return -EOPNOTSUPP;
@@ -884,6 +899,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
switch (hw->api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
break;
default:
return 0;
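The API/MAC capability test above appears verbatim in both the RETA and RSS-key query paths; one way to state it once is a small predicate. A hypothetical helper, not part of this patch:

/* Sketch only: RETA/RSS-key querying is supported for pre-X550 VFs
 * (82599/x540) speaking mailbox API 1.2 or newer. */
static bool example_rss_query_supported(struct ixgbe_hw *hw)
{
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		return hw->mac.type < ixgbe_mac_X550_vf;
	default:
		return false;
	}
}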