Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig          3
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c    6
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c        69
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c        224
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c    22
-rw-r--r--  drivers/net/ethernet/marvell/skge.c          71
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c          80
-rw-r--r--  drivers/net/ethernet/marvell/sky2.h           1
8 files changed, 279 insertions, 197 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f4b7cf18fb0f..d2555e8b947e 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -83,9 +83,8 @@ config MVNETA_BM
config MVPP2
tristate "Marvell Armada 375 network interface support"
- depends on MACH_ARMADA_375 || COMPILE_TEST
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
- depends on !64BIT
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1fa7c03edec2..25642dee49d3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1504,9 +1504,7 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
int err;
u32 supported, advertising;
- err = phy_read_status(dev->phydev);
- if (err == 0)
- err = phy_ethtool_ksettings_get(dev->phydev, cmd);
+ err = phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
@@ -2319,7 +2317,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
wrlp(mp, INT_MASK, mp->int_mask);
}
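
The napi_complete() -> napi_complete_done() conversions in this series (here and in the mvneta, mvpp2, pxa168 and skge hunks below) all follow the same polling shape: report how many packets were actually processed so the core can drive GRO flushing and busy-poll accounting. A minimal sketch of that shape, not taken from any driver in this series; the foo_* names are hypothetical, and the bool return of napi_complete_done() is assumed as the skge hunk further down relies on it:

#include <linux/netdevice.h>

struct foo_port {
	struct napi_struct napi;
	/* ... device state ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_port *fp = container_of(napi, struct foo_port, napi);
	int work_done;

	work_done = foo_process_rx(fp, budget);	/* hypothetical RX routine */

	if (work_done < budget) {
		/* Only re-enable the device interrupt if polling really
		 * stopped; napi_complete_done() may keep us scheduled.
		 */
		if (napi_complete_done(napi, work_done))
			foo_enable_irq(fp);	/* hypothetical */
	}

	return work_done;
}
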
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e05e22705cf7..61dd4462411c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,6 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -224,6 +225,7 @@
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
@@ -525,6 +527,7 @@ struct mvneta_tx_queue {
* descriptor ring
*/
int count;
+ int pending;
int tx_stop_threshold;
int tx_wake_threshold;
@@ -652,7 +655,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
}
/* Get System Network Statistics */
-static struct rtnl_link_stats64 *
+static void
mvneta_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -686,8 +689,6 @@ mvneta_get_stats64(struct net_device *dev,
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
-
- return stats;
}
/* Rx descriptors helper methods */
@@ -820,8 +821,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
/* Only 255 descriptors can be added at once ; Assume caller
* process TX desriptors in quanta less than 256
*/
- val = pend_desc;
+ val = pend_desc + txq->pending;
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ txq->pending = 0;
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
@@ -1758,8 +1760,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
- struct mvneta_tx_queue *txq, int num)
+ struct mvneta_tx_queue *txq, int num,
+ struct netdev_queue *nq)
{
+ unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
for (i = 0; i < num; i++) {
@@ -1767,6 +1771,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
txq->txq_get_index;
struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+ if (skb) {
+ bytes_compl += skb->len;
+ pkts_compl++;
+ }
+
mvneta_txq_inc_get(txq);
if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
@@ -1777,6 +1786,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
continue;
dev_kfree_skb_any(skb);
}
+
+ netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
/* Handle end of transmission */
@@ -1790,7 +1801,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
txq->count -= tx_done;
@@ -2400,12 +2411,18 @@ out:
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
+ netdev_tx_sent_queue(nq, len);
+ txq->count += frags;
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+ else
+ txq->pending += frags;
+
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
@@ -2424,9 +2441,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
/* reset txq */
txq->count = 0;
@@ -2750,11 +2768,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
}
- budget -= rx_done;
-
- if (budget > 0) {
+ if (rx_done < budget) {
cause_rx_tx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
if (pp->neta_armada3700) {
unsigned long flags;
@@ -2952,6 +2968,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
static void mvneta_txq_deinit(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
kfree(txq->tx_skb);
if (txq->tso_hdrs)
@@ -2963,6 +2981,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
+ netdev_tx_reset_queue(nq);
+
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
@@ -3908,6 +3928,25 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
return 0;
}
+static void mvneta_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int mvneta_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_wol(dev->phydev, wol);
+}
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -3920,7 +3959,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_do_ioctl = mvneta_ioctl,
};
-const struct ethtool_ops mvneta_eth_tool_ops = {
+static const struct ethtool_ops mvneta_eth_tool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
@@ -3937,6 +3976,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
.set_rxfh = mvneta_ethtool_set_rxfh,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
+ .get_wol = mvneta_ethtool_get_wol,
+ .set_wol = mvneta_ethtool_set_wol,
};
/* Initialize hw */
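
The mvneta transmit changes above combine two things: byte queue limits (netdev_tx_sent_queue() on xmit, netdev_tx_completed_queue() in mvneta_txq_bufs_free(), netdev_tx_reset_queue() in mvneta_txq_deinit()) and doorbell batching keyed off skb->xmit_more, flushing once the deferred count would overflow the 255-descriptor MVNETA_TXQ_DEC_SENT_MASK field. A minimal sketch of the same pattern; the foo_* types and helpers are hypothetical, and skb->xmit_more is the flag of this kernel generation (later kernels use netdev_xmit_more()):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define FOO_MAX_PENDING	255	/* width of the hardware doorbell field */

struct foo_txq {
	int id;
	int count;		/* descriptors in flight */
	int pending;		/* filled but not yet announced to HW */
	int stop_threshold;
};

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_txq *txq = foo_pick_txq(dev, skb);		/* hypothetical */
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq->id);
	unsigned int len = skb->len;
	int frags = foo_fill_descriptors(txq, skb);		/* hypothetical */

	netdev_tx_sent_queue(nq, len);		/* BQL: account queued bytes */
	txq->count += frags;

	if (txq->count >= txq->stop_threshold)
		netif_tx_stop_queue(nq);

	/* Ring the doorbell only when the stack stops batching, the queue
	 * just stopped, or the deferred count would overflow the register
	 * field; foo_ring_doorbell() is assumed to announce
	 * txq->pending + frags and clear txq->pending.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(nq) ||
	    txq->pending + frags > FOO_MAX_PENDING)
		foo_ring_doorbell(txq, frags);			/* hypothetical */
	else
		txq->pending += frags;

	return NETDEV_TX_OK;
}

On the completion side the freed packet and byte counts go back through netdev_tx_completed_queue(), and netdev_tx_reset_queue() at queue teardown keeps the BQL accounting from leaking across an ifdown/ifup cycle, which is exactly what the mvneta hunks above add.
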
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4fe430ceb194..d00421b9ffea 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -154,6 +154,7 @@
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
@@ -252,12 +253,8 @@
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
-#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
- 0x400 + (port) * 0x400)
-#define MVPP2_MIB_LATE_COLLISION 0x7c
-#define MVPP2_ISR_SUM_MASK_REG 0x220c
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
-#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
@@ -513,28 +510,28 @@ enum mvpp2_tag_type {
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
-#define MVPP2_PRS_RI_VLAN_MASK 0xc
-#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
-#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
-#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
-#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
-#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
-#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
-#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
@@ -822,9 +819,6 @@ struct mvpp2_tx_queue {
/* Per-CPU control of physical Tx queues */
struct mvpp2_txq_pcpu __percpu *pcpu;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
-
u32 done_pkts_coal;
/* Virtual address of thex Tx DMA descriptors array */
@@ -924,6 +918,7 @@ struct mvpp2_bm_pool {
int buf_size;
/* Packet size */
int pkt_size;
+ int frag_size;
/* BPPE virtual base address */
u32 *virt_addr;
@@ -932,10 +927,6 @@ struct mvpp2_bm_pool {
/* Ports using BM pool */
u32 port_map;
-
- /* Occupied buffers indicator */
- atomic_t in_use;
- int in_use_thresh;
};
struct mvpp2_buff_hdr {
@@ -991,7 +982,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
txq_pcpu->buffs + txq_pcpu->txq_put_index;
tx_buf->skb = skb;
tx_buf->size = tx_desc->data_size;
- tx_buf->phys = tx_desc->buf_phys_addr;
+ tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3364,6 +3355,22 @@ static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pool->frag_size);
+ else
+ return kmalloc(pool->frag_size, GFP_ATOMIC);
+}
+
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ skb_free_frag(data);
+ else
+ kfree(data);
+}
+
/* Buffer Manager configuration routines */
/* Create pool */
@@ -3381,7 +3388,8 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
if (!bm_pool->virt_addr)
return -ENOMEM;
- if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+ if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
+ MVPP2_BM_POOL_PTR_ALIGN)) {
dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
bm_pool->phys_addr);
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
@@ -3401,7 +3409,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->size = size;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
- atomic_set(&bm_pool->in_use, 0);
return 0;
}
@@ -3427,7 +3434,7 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
for (i = 0; i < bm_pool->buf_num; i++) {
dma_addr_t buf_phys_addr;
- u32 vaddr;
+ unsigned long vaddr;
/* Get buffer virtual address (indirect access) */
buf_phys_addr = mvpp2_read(priv,
@@ -3439,7 +3446,8 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
if (!vaddr)
break;
- dev_kfree_skb_any((struct sk_buff *)vaddr);
+
+ mvpp2_frag_free(bm_pool, (void *)vaddr);
}
/* Update BM driver with number of buffers removed from pool */
@@ -3553,29 +3561,28 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
-/* Allocate skb for BM pool */
-static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *buf_phys_addr,
- gfp_t gfp_mask)
+static void *mvpp2_buf_alloc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *buf_phys_addr,
+ gfp_t gfp_mask)
{
- struct sk_buff *skb;
dma_addr_t phys_addr;
+ void *data;
- skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
- if (!skb)
+ data = mvpp2_frag_alloc(bm_pool);
+ if (!data)
return NULL;
- phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
+ phys_addr = dma_map_single(port->dev->dev.parent, data,
MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
- dev_kfree_skb_any(skb);
+ mvpp2_frag_free(bm_pool, data);
return NULL;
}
*buf_phys_addr = phys_addr;
- return skb;
+ return data;
}
/* Set pool number in a BM cookie */
@@ -3590,14 +3597,15 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
}
/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- u32 buf_phys_addr, u32 buf_virt_addr)
+ dma_addr_t buf_phys_addr,
+ unsigned long buf_virt_addr)
{
mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
@@ -3605,7 +3613,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
- u32 buf_phys_addr, u32 buf_virt_addr,
+ dma_addr_t buf_phys_addr,
+ unsigned long buf_virt_addr,
int mc_id)
{
u32 val = 0;
@@ -3620,7 +3629,8 @@ static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
- u32 phys_addr, u32 cookie)
+ dma_addr_t phys_addr,
+ unsigned long cookie)
{
int pool = mvpp2_bm_cookie_pool_get(bm);
@@ -3631,10 +3641,9 @@ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
{
- struct sk_buff *skb;
int i, buf_size, total_size;
- u32 bm;
dma_addr_t phys_addr;
+ void *buf;
buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
@@ -3647,18 +3656,17 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
return 0;
}
- bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
for (i = 0; i < buf_num; i++) {
- skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
- if (!skb)
+ buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+ if (!buf)
break;
- mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+ mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
+ (unsigned long)buf);
}
/* Update BM driver with number of buffers added to pool */
bm_pool->buf_num += i;
- bm_pool->in_use_thresh = bm_pool->buf_num / 4;
netdev_dbg(port->dev,
"%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
@@ -3710,6 +3718,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
port->priv, new_pool);
new_pool->pkt_size = pkt_size;
+ new_pool->frag_size =
+ SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
/* Allocate buffers for this pool */
num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
@@ -3778,6 +3789,8 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
}
port_pool->pkt_size = pkt_size;
+ port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
+ MVPP2_SKB_SHINFO_SIZE;
num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
@@ -4379,27 +4392,50 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
* will be generated by HW.
*/
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq, u32 pkts)
+ struct mvpp2_rx_queue *rxq)
{
- u32 val;
+ if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
+ rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
+}
+
+static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
+{
+ u64 tmp = (u64)clk_hz * usec;
+
+ do_div(tmp, USEC_PER_SEC);
+
+ return tmp > U32_MAX ? U32_MAX : tmp;
+}
+
+static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
+{
+ u64 tmp = (u64)cycles * USEC_PER_SEC;
+
+ do_div(tmp, clk_hz);
- rxq->pkts_coal = pkts;
+ return tmp > U32_MAX ? U32_MAX : tmp;
}
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
- struct mvpp2_rx_queue *rxq, u32 usec)
+ struct mvpp2_rx_queue *rxq)
{
- u32 val;
+ unsigned long freq = port->priv->tclk;
+ u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
- val = (port->priv->tclk / USEC_PER_SEC) * usec;
- mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+ if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+ rxq->time_coal =
+ mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
+
+ /* re-evaluate to get actual register value */
+ val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
+ }
- rxq->time_coal = usec;
+ mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
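
The Rx time-coalescing rework above keeps rxq->time_coal in microseconds and converts it to tclk cycles only when writing MVPP2_ISR_RX_THRESHOLD_REG, clamping at MVPP2_MAX_ISR_RX_THRESHOLD and re-deriving time_coal so ethtool reports the value actually programmed. A rough worked example, assuming an illustrative 250 MHz tclk (the real rate comes from port->priv->tclk and is not stated in this diff):

	cycles     = tclk_hz * usec / USEC_PER_SEC
	           = 250,000,000 * 100 / 1,000,000          = 25,000 cycles for 100 us
	cycles_max = MVPP2_MAX_ISR_RX_THRESHOLD = 0xfffff0   = 16,777,200 cycles
	usec_max   = 16,777,200 * 1,000,000 / 250,000,000    ~= 67,108 us (about 67 ms)

So at that clock any request above roughly 67 ms is clamped, and the clamped value is converted back through mvpp2_cycles_to_usec() before the register write, keeping ethtool -c and the hardware in agreement.
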
/* Free Tx queue skbuffs */
@@ -4413,13 +4449,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- mvpp2_txq_inc_get(txq_pcpu);
-
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
tx_buf->size, DMA_TO_DEVICE);
- if (!tx_buf->skb)
- continue;
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
}
}
@@ -4543,8 +4578,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
/* Set coalescing pkts and time */
- mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
- mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -4994,23 +5029,18 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- u32 bm, int is_recycle)
+ struct mvpp2_bm_pool *bm_pool, u32 bm)
{
- struct sk_buff *skb;
dma_addr_t phys_addr;
-
- if (is_recycle &&
- (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
- return 0;
+ void *buf;
/* No recycle or too many buffers are in use, so allocate a new skb */
- skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
- if (!skb)
+ buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+ if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
- atomic_dec(&bm_pool->in_use);
+ mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
+
return 0;
}
@@ -5051,10 +5081,10 @@ static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
struct mvpp2_buff_hdr *buff_hdr;
struct sk_buff *skb;
u32 rx_status = rx_desc->status;
- u32 buff_phys_addr;
- u32 buff_virt_addr;
- u32 buff_phys_addr_next;
- u32 buff_virt_addr_next;
+ dma_addr_t buff_phys_addr;
+ unsigned long buff_virt_addr;
+ dma_addr_t buff_phys_addr_next;
+ unsigned long buff_virt_addr_next;
int mc_id;
int pool_id;
@@ -5101,14 +5131,17 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
+ unsigned int frag_size;
dma_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
+ void *data;
rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
phys_addr = rx_desc->buf_phys_addr;
+ data = (void *)(uintptr_t)rx_desc->buf_cookie;
bm = mvpp2_bm_cookie_build(rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5129,14 +5162,24 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
+
mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
rx_desc->buf_cookie);
continue;
}
- skb = (struct sk_buff *)rx_desc->buf_cookie;
+ if (bm_pool->frag_size > PAGE_SIZE)
+ frag_size = 0;
+ else
+ frag_size = bm_pool->frag_size;
+
+ skb = build_skb(data, frag_size);
+ if (!skb) {
+ netdev_warn(port->dev, "skb build failed\n");
+ goto err_drop_frame;
+ }
- err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+ err = mvpp2_rx_refill(port, bm_pool, bm);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
goto err_drop_frame;
@@ -5147,9 +5190,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
rcvd_pkts++;
rcvd_bytes += rx_bytes;
- atomic_inc(&bm_pool->in_use);
- skb_reserve(skb, MVPP2_MH_SIZE);
+ skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvpp2_rx_csum(port, rx_status, skb);
@@ -5405,7 +5447,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
if (budget > 0) {
cause_rx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
mvpp2_interrupts_enable(port);
}
@@ -5739,7 +5781,7 @@ error:
return err;
}
-static struct rtnl_link_stats64 *
+static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -5771,8 +5813,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
-
- return stats;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -5803,8 +5843,8 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
rxq->time_coal = c->rx_coalesce_usecs;
rxq->pkts_coal = c->rx_max_coalesced_frames;
- mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
- mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+ mvpp2_rx_pkts_coal_set(port, rxq);
+ mvpp2_rx_time_coal_set(port, rxq);
}
for (queue = 0; queue < txq_number; queue++) {
@@ -5973,8 +6013,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2_tx_queue *txq;
txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return -ENOMEM;
+ if (!txq) {
+ err = -ENOMEM;
+ goto err_free_percpu;
+ }
txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
if (!txq->pcpu) {
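
The mvpp2 RX path above stops pre-allocating full sk_buffs for the BM pools: buffers come from netdev_alloc_frag() (or kmalloc() when frag_size exceeds a page), and the skb is only assembled around the received data with build_skb(). A minimal sketch of the receive-side half of that pattern; it is not the driver's code, just the same build_skb() contract, where frag_size == 0 signals a kmalloc'ed buffer:

#include <linux/skbuff.h>

/* data was handed to the hardware earlier; frag_size covers the full RX
 * buffer plus the trailing skb_shared_info, as in the mvpp2 pool setup.
 */
static struct sk_buff *foo_build_rx_skb(void *data, unsigned int frag_size,
					unsigned int hdr_pad,
					unsigned int rx_bytes)
{
	struct sk_buff *skb;

	skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
	if (!skb)
		return NULL;		/* caller returns the buffer to the pool */

	skb_reserve(skb, hdr_pad);	/* e.g. MVPP2_MH_SIZE + NET_SKB_PAD */
	skb_put(skb, rx_bytes);
	return skb;
}
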
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3af2814ada23..28cb36d9e50a 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -274,8 +274,6 @@ enum hash_table_entry {
HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
-static int pxa168_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
@@ -987,10 +985,6 @@ static int pxa168_init_phy(struct net_device *dev)
if (err)
return err;
- err = pxa168_get_link_ksettings(dev, &cmd);
- if (err)
- return err;
-
cmd.base.phy_address = pep->phy_addr;
cmd.base.speed = pep->phy_speed;
cmd.base.duplex = pep->phy_duplex;
@@ -1261,7 +1255,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
}
work_done = rxq_process(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
wrl(pep, INT_MASK, ALL_INTS);
}
@@ -1370,18 +1364,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
return -EOPNOTSUPP;
}
-static int pxa168_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- int err;
-
- err = phy_read_status(dev->phydev);
- if (err == 0)
- err = phy_ethtool_ksettings_get(dev->phydev, cmd);
-
- return err;
-}
-
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1396,7 +1378,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
- .get_link_ksettings = pxa168_get_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
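
With a phylib-managed PHY the ethtool link settings can be serviced entirely by the generic helpers, which is why the pxa168 wrapper (and the extra call in mv643xx above) around phy_read_status() + phy_ethtool_ksettings_get() could be dropped, presumably because phylib already keeps the reported state current. The resulting ops block is just the generic plumbing, along these lines:

static const struct ethtool_ops foo_ethtool_ops = {
	.nway_reset         = phy_ethtool_nway_reset,
	.get_link           = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
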
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9146a514fb33..edb95271a4f2 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
return supported;
}
-static int skge_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int skge_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = skge_supported_modes(hw);
+ supported = skge_supported_modes(hw);
if (hw->copper) {
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy_addr;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
} else
- ecmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+
+ advertising = skge->advertising;
+ cmd->base.autoneg = skge->autoneg;
+ cmd->base.speed = skge->speed;
+ cmd->base.duplex = skge->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- ecmd->advertising = skge->advertising;
- ecmd->autoneg = skge->autoneg;
- ethtool_cmd_speed_set(ecmd, skge->speed);
- ecmd->duplex = skge->duplex;
return 0;
}
-static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int skge_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct skge_port *skge = netdev_priv(dev);
const struct skge_hw *hw = skge->hw;
u32 supported = skge_supported_modes(hw);
int err = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- ecmd->advertising = supported;
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ advertising = supported;
skge->duplex = -1;
skge->speed = -1;
} else {
u32 setting;
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
switch (speed) {
case SPEED_1000:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_1000baseT_Half;
else
return -EINVAL;
break;
case SPEED_100:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_100baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_100baseT_Half;
else
return -EINVAL;
break;
case SPEED_10:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_10baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_10baseT_Half;
else
return -EINVAL;
@@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
skge->speed = speed;
- skge->duplex = ecmd->duplex;
+ skge->duplex = cmd->base.duplex;
}
- skge->autoneg = ecmd->autoneg;
- skge->advertising = ecmd->advertising;
+ skge->autoneg = cmd->base.autoneg;
+ skge->advertising = advertising;
if (netif_running(dev)) {
skge_down(dev);
@@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
}
static const struct ethtool_ops skge_ethtool_ops = {
- .get_settings = skge_get_settings,
- .set_settings = skge_set_settings,
.get_drvinfo = skge_get_drvinfo,
.get_regs_len = skge_get_regs_len,
.get_regs = skge_get_regs,
@@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
.set_phys_id = skge_set_phys_id,
.get_sset_count = skge_get_sset_count,
.get_ethtool_stats = skge_get_ethtool_stats,
+ .get_link_ksettings = skge_get_link_ksettings,
+ .set_link_ksettings = skge_set_link_ksettings,
};
/*
@@ -3190,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev)
}
}
-static int skge_poll(struct napi_struct *napi, int to_do)
+static int skge_poll(struct napi_struct *napi, int budget)
{
struct skge_port *skge = container_of(napi, struct skge_port, napi);
struct net_device *dev = skge->netdev;
@@ -3203,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
- for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+ for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
u32 control;
@@ -3225,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do)
wmb();
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
- if (work_done < to_do) {
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
unsigned long flags;
- napi_gro_flush(napi, false);
spin_lock_irqsave(&hw->hw_lock, flags);
- __napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
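
The skge conversion above (and the sky2 one below) moves from the legacy get_settings/set_settings ethtool hooks to the link_ksettings API while keeping the drivers' internal supported/advertising masks as legacy u32 bitmaps, translating at the boundary with the two ethtool_convert_*() helpers. A minimal sketch of which direction each helper is used in; the foo_* callbacks and the example mode values are hypothetical:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full;
	u32 advertising = ADVERTISED_Autoneg | ADVERTISED_1000baseT_Full;

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* legacy u32 bitmap -> link_modes bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

static int foo_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	u32 advertising;

	/* link_modes bitmap -> legacy u32; modes the u32 cannot express
	 * are dropped here, which is fine for these 1G-only MACs.
	 */
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* ... validate advertising against the supported mask and program
	 * the MAC/PHY, as skge_set_link_ksettings() does above ...
	 */
	return 0;
}
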
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b60ad0e56a9f..2b2cc3f3ca10 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
sky2->rx_stats.bytes += bytes;
u64_stats_update_end(&sky2->rx_stats.syncp);
- dev->last_rx = jiffies;
+ sky2->last_rx = jiffies;
sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
}
@@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev)
u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
/* If idle and MAC or PCI is stuck */
- if (sky2->check.last == dev->last_rx &&
+ if (sky2->check.last == sky2->last_rx &&
((mac_rp == sky2->check.mac_rp &&
mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
/* Check if the PCI RX hang */
@@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev)
fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
return 1;
} else {
- sky2->check.last = dev->last_rx;
+ sky2->check.last = sky2->last_rx;
sky2->check.mac_rp = mac_rp;
sky2->check.mac_lev = mac_lev;
sky2->check.fifo_rp = fifo_rp;
@@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
| SUPPORTED_1000baseT_Full;
}
-static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = sky2_supported_modes(hw);
- ecmd->phy_address = PHY_ADDR_MARV;
+ supported = sky2_supported_modes(hw);
+ cmd->base.phy_address = PHY_ADDR_MARV;
if (sky2_is_copper(hw)) {
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, sky2->speed);
- ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
+ cmd->base.port = PORT_TP;
+ cmd->base.speed = sky2->speed;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- ecmd->port = PORT_FIBRE;
- ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.port = PORT_FIBRE;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
- ecmd->advertising = sky2->advertising;
- ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ advertising = sky2->advertising;
+ cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
? AUTONEG_ENABLE : AUTONEG_DISABLE;
- ecmd->duplex = sky2->duplex;
+ cmd->base.duplex = sky2->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
u32 supported = sky2_supported_modes(hw);
+ u32 new_advertising;
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- if (ecmd->advertising & ~supported)
+ ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (new_advertising & ~supported)
return -EINVAL;
if (sky2_is_copper(hw))
- sky2->advertising = ecmd->advertising |
+ sky2->advertising = new_advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
else
- sky2->advertising = ecmd->advertising |
+ sky2->advertising = new_advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
@@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->speed = -1;
} else {
u32 setting;
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
switch (speed) {
case SPEED_1000:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_1000baseT_Half;
else
return -EINVAL;
break;
case SPEED_100:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_100baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_100baseT_Half;
else
return -EINVAL;
break;
case SPEED_10:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_10baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_10baseT_Half;
else
return -EINVAL;
@@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
sky2->speed = speed;
- sky2->duplex = ecmd->duplex;
+ sky2->duplex = cmd->base.duplex;
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
@@ -3888,8 +3900,8 @@ static void sky2_set_multicast(struct net_device *dev)
gma_write16(hw, port, GM_RX_CTRL, reg);
}
-static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void sky2_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -3929,8 +3941,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
stats->rx_dropped = dev->stats.rx_dropped;
stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
-
- return stats;
}
/* Can have one global because blinking is controlled by
@@ -4407,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
}
static const struct ethtool_ops sky2_ethtool_ops = {
- .get_settings = sky2_get_settings,
- .set_settings = sky2_set_settings,
.get_drvinfo = sky2_get_drvinfo,
.get_wol = sky2_get_wol,
.set_wol = sky2_set_wol,
@@ -4431,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.set_phys_id = sky2_set_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .get_link_ksettings = sky2_get_link_ksettings,
+ .set_link_ksettings = sky2_set_link_ksettings,
};
#ifdef CONFIG_SKY2_DEBUG
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ec6dcd80152b..0fe160796842 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2247,6 +2247,7 @@ struct sky2_port {
u16 rx_data_size;
u16 rx_nfrags;
+ unsigned long last_rx;
struct {
unsigned long last;
u32 mac_rp;
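
The sky2 hunks above stop reading dev->last_rx and track the timestamp in the driver-private sky2->last_rx added in sky2.h, presumably so the RX-hang watchdog keeps working once the core no longer maintains net_device::last_rx for every driver. A simplified sketch of the pattern, with hypothetical foo_* names:

struct foo_port {
	unsigned long last_rx;		/* set to jiffies on each completed RX */
	unsigned long check_last;	/* snapshot from the previous watchdog run */
};

static bool foo_rx_hung(struct foo_port *fp)
{
	/* Nothing arrived since the previous check while the RX FIFO still
	 * reports pending data: treat the MAC/DMA as stuck (simplified
	 * version of the sky2_rx_hung() check above).
	 */
	if (fp->check_last == fp->last_rx && foo_rx_fifo_busy(fp))
		return true;

	fp->check_last = fp->last_rx;
	return false;
}
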