author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-06 18:39:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-06 18:39:49 -0700
commit     1c8c5a9d38f607c0b6fd12c91cbe1a4418762a21 (patch)
tree       dcc97181d4d187252e0cc8fdf29d9b365fa3ffd0 /drivers/net/ethernet/hisilicon
parent     285767604576148fc1be7fcd112e4a90eb0d6ad2 (diff)
parent     7170e6045a6a8b33f4fa5753589dc77b16198e2d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add Maglev hashing scheduler to IPVS, from Inju Song.
 2) Lots of new TC subsystem tests from Roman Mashak.
 3) Add TCP zero copy receive and fix delayed acks and autotuning with SO_RCVLOWAT, from Eric Dumazet.
 4) Add XDP_REDIRECT support to mlx5 driver, from Jesper Dangaard Brouer.
 5) Add ttl inherit support to vxlan, from Hangbin Liu.
 6) Properly separate ipv6 routes into their logically independent components: fib6_info for the routing table, and fib6_nh for sets of nexthops, which thus can be shared. From David Ahern.
 7) Add bpf_xdp_adjust_tail helper, which can be used to generate ICMP messages from XDP programs. From Nikita V. Shirokov.
 8) Lots of long overdue cleanups to the r8169 driver, from Heiner Kallweit.
 9) Add BTF ("BPF Type Format"), from Martin KaFai Lau.
10) Add traffic condition monitoring to iwlwifi, from Luca Coelho.
11) Plumb extack down into fib_rules, from Roopa Prabhu.
12) Add Flower classifier offload support to igb, from Vinicius Costa Gomes.
13) Add UDP GSO support, from Willem de Bruijn.
14) Add documentation for eBPF helpers, from Quentin Monnet.
15) Add TLS tx offload to mlx5, from Ilya Lesokhin.
16) Allow applications to be given the number of bytes available to read on a socket via a control message returned from recvmsg(), from Soheil Hassas Yeganeh.
17) Add x86_32 eBPF JIT compiler, from Wang YanQing.
18) Add AF_XDP sockets, with zerocopy support infrastructure as well. From Björn Töpel.
19) Remove indirect load support from all of the BPF JITs and handle these operations in the verifier by translating them into native BPF instead. From Daniel Borkmann.
20) Add GRO support to ipv6 gre tunnels, from Eran Ben Elisha.
21) Allow XDP programs to do lookups in the main kernel routing tables for forwarding. From David Ahern.
22) Allow drivers to store hardware state into an ELF section of kernel dump vmcore files, and use it in cxgb4. From Rahul Lakkireddy.
23) Various RACK and loss detection improvements in TCP, from Yuchung Cheng.
24) Add TCP SACK compression, from Eric Dumazet.
25) Add User Mode Helper support and basic bpfilter infrastructure, from Alexei Starovoitov.
26) Support ports and protocol values in RTM_GETROUTE, from Roopa Prabhu.
27) Support bulking in ->ndo_xdp_xmit() API, from Jesper Dangaard Brouer.
28) Add lots of forwarding selftests, from Petr Machata.
29) Add generic network device failover driver, from Sridhar Samudrala.

* ra.kernel.org:/pub/scm/linux/kernel/git/davem/net-next: (1959 commits)
  strparser: Add __strp_unpause and use it in ktls.
  rxrpc: Fix terminal retransmission connection ID to include the channel
  net: hns3: Optimize PF CMDQ interrupt switching process
  net: hns3: Fix for VF mailbox receiving unknown message
  net: hns3: Fix for VF mailbox cannot receiving PF response
  bnx2x: use the right constant
  Revert "net: sched: cls: Fix offloading when ingress dev is vxlan"
  net: dsa: b53: Fix for brcm tag issue in Cygnus SoC
  enic: fix UDP rss bits
  netdev-FAQ: clarify DaveM's position for stable backports
  rtnetlink: validate attributes in do_setlink()
  mlxsw: Add extack messages for port_{un, }split failures
  netdevsim: Add extack error message for devlink reload
  devlink: Add extack to reload and port_{un, }split operations
  net: metrics: add proper netlink validation
  ipmr: fix error path when ipmr_new_table fails
  ip6mr: only set ip6mr_table from setsockopt when ip6mr_new_table succeeds
  net: hns3: remove unused hclgevf_cfg_func_mta_filter
  netfilter: provide udp*_lib_lookup for nf_tproxy
  qed*: Utilize FW 8.37.2.0
  ...
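Among the items above, UDP GSO (item 13) is one of the few with a direct userspace knob. As an illustration only — this is not part of the hisilicon diff below — here is a minimal sketch of opting a socket into it via the UDP_SEGMENT socket option this cycle introduces; the peer address, port, and buffer sizes are hypothetical:

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef UDP_SEGMENT
    #define UDP_SEGMENT 103  /* from linux/udp.h on kernels with UDP GSO */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int gso_size = 1400;     /* payload bytes per segment on the wire */
        char buf[4 * 1400];      /* one large send, segmented by the stack */
        struct sockaddr_in dst = {
            .sin_family = AF_INET,
            .sin_port = htons(9000),                 /* hypothetical peer */
            .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };

        if (fd < 0)
            return 1;

        /* Opt in to UDP GSO: large writes are split into gso_size chunks */
        if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                       &gso_size, sizeof(gso_size)) < 0)
            return 1;    /* kernel predates UDP GSO */

        memset(buf, 0xab, sizeof(buf));
        sendto(fd, buf, sizeof(buf), 0,
               (struct sockaddr *)&dst, sizeof(dst));
        close(fd);
        return 0;
    }

With gso_size set, the single 5600-byte sendto() leaves the socket as four 1400-byte UDP datagrams, letting the stack (or capable hardware) amortize per-packet costs across the batch.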
Diffstat (limited to 'drivers/net/ethernet/hisilicon')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c        |  23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h           |   2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c               |  88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h               |  20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c           | 565
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h           |   6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c        |  25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c    |  29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h    |  22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c   | 694
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h   |  43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c    |  98
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c   |   7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c     |  50
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h     |   5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 190
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h |   6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c  |  23
18 files changed, 1207 insertions(+), 689 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e0bc79ea3d880..85e1d14514fc8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1648,6 +1648,15 @@ int hns_dsaf_rm_mac_addr(
mac_entry->addr);
}
+static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev,
+ u8 port_num, u8 *mask, u8 *addr)
+{
+ if (MAC_IS_BROADCAST(addr))
+ memset(mask, 0xff, ETH_ALEN);
+ else
+ memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN);
+}
+
static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
{
u16 *a = (u16 *)dst;
@@ -1676,7 +1685,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_tbl_tcam_key tmp_mac_key;
struct dsaf_tbl_tcam_data tcam_data;
u8 mc_addr[ETH_ALEN];
- u8 *mc_mask;
int mskid;
/*chechk mac addr */
@@ -1687,9 +1695,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
}
ether_addr_copy(mc_addr, mac_entry->addr);
- mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;
if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
/* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
/* config key mask */
@@ -1844,7 +1855,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
u8 mc_addr[ETH_ALEN];
- u8 *mc_mask;
if (!(void *)mac_entry) {
dev_err(dsaf_dev->dev,
@@ -1861,14 +1871,17 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* always mask vlan_id field */
ether_addr_copy(mc_addr, mac_entry->addr);
- mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask;
if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
/* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
/* config key mask */
- hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr);
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
mask_key.high.val = le32_to_cpu(mask_key.high.val);
mask_key.low.val = le32_to_cpu(mask_key.low.val);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 519e2bd6aa60e..be9dc08ccf678 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
+ HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
+ HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 02145f2de8203..9d79dad2c6aae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -36,6 +36,49 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
return false;
}
+static void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev, int inited)
+{
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ break;
+ case HNAE3_CLIENT_UNIC:
+ hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hnae3_get_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ int inited = 0;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ inited = hnae_get_bit(ae_dev->flag,
+ HNAE3_KNIC_CLIENT_INITED_B);
+ break;
+ case HNAE3_CLIENT_UNIC:
+ inited = hnae_get_bit(ae_dev->flag,
+ HNAE3_UNIC_CLIENT_INITED_B);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ inited = hnae_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
+ break;
+ default:
+ break;
+ }
+
+ return inited;
+}
+
static int hnae3_match_n_instantiate(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, bool is_reg)
{
@@ -50,13 +93,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret)
+ if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail to instantiate client\n");
- return ret;
+ return ret;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+ return 0;
+ }
+
+ if (hnae3_get_client_init_flag(client, ae_dev)) {
+ ae_dev->ops->uninit_client_instance(client, ae_dev);
+
+ hnae3_set_client_init_flag(client, ae_dev, 0);
}
- ae_dev->ops->uninit_client_instance(client, ae_dev);
return 0;
}
@@ -89,7 +141,7 @@ int hnae3_register_client(struct hnae3_client *client)
exit:
mutex_unlock(&hnae3_common_lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(hnae3_register_client);
@@ -112,7 +164,7 @@ EXPORT_SYMBOL(hnae3_unregister_client);
* @ae_algo: AE algorithm
* NOTE: the duplicated name will not be checked
*/
-int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
{
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
@@ -151,8 +203,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
}
mutex_unlock(&hnae3_common_lock);
-
- return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_algo);
@@ -168,6 +218,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id)
continue;
@@ -191,22 +244,14 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device
* NOTE: the duplicated name will not be checked
*/
-int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
+void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0, lock_acquired;
+ int ret = 0;
- /* we can get deadlocked if SRIOV is being enabled in context to probe
- * and probe gets called again in same context. This can happen when
- * pci_enable_sriov() is called to create VFs from PF probes context.
- * Therefore, for simplicity uniformly defering further probing in all
- * cases where we detect contention.
- */
- lock_acquired = mutex_trylock(&hnae3_common_lock);
- if (!lock_acquired)
- return -EPROBE_DEFER;
+ mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -220,7 +265,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
- ret = -EOPNOTSUPP;
goto out_err;
}
@@ -247,8 +291,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
out_err:
mutex_unlock(&hnae3_common_lock);
-
- return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);
@@ -264,6 +306,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id)
continue;
@@ -283,3 +328,4 @@ EXPORT_SYMBOL(hnae3_unregister_ae_dev);
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
+MODULE_VERSION(HNAE3_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 37ec1b3286c6d..8acb1d116a028 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -36,6 +36,8 @@
#include <linux/pci.h>
#include <linux/types.h>
+#define HNAE3_MOD_VERSION "1.0"
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -52,6 +54,9 @@
#define HNAE3_DEV_INITED_B 0x0
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+#define HNAE3_KNIC_CLIENT_INITED_B 0x3
+#define HNAE3_UNIC_CLIENT_INITED_B 0x4
+#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -273,10 +278,6 @@ struct hnae3_ae_dev {
* Map rings to vector
* unmap_ring_from_vector()
* Unmap rings from vector
- * add_tunnel_udp()
- * Add tunnel information to hardware
- * del_tunnel_udp()
- * Delete tunnel information from hardware
* reset_queue()
* Reset queue
* get_fw_version()
@@ -315,7 +316,8 @@ struct hnae3_ae_ops {
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
- void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en);
+ void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -351,6 +353,7 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
+ int (*update_mta_status)(struct hnae3_handle *handle);
void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
@@ -388,9 +391,6 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
- int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
- int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
-
void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
@@ -521,11 +521,11 @@ struct hnae3_handle {
#define hnae_get_bit(origin, shift) \
hnae_get_field((origin), (0x1 << (shift)), (shift))
-int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
-int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c55965a66ac3..f2b31d278bc9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -25,6 +25,9 @@
#include "hnae3.h"
#include "hns3_enet.h"
+static void hns3_clear_all_ring(struct hnae3_handle *h);
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
@@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
int i, j;
int ret;
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
@@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
+ /* disable vectors */
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
ops->stop(priv->ae_handle);
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
/* free irq resources */
hns3_nic_uninit_irq(priv);
+
+ hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
@@ -406,15 +415,21 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
if (h->ae_algo->ops->set_promisc_mode) {
if (netdev->flags & IFF_PROMISC)
- h->ae_algo->ops->set_promisc_mode(h, 1);
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ else if (netdev->flags & IFF_ALLMULTI)
+ h->ae_algo->ops->set_promisc_mode(h, false, true);
else
- h->ae_algo->ops->set_promisc_mode(h, 0);
+ h->ae_algo->ops->set_promisc_mode(h, false, false);
}
if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
netdev_err(netdev, "sync uc address fail\n");
- if (netdev->flags & IFF_MULTICAST)
+ if (netdev->flags & IFF_MULTICAST) {
if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
netdev_err(netdev, "sync mc address fail\n");
+
+ if (h->ae_algo->ops->update_mta_status)
+ h->ae_algo->ops->update_mta_status(h);
+ }
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
@@ -502,7 +517,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
/* find outer header point */
l3.hdr = skb_network_header(skb);
- l4_hdr = skb_inner_transport_header(skb);
+ l4_hdr = skb_transport_header(skb);
if (skb->protocol == htons(ETH_P_IPV6)) {
exthdr = l3.hdr + sizeof(*l3.v6);
@@ -644,6 +659,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
}
}
+/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
+ * and it is udp packet, which has a dest port as the IANA assigned.
+ * the hardware is expected to do the checksum offload, but the
+ * hardware will not do the checksum offload when udp dest port is
+ * 4789.
+ */
+static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+{
+#define IANA_VXLAN_PORT 4789
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ struct gre_base_hdr *gre;
+ unsigned char *hdr;
+ } l4;
+
+ l4.hdr = skb_transport_header(skb);
+
+ if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ return false;
+
+ skb_checksum_help(skb);
+
+ return true;
+}
+
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
@@ -732,6 +773,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
+ if (hns3_tunnel_csum_bug(skb))
+ break;
+
hnae_set_field(*type_cs_vlan_tso,
HNS3_TXD_L4T_M,
HNS3_TXD_L4T_S,
@@ -1121,6 +1165,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
return -EADDRNOTAVAIL;
+ if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
+ netdev_info(netdev, "already using mac address %pM\n",
+ mac_addr->sa_data);
+ return 0;
+ }
+
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
if (ret) {
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1244,93 +1294,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
stats->tx_compressed = netdev->stats.tx_compressed;
}
-static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
- enum hns3_udp_tnl_type type)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
- struct hnae3_handle *h = priv->ae_handle;
-
- if (udp_tnl->used && udp_tnl->dst_port == port) {
- udp_tnl->used++;
- return;
- }
-
- if (udp_tnl->used) {
- netdev_warn(netdev,
- "UDP tunnel [%d], port [%d] offload\n", type, port);
- return;
- }
-
- udp_tnl->dst_port = port;
- udp_tnl->used = 1;
- /* TBD send command to hardware to add port */
- if (h->ae_algo->ops->add_tunnel_udp)
- h->ae_algo->ops->add_tunnel_udp(h, port);
-}
-
-static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
- enum hns3_udp_tnl_type type)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
- struct hnae3_handle *h = priv->ae_handle;
-
- if (!udp_tnl->used || udp_tnl->dst_port != port) {
- netdev_warn(netdev,
- "Invalid UDP tunnel port %d\n", port);
- return;
- }
-
- udp_tnl->used--;
- if (udp_tnl->used)
- return;
-
- udp_tnl->dst_port = 0;
- /* TBD send command to hardware to del port */
- if (h->ae_algo->ops->del_tunnel_udp)
- h->ae_algo->ops->del_tunnel_udp(h, port);
-}
-
-/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
- * @netdev: This physical ports's netdev
- * @ti: Tunnel information
- */
-static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- u16 port_n = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
- break;
- default:
- netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
- break;
- }
-}
-
-static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- u16 port_n = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
- break;
- default:
- break;
- }
-}
-
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
@@ -1569,13 +1532,50 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_get_stats64 = hns3_nic_get_stats64,
.ndo_setup_tc = hns3_nic_setup_tc,
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
- .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
- .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
+static bool hns3_is_phys_func(struct pci_dev *pdev)
+{
+ u32 dev_id = pdev->device;
+
+ switch (dev_id) {
+ case HNAE3_DEV_ID_GE:
+ case HNAE3_DEV_ID_25GE:
+ case HNAE3_DEV_ID_25GE_RDMA:
+ case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_50GE_RDMA:
+ case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ return true;
+ case HNAE3_DEV_ID_100G_VF:
+ case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+ return false;
+ default:
+ dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+ dev_id);
+ }
+
+ return false;
+}
+
+static void hns3_disable_sriov(struct pci_dev *pdev)
+{
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "disabling driver while VFs are assigned\n");
+ return;
+ }
+
+ pci_disable_sriov(pdev);
+}
+
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -1603,7 +1603,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->dev_type = HNAE3_DEV_KNIC;
pci_set_drvdata(pdev, ae_dev);
- return hnae3_register_ae_dev(ae_dev);
+ hnae3_register_ae_dev(ae_dev);
+
+ return 0;
}
/* hns3_remove - Device removal routine
@@ -1613,21 +1615,56 @@ static void hns3_remove(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
+ hns3_disable_sriov(pdev);
+
hnae3_unregister_ae_dev(ae_dev);
}
+/**
+ * hns3_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int ret;
+
+ if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
+ dev_warn(&pdev->dev, "Can not config SRIOV\n");
+ return -EINVAL;
+ }
+
+ if (num_vfs) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
+ else
+ return num_vfs;
+ } else if (!pci_vfs_assigned(pdev)) {
+ pci_disable_sriov(pdev);
+ } else {
+ dev_warn(&pdev->dev,
+ "Unable to free VFs because some are assigned to VMs.\n");
+ }
+
+ return 0;
+}
+
static struct pci_driver hns3_driver = {
.name = hns3_driver_name,
.id_table = hns3_pci_tbl,
.probe = hns3_probe,
.remove = hns3_remove,
+ .sriov_configure = hns3_pci_sriov_configure,
};
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
- struct hnae3_handle *h = hns3_get_handle(netdev);
-
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1656,15 +1693,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->hw_features |=
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -1836,6 +1869,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
@@ -1843,6 +1877,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring->desc_cb[i].reuse_flag = 0;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
@@ -1971,106 +2006,6 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
-/* hns3_nic_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- */
-static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
- unsigned int max_size)
-{
- unsigned char *network;
- u8 hlen;
-
- /* This should never happen, but better safe than sorry */
- if (max_size < ETH_HLEN)
- return max_size;
-
- /* Initialize network frame pointer */
- network = data;
-
- /* Set first protocol and move network header forward */
- network += ETH_HLEN;
-
- /* Handle any vlan tag if present */
- if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
- == HNS3_RX_FLAG_VLAN_PRESENT) {
- if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
- return max_size;
-
- network += VLAN_HLEN;
- }
-
- /* Handle L3 protocols */
- if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
- == HNS3_RX_FLAG_L3ID_IPV4) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct iphdr)))
- return max_size;
-
- /* Access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (network[0] & 0x0F) << 2;
-
- /* Verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return network - data;
-
- /* Record next protocol if header is present */
- } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
- == HNS3_RX_FLAG_L3ID_IPV6) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct ipv6hdr)))
- return max_size;
-
- /* Record next protocol */
- hlen = sizeof(struct ipv6hdr);
- } else {
- return network - data;
- }
-
- /* Relocate pointer to start of L4 header */
- network += hlen;
-
- /* Finally sort out TCP/UDP */
- if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
- == HNS3_RX_FLAG_L4ID_TCP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct tcphdr)))
- return max_size;
-
- /* Access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (network[12] & 0xF0) >> 2;
-
- /* Verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return network - data;
-
- network += hlen;
- } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
- == HNS3_RX_FLAG_L4ID_UDP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct udphdr)))
- return max_size;
-
- network += sizeof(struct udphdr);
- }
-
- /* If everything has gone correctly network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((typeof(max_size))(network - data) < max_size)
- return network - data;
- else
- return max_size;
-}
-
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2183,6 +2118,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
+static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info)
+{
+ struct pci_dev *pdev = ring->tqp->handle->pdev;
+ u16 vlan_tag;
+
+ if (pdev->revision == 0x20) {
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(vlan_tag & VLAN_VID_MASK))
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return vlan_tag;
+ }
+
+#define HNS3_STRP_OUTER_VLAN 0x1
+#define HNS3_STRP_INNER_VLAN 0x2
+
+ switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
+ case HNS3_STRP_OUTER_VLAN:
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ break;
+ case HNS3_STRP_INNER_VLAN:
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ break;
+ default:
+ vlan_tag = 0;
+ break;
+ }
+
+ return vlan_tag;
+}
+
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -2202,9 +2170,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetch(desc);
- length = le16_to_cpu(desc->rx.pkt_len);
+ length = le16_to_cpu(desc->rx.size);
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- l234info = le32_to_cpu(desc->rx.l234_info);
/* Check valid BD */
if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
@@ -2238,22 +2205,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
-
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- if (!(vlan_tag & VLAN_VID_MASK))
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
- }
-
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2270,8 +2221,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- pull_len = hns3_nic_get_headlen(va, l234info,
- HNS3_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
@@ -2290,6 +2241,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
*out_bnum = bnum;
+ l234info = le32_to_cpu(desc->rx.l234_info);
+
+ /* Based on hw strategy, the tag offloaded will be stored at
+ * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+ * in one layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
@@ -3022,8 +2989,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
goto out_when_alloc_ring_memory;
}
- hns3_init_ring_hw(priv->ring_data[i].ring);
-
u64_stats_init(&priv->ring_data[i].ring->syncp);
}
@@ -3052,13 +3017,13 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
}
/* Set mac addr if it is configured. or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev)
+static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
- if (h->ae_algo->ops->get_mac_addr) {
+ if (h->ae_algo->ops->get_mac_addr && init) {
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
ether_addr_copy(netdev->dev_addr, mac_addr_temp);
}
@@ -3075,6 +3040,15 @@ static void hns3_init_mac_addr(struct net_device *netdev)
}
+static void hns3_uninit_mac_addr(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (h->ae_algo->ops->rm_uc_addr)
+ h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
+}
+
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3112,7 +3086,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
- hns3_init_mac_addr(netdev);
+ hns3_init_mac_addr(netdev, true);
hns3_set_default_feature(netdev);
@@ -3185,6 +3159,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_force_clear_all_rx_ring(handle);
+
ret = hns3_nic_uninit_vector_data(priv);
if (ret)
netdev_err(netdev, "uninit vector error\n");
@@ -3201,6 +3177,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
+ hns3_uninit_mac_addr(netdev);
+
free_netdev(netdev);
}
@@ -3298,9 +3276,76 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
hns3_nic_mc_sync(ndev, ha->addr);
}
-static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
+static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
- dev_kfree_skb_any(skb);
+ while (ring->next_to_clean != ring->next_to_use) {
+ ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
+ hns3_free_buffer_detach(ring, ring->next_to_clean);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+}
+
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ struct hns3_desc_cb res_cbs;
+ int ret;
+
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, it's memory has been
+ * freed in hns3_handle_rx_bd or will be freed by
+ * stack, so we need to replace the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ ret = hns3_reserve_buffer_map(ring, &res_cbs);
+ if (ret) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ /* if alloc new buffer fail, exit directly
+ * and reclear in up flow.
+ */
+ netdev_warn(ring->tqp->handle->kinfo.netdev,
+ "reserve buffer map failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use,
+ &res_cbs);
+ }
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ return 0;
+}
+
+static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, it's memory has been
+ * freed in hns3_handle_rx_bd or will be freed by
+ * stack, so only need to unmap the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ hns3_unmap_buffer(ring,
+ &ring->desc_cb[ring->next_to_use]);
+ ring->desc_cb[ring->next_to_use].dma = 0;
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+}
+
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *ring;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ hns3_force_clear_rx_ring(ring);
+ }
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
@@ -3314,14 +3359,55 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
struct hns3_enet_ring *ring;
ring = priv->ring_data[i].ring;
- hns3_clean_tx_ring(ring, ring->desc_num);
+ hns3_clear_tx_ring(ring);
dev_queue = netdev_get_tx_queue(ndev,
priv->ring_data[i].queue_index);
netdev_tx_reset_queue(dev_queue);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
- hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
+ /* Continue to clear other rings even if clearing some
+ * rings failed.
+ */
+ hns3_clear_rx_ring(ring);
+ }
+}
+
+int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *rx_ring;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->ae_algo->ops->reset_queue(h, i);
+ hns3_init_ring_hw(priv->ring_data[i].ring);
+
+ /* We need to clear tx ring here because self test will
+ * use the ring and will not run down before up
+ */
+ hns3_clear_tx_ring(priv->ring_data[i].ring);
+ priv->ring_data[i].ring->next_to_clean = 0;
+ priv->ring_data[i].ring->next_to_use = 0;
+
+ rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ hns3_init_ring_hw(rx_ring);
+ ret = hns3_clear_rx_ring(rx_ring);
+ if (ret)
+ return ret;
+
+ /* We can not know the hardware head and tail when this
+ * function is called in reset flow, so we reuse all desc.
+ */
+ for (j = 0; j < rx_ring->desc_num; j++)
+ hns3_reuse_buffer(rx_ring, j);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
}
+
+ return 0;
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
@@ -3359,7 +3445,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_init_mac_addr(netdev);
+ hns3_init_mac_addr(netdev, false);
hns3_nic_set_rx_mode(netdev);
hns3_recover_hw_addr(netdev);
@@ -3393,7 +3479,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_clear_all_ring(handle);
+ hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
@@ -3409,6 +3495,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
priv->ring_data = NULL;
+ hns3_uninit_mac_addr(netdev);
+
return ret;
}
@@ -3529,8 +3617,6 @@ int hns3_set_channels(struct net_device *netdev,
if (if_running)
hns3_nic_net_stop(netdev);
- hns3_clear_all_ring(h);
-
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
dev_err(&netdev->dev,
@@ -3600,6 +3686,8 @@ static int __init hns3_init_module(void)
client.ops = &client_ops;
+ INIT_LIST_HEAD(&client.node);
+
ret = hnae3_register_client(&client);
if (ret)
return ret;
@@ -3627,3 +3715,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
+MODULE_VERSION(HNS3_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 98cdbd3a1163d..3b083d5ae9ce2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -14,6 +14,8 @@
#include "hnae3.h"
+#define HNS3_MOD_VERSION "1.0"
+
extern const char hns3_driver_version[];
enum hns3_nic_state {
@@ -102,6 +104,9 @@ enum hns3_nic_state {
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
+#define HNS3_RXD_STRP_TAGP_S 13
+#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
+
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -620,6 +625,7 @@ int hns3_set_channels(struct net_device *netdev,
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
+int hns3_nic_reset_all_ring(struct hnae3_handle *h);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index eb3c34f3cf87b..40c0425b4023b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -74,7 +74,7 @@ struct hns3_link_mode_mapping {
u32 ethtool_link_mode;
};
-static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
+static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
@@ -85,11 +85,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
switch (loop) {
case HNAE3_MAC_INTER_LOOP_MAC:
- ret = h->ae_algo->ops->set_loopback(h, loop, true);
- break;
- case HNAE3_MAC_LOOP_NONE:
- ret = h->ae_algo->ops->set_loopback(h,
- HNAE3_MAC_INTER_LOOP_MAC, false);
+ ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
ret = -ENOTSUPP;
@@ -99,10 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
if (ret)
return ret;
- if (loop == HNAE3_MAC_LOOP_NONE)
- h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC);
- else
- h->ae_algo->ops->set_promisc_mode(h, 1);
+ h->ae_algo->ops->set_promisc_mode(h, en, en);
return ret;
}
@@ -115,6 +108,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
if (!h->ae_algo->ops->start)
return -EOPNOTSUPP;
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
ret = h->ae_algo->ops->start(h);
if (ret) {
netdev_err(ndev,
@@ -122,13 +119,13 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
}
- ret = hns3_lp_setup(ndev, loop_mode);
+ ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
return ret;
}
-static int hns3_lp_down(struct net_device *ndev)
+static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
@@ -136,7 +133,7 @@ static int hns3_lp_down(struct net_device *ndev)
if (!h->ae_algo->ops->stop)
return -EOPNOTSUPP;
- ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE);
+ ret = hns3_lp_setup(ndev, loop_mode, false);
if (ret) {
netdev_err(ndev, "lb_setup return error: %d\n", ret);
return ret;
@@ -332,7 +329,7 @@ static void hns3_self_test(struct net_device *ndev,
data[test_index] = hns3_lp_up(ndev, loop_type);
if (!data[test_index]) {
data[test_index] = hns3_lp_run_test(ndev, loop_type);
- hns3_lp_down(ndev);
+ hns3_lp_down(ndev, loop_type);
}
if (data[test_index])
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ff13d1876d9ef..c36d64710fa69 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -31,6 +31,17 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+ if (unlikely(h >= ring->desc_num))
+ return 0;
+
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
@@ -141,6 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
+ struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
struct hclge_cmq_ring *csq = &hw->cmq.csq;
u16 ntc = csq->next_to_clean;
struct hclge_desc *desc;
@@ -149,6 +161,13 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touch any data */
+
+ if (!is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ return 0;
+ }
while (head != ntc) {
memset(desc, 0, sizeof(*desc));
@@ -171,7 +190,11 @@ static int hclge_cmd_csq_done(struct hclge_hw *hw)
static bool hclge_is_special_opcode(u16 opcode)
{
- u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032};
+ /* these commands have several descriptors,
+ * and use the first one to save opcode and return value
+ */
+ u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -362,9 +385,9 @@ int hclge_cmd_init(struct hclge_dev *hdev)
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
- spin_lock_bh(&ring->lock);
+ spin_lock(&ring->lock);
hclge_free_cmd_desc(ring);
- spin_unlock_bh(&ring->lock);
+ spin_unlock(&ring->lock);
}
void hclge_destroy_cmd_queue(struct hclge_hw *hw)
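The is_valid_csq_clean_head() helper added above is, at heart, a circular-buffer membership test: a head index read back from hardware is only plausible if it falls inside the region the driver has posted, and that region changes shape once next_to_use wraps below next_to_clean. A self-contained sketch of the same predicate, with standalone parameters assumed for illustration (the kernel helper takes the ring struct instead):

    #include <assert.h>
    #include <stdbool.h>

    /* True when 'head' lies in the posted region (clean, use] of a ring
     * with 'num' descriptors; mirrors the logic of is_valid_csq_clean_head().
     */
    static bool csq_head_is_valid(int head, int use, int clean, int num)
    {
        if (head >= num)        /* outside the ring entirely */
            return false;

        if (use > clean)        /* posted region has not wrapped */
            return head > clean && head <= use;

        return head > clean || head <= use;  /* region wraps past the end */
    }

    int main(void)
    {
        /* no wrap: clean=2, use=5 -> only 3..5 are valid heads */
        assert(csq_head_is_valid(4, 5, 2, 8));
        assert(!csq_head_is_valid(6, 5, 2, 8));

        /* wrapped: clean=6, use=1 -> 7, 0, 1 are valid heads */
        assert(csq_head_is_valid(7, 1, 6, 8));
        assert(csq_head_is_valid(0, 1, 6, 8));
        assert(!csq_head_is_valid(3, 1, 6, 8));
        return 0;
    }

Treating an implausible head as "nothing to clean" (the hunk warns and returns 0) is the safer failure mode: the queue state is left untouched rather than reclaiming descriptors the firmware never completed.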
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ee3cbac6dfaa8..d9aaa76c76eb4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -115,7 +115,6 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -484,6 +483,8 @@ struct hclge_promisc_param {
u8 enable;
};
+#define HCLGE_PROMISC_TX_EN_B BIT(4)
+#define HCLGE_PROMISC_RX_EN_B BIT(5)
#define HCLGE_PROMISC_EN_B 1
#define HCLGE_PROMISC_EN_ALL 0x7
#define HCLGE_PROMISC_EN_UC 0x1
@@ -704,11 +705,14 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
-#define HCLGE_ACCEPT_TAG_B 0
-#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_ACCEPT_TAG1_B 0
+#define HCLGE_ACCEPT_UNTAG1_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
#define HCLGE_PORT_INS_TAG2_EN_B 3
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+#define HCLGE_ACCEPT_TAG2_B 5
+#define HCLGE_ACCEPT_UNTAG2_B 6
+
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
@@ -813,21 +817,13 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
-#define HCLGE_LED_PORT_SPEED_STATE_S 0
-#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
-#define HCLGE_LED_ACTIVITY_STATE_S 0
-#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
-#define HCLGE_LED_LINK_STATE_S 0
-#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
struct hclge_set_led_state_cmd {
- u8 port_speed_led_config;
- u8 link_led_config;
- u8 activity_led_config;
+ u8 rsv1[3];
u8 locate_led_config;
- u8 rsv[20];
+ u8 rsv2[20];
};
int hclge_cmd_init(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 2066dd7344444..d318d35e598fd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -39,7 +39,6 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
-static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -304,8 +303,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
{"mac_tx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_12287_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
{"mac_tx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
{"mac_tx_9217_12287_oct_pkt_num",
@@ -356,8 +353,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
{"mac_rx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_12287_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
{"mac_rx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
{"mac_rx_9217_12287_oct_pkt_num",
@@ -508,38 +503,6 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
-static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
-{
- struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
- struct hclge_desc desc;
- __le64 *desc_data;
- int ret;
-
- /* for fiber port, need to query the total rx/tx packets statstics,
- * used for data transferring checking.
- */
- if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
- return 0;
-
- if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
- return 0;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get MAC total pkt stats fail, ret = %d\n", ret);
-
- return ret;
- }
-
- desc_data = (__le64 *)(&desc.data[0]);
- mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
- mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
-
- return 0;
-}
-
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -1459,8 +1422,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
/* We need to alloc a vport for main NIC of PF */
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
- if (hdev->num_tqps < num_vport)
- num_vport = hdev->num_tqps;
+ if (hdev->num_tqps < num_vport) {
+ dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ hdev->num_tqps, num_vport);
+ return -EINVAL;
+ }
/* Alloc the same number of TQPs for every vport */
tqp_per_vport = hdev->num_tqps / num_vport;
@@ -1474,21 +1440,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
hdev->vport = vport;
hdev->num_alloc_vport = num_vport;
-#ifdef CONFIG_PCI_IOV
- /* Enable SRIOV */
- if (hdev->num_req_vfs) {
- dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
- hdev->num_req_vfs);
- ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
- if (ret) {
- hdev->num_alloc_vfs = 0;
- dev_err(&pdev->dev, "SRIOV enable failed %d\n",
- ret);
- return ret;
- }
- }
- hdev->num_alloc_vfs = hdev->num_req_vfs;
-#endif
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ hdev->num_alloc_vfs = hdev->num_req_vfs;
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
@@ -2335,8 +2288,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct hclge_vport *vport;
int mtu;
int ret;
+ int i;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
if (ret) {
@@ -2348,7 +2303,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
/* Initialize the MTA table work mode */
- hdev->accept_mta_mc = true;
hdev->enable_mta = true;
hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
@@ -2361,11 +2315,17 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n", ret);
- return ret;
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->accept_mta_mc = false;
+
+ memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
+ ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
}
ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
@@ -2597,6 +2557,15 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
}
}
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+ BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+ BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+ BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2627,16 +2596,18 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
* mbx messages reported by this interrupt.
*/
hclge_mbx_task_schedule(hdev);
-
+ break;
default:
- dev_dbg(&hdev->pdev->dev,
- "received unknown or unhandled event of vector0\n");
+ dev_warn(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
break;
}
- /* we should clear the source of interrupt */
- hclge_clear_event_cause(hdev, event_cause, clearval);
- hclge_enable_vector(&hdev->misc_vector, true);
+ /* clear the source of interrupt if it is not cause by reset */
+ if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -2824,6 +2795,33 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
return rst_level;
}
+static void hclge_clear_reset_cause(struct hclge_dev *hdev)
+{
+ u32 clearval = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ break;
+ case HNAE3_CORE_RESET:
+ clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ break;
+ default:
+ dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
+ hdev->reset_type);
+ break;
+ }
+
+ if (!clearval)
+ return;
+
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
/* perform reset of the stack & ae device for a client */
@@ -2836,6 +2834,8 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
rtnl_unlock();
+
+ hclge_clear_reset_cause(hdev);
} else {
/* schedule again to check pending resets later */
set_bit(hdev->reset_type, &hdev->reset_pending);
@@ -2930,38 +2930,16 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- /* The total rx/tx packets statstics are wanted to be updated
- * per second. Both hclge_update_stats_for_all() and
- * hclge_mac_get_traffic_stats() can do it.
- */
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
hdev->hw_stats.stats_timer = 0;
- } else {
- hclge_mac_get_traffic_stats(hdev);
}
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
-static void hclge_disable_sriov(struct hclge_dev *hdev)
-{
- /* If our VFs are assigned we cannot shut down SR-IOV
- * without causing issues, so just leave the hardware
- * available but disabled
- */
- if (pci_vfs_assigned(hdev->pdev)) {
- dev_warn(&hdev->pdev->dev,
- "disabling driver while VFs are assigned\n");
- return;
- }
-
- pci_disable_sriov(hdev->pdev);
-}
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
/* VF handle has no client */
@@ -3615,7 +3593,14 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
req = (struct hclge_promisc_cfg_cmd *)desc.data;
req->vf_id = param->vf_id;
- req->flag = (param->enable << HCLGE_PROMISC_EN_B);
+
+	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
+	 * pdev revision(0x20); newer revisions support them. Setting these
+	 * two fields will not return an error when the driver sends the
+	 * command to firmware on revision(0x20).
+	 */
+ req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
+ HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3642,13 +3627,15 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_promisc_param param;
- hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
+ vport->vport_id);
hclge_cmd_set_promisc_mode(hdev, &param);
}
@@ -3683,48 +3670,50 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
-static int hclge_set_loopback(struct hnae3_handle *handle,
- enum hnae3_loop loop_mode, bool en)
+static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_mac_mode_cmd *req;
- struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
u32 loop_en;
int ret;
- switch (loop_mode) {
- case HNAE3_MAC_INTER_LOOP_MAC:
- req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
- /* 1 Read out the MAC mode config at first */
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_CONFIG_MAC_MODE,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac loopback get fail, ret =%d.\n",
- ret);
- return ret;
- }
+ req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+ /* 1 Read out the MAC mode config at first */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac loopback get fail, ret =%d.\n", ret);
+ return ret;
+ }
- /* 2 Then setup the loopback flag */
- loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- if (en)
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
- else
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ /* 2 Then setup the loopback flag */
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
- req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
- /* 3 Config mac work mode with loopback flag
- * and its original configure parameters
- */
- hclge_cmd_reuse_desc(&desc, false);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "mac loopback set fail, ret =%d.\n", ret);
+ /* 3 Config mac work mode with loopback flag
+ * and its original configure parameters
+ */
+ hclge_cmd_reuse_desc(&desc, false);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "mac loopback set fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ switch (loop_mode) {
+ case HNAE3_MAC_INTER_LOOP_MAC:
+ ret = hclge_set_mac_loopback(hdev, en);
break;
default:
ret = -ENOTSUPP;
@@ -3783,13 +3772,11 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw.mac.link = 0;
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- return 0;
-
ret = hclge_mac_start_phy(hdev);
if (ret)
return ret;
@@ -3805,9 +3792,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_mac_stop_phy(hdev);
return;
+ }
for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, false);
@@ -3819,6 +3809,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
hclge_update_link_status(hdev);
}
@@ -4029,9 +4021,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
return ret;
}
+ if (enable)
+ set_bit(idx, vport->mta_shadow);
+ else
+ clear_bit(idx, vport->mta_shadow);
+
return 0;
}
+static int hclge_update_mta_status(struct hnae3_handle *handle)
+{
+ unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct netdev_hw_addr *ha;
+ u16 tbl_idx;
+
+ memset(mta_status, 0, sizeof(mta_status));
+
+ /* update mta_status from mc addr list */
+ netdev_for_each_mc_addr(ha, netdev) {
+ tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
+ set_bit(tbl_idx, mta_status);
+ }
+
+ return hclge_update_mta_status_common(vport, mta_status,
+ 0, HCLGE_MTA_TBL_SIZE, true);
+}
+
+int hclge_update_mta_status_common(struct hclge_vport *vport,
+ unsigned long *status,
+ u16 idx,
+ u16 count,
+ bool update_filter)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 update_max = idx + count;
+ u16 check_max;
+ int ret = 0;
+ bool used;
+ u16 i;
+
+ /* setup mta check range */
+ if (update_filter) {
+ i = 0;
+ check_max = HCLGE_MTA_TBL_SIZE;
+ } else {
+ i = idx;
+ check_max = update_max;
+ }
+
+ used = false;
+	/* check and update all mta items */
+ for (; i < check_max; i++) {
+ /* ignore unused item */
+ if (!test_bit(i, vport->mta_shadow))
+ continue;
+
+		/* if i is in the update range then update it */
+ if (i >= idx && i < update_max)
+ if (!test_bit(i - idx, status))
+ hclge_set_mta_table_item(vport, i, false);
+
+ if (!used && test_bit(i, vport->mta_shadow))
+ used = true;
+ }
+
+	/* mta is no longer used, disable it */
+ if (vport->accept_mta_mc && update_filter && !used) {
+ ret = hclge_cfg_func_mta_filter(hdev,
+ vport->vport_id,
+ false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "disable func mta filter fail ret=%d\n",
+ ret);
+ else
+ vport->accept_mta_mc = false;
+ }
+
+ return ret;
+}
+
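For orientation, this helper is exercised two ways elsewhere in this diff: the PF rx-mode path rescans the whole 4096-bit shadow table in one call, while the VF mailbox path (hclge_set_vf_mc_mta_status below) feeds it 104-bit chunks and only lets the final chunk trigger the filter re-evaluation. A condensed sketch of the two call shapes, using the names from this patch:

	/* full-table refresh from the PF rx-mode path */
	hclge_update_mta_status_common(vport, mta_status,
				       0, HCLGE_MTA_TBL_SIZE, true);

	/* chunked update from the VF mailbox; update_filter is true only
	 * for the last chunk, so the "table unused -> disable mta" check
	 * runs exactly once per full update
	 */
	hclge_update_mta_status_common(vport, status,
				       idx * HCLGE_MTA_STATUS_MSG_BITS,
				       tbl_cnt, is_end);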
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -4299,9 +4370,25 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
- /* Set MTA table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
+ /* If mc mac vlan table is full, use MTA table */
+ if (status == -ENOSPC) {
+ if (!vport->accept_mta_mc) {
+ status = hclge_cfg_func_mta_filter(hdev,
+ vport->vport_id,
+ true);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n",
+ status);
+ return status;
+ }
+ vport->accept_mta_mc = true;
+ }
+
+ /* Set MTA table for this MAC address */
+ tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
+ status = hclge_set_mta_table_item(vport, tbl_idx, true);
+ }
return status;
}
@@ -4321,7 +4408,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
struct hclge_desc desc[3];
- u16 tbl_idx;
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
@@ -4350,17 +4436,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
} else {
- /* This mac addr do not exist, can't delete it */
- dev_err(&hdev->pdev->dev,
- "Rm multicast mac addr failed, ret = %d.\n",
- status);
- return -EIO;
+		/* This mac address may be in the mta table, but it cannot be
+		 * deleted here because an mta entry represents an address
+		 * range rather than a specific address. The delete action for
+		 * all entries will take effect in update_mta_status, called by
+		 * hns3_nic_set_rx_mode.
+		 */
+ status = 0;
}
- /* Set MTB table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, false);
-
return status;
}
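The constraint described in the comment above follows from how the MTA index is computed: only 12 bits derived from the top two bytes of the address select the table bit, so many multicast addresses collide on one entry and a single removal cannot safely clear it. A minimal sketch of the mapping, mirroring the VF-side helper hclgevf_get_mac_addr_to_mta_index added later in this diff (sel_type is the configured mta_mac_sel_type, at most 4):

static u16 mta_index(u8 sel_type, const u8 *addr)
{
	u32 rsh = 4 - sel_type;		/* 4 == HCLGEVF_MTA_TYPE_SEL_MAX */
	u16 high_val = addr[1] | (addr[0] << 8);

	/* 12-bit index into the 4096-entry table */
	return (high_val >> rsh) & 0xfff;
}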
@@ -4540,8 +4624,9 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
-int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
- bool is_kill, u16 vlan, u8 qos, __be16 proto)
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
+ bool is_kill, u16 vlan, u8 qos,
+ __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vlan_filter_vf_cfg_cmd *req0;
@@ -4581,9 +4666,16 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
}
if (!is_kill) {
+#define HCLGE_VF_VLAN_NO_ENTRY 2
if (!req0->resp_code || req0->resp_code == 1)
return 0;
+ if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ dev_warn(&hdev->pdev->dev,
+ "vf vlan table is full, vf vlan filter is disabled\n");
+ return 0;
+ }
+
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%d.\n",
req0->resp_code);
@@ -4599,12 +4691,9 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
return -EIO;
}
-static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
- __be16 proto, u16 vlan_id,
- bool is_kill)
+static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
+ u16 vlan_id, bool is_kill)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
struct hclge_vlan_filter_pf_cfg_cmd *req;
struct hclge_desc desc;
u8 vlan_offset_byte_val;
@@ -4624,22 +4713,66 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "port vlan command, send fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
+ u16 vport_id, u16 vlan_id, u8 qos,
+ bool is_kill)
+{
+ u16 vport_idx, vport_num = 0;
+ int ret;
+
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
+ 0, proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "port vlan command, send fail, ret =%d.\n",
- ret);
+ "Set %d vport vlan filter config fail, ret =%d.\n",
+ vport_id, ret);
return ret;
}
- ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
- if (ret) {
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return 0;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Set pf vlan filter config fail, ret =%d.\n",
- ret);
- return -EIO;
+ "Add port vlan failed, vport %d is already in vlan %d\n",
+ vport_id, vlan_id);
+ return -EINVAL;
}
- return 0;
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_err(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %d is not in vlan %d\n",
+ vport_id, vlan_id);
+ return -EINVAL;
+ }
+
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
+ vport_num++;
+
+ if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
+ ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
+ is_kill);
+
+ return ret;
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
+ 0, is_kill);
}
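The per-VLAN vport bitmap added above effectively reference-counts VLAN membership, so the shared port-level filter is only programmed when the first vport joins a VLAN and only torn down when the last one leaves. An illustrative walk-through, assuming two vports sharing VLAN 10:

	/* add(vport 0)  -> bitmap 0b01, vport_num == 1 -> port filter added
	 * add(vport 1)  -> bitmap 0b11, vport_num == 2 -> port filter untouched
	 * kill(vport 0) -> bitmap 0b10, vport_num == 1 -> port filter untouched
	 * kill(vport 1) -> bitmap 0b00, vport_num == 0 -> port filter removed
	 */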
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
@@ -4653,7 +4786,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
+ return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
@@ -4669,10 +4802,14 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
- vcfg->accept_tag ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
- vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
vcfg->insert_tag1_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
@@ -4796,8 +4933,18 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vport->txvlan_cfg.accept_tag = true;
- vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.accept_tag1 = true;
+ vport->txvlan_cfg.accept_untag1 = true;
+
+		/* accept_tag2 and accept_untag2 are not supported on
+		 * pdev revision(0x20); newer revisions support them. Setting
+		 * these two fields will not return an error when the driver
+		 * sends the command to firmware on revision(0x20).
+		 * These two fields cannot be configured by the user.
+		 */
+ vport->txvlan_cfg.accept_tag2 = true;
+ vport->txvlan_cfg.accept_untag2 = true;
+
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag1 = 0;
@@ -4818,10 +4965,10 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
}
handle = &hdev->vport[0].nic;
- return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
+ return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
-static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -5166,12 +5313,6 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- /* Only support flow control negotiation for netdev with
- * phy attached for now.
- */
- if (!phydev)
- return -EOPNOTSUPP;
-
fc_autoneg = hclge_get_autoneg(handle);
if (auto_neg != fc_autoneg) {
dev_info(&hdev->pdev->dev,
@@ -5190,6 +5331,12 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
if (!fc_autoneg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+ /* Only support flow control negotiation for netdev with
+ * phy attached for now.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
return phy_start_aneg(phydev);
}
@@ -5282,7 +5429,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->nic.client = client;
ret = client->ops->init_instance(&vport->nic);
if (ret)
- goto err;
+ return ret;
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
@@ -5290,11 +5437,11 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = hclge_init_roce_base_info(vport);
if (ret)
- goto err;
+ return ret;
ret = rc->ops->init_instance(&vport->roce);
if (ret)
- goto err;
+ return ret;
}
break;
@@ -5304,7 +5451,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&vport->nic);
if (ret)
- goto err;
+ return ret;
break;
case HNAE3_CLIENT_ROCE:
@@ -5316,18 +5463,16 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (hdev->roce_client && hdev->nic_client) {
ret = hclge_init_roce_base_info(vport);
if (ret)
- goto err;
+ return ret;
ret = client->ops->init_instance(&vport->roce);
if (ret)
- goto err;
+ return ret;
}
}
}
return 0;
-err:
- return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
@@ -5364,7 +5509,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
- goto err_no_drvdata;
+ return ret;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -5402,8 +5547,6 @@ err_clr_master:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_no_drvdata:
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -5412,6 +5555,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ pcim_iounmap(pdev, hdev->hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -5427,7 +5571,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev) {
ret = -ENOMEM;
- goto err_hclge_dev;
+ goto out;
}
hdev->pdev = pdev;
@@ -5440,38 +5584,38 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI init failed\n");
- goto err_pci_init;
+ goto out;
}
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
- return ret;
+ goto err_pci_uninit;
}
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
if (ret)
- goto err_cmd_init;
+ goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
if (ret) {
dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_misc_irq_init(hdev);
@@ -5479,69 +5623,71 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_err(&pdev->dev,
"Misc IRQ(vector0) init error, ret = %d.\n",
ret);
- return ret;
+ goto err_msi_uninit;
}
ret = hclge_alloc_tqps(hdev);
if (ret) {
dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
ret = hclge_alloc_vport(hdev);
if (ret) {
dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
ret = hclge_map_tqp(hdev);
if (ret) {
dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
- ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
- return ret;
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ ret = hclge_mac_mdio_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio config fail ret=%d\n", ret);
+ goto err_msi_irq_uninit;
+ }
}
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) {
dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_tm_schd_init(hdev);
if (ret) {
dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
hclge_rss_init_cfg(hdev);
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = init_mgr_tbl(hdev);
if (ret) {
dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
hclge_dcb_ops_set(hdev);
@@ -5551,6 +5697,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ hclge_clear_all_event_cause(hdev);
+
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -5564,11 +5712,21 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
-err_cmd_init:
+err_mdiobus_unreg:
+ if (hdev->hw.mac.phydev)
+ mdiobus_unregister(hdev->hw.mac.mdio_bus);
+err_msi_irq_uninit:
+ hclge_misc_irq_uninit(hdev);
+err_msi_uninit:
+ pci_free_irq_vectors(pdev);
+err_cmd_uninit:
+ hclge_destroy_cmd_queue(&hdev->hw);
+err_pci_uninit:
+ pcim_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
pci_release_regions(pdev);
-err_pci_init:
- pci_set_drvdata(pdev, NULL);
-err_hclge_dev:
+ pci_disable_device(pdev);
+out:
return ret;
}
@@ -5586,6 +5744,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -5642,9 +5801,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -5658,9 +5814,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- if (IS_ENABLED(CONFIG_PCI_IOV))
- hclge_disable_sriov(hdev);
-
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
@@ -5675,6 +5828,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -5985,9 +6140,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
"Get 64 bit register failed, ret = %d.\n", ret);
}
-static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
- u8 act_led_status, u8 link_led_status,
- u8 locate_led_status)
+static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
struct hclge_set_led_state_cmd *req;
struct hclge_desc desc;
@@ -5996,12 +6149,6 @@ static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
- hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
- HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
- hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
- HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
- hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
- HCLGE_LED_LINK_STATE_S, link_led_status);
hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
HCLGE_LED_LOCATE_STATE_S, locate_led_status);
@@ -6022,105 +6169,17 @@ enum hclge_led_status {
static int hclge_set_led_id(struct hnae3_handle *handle,
enum ethtool_phys_id_state status)
{
-#define BLINK_FREQUENCY 2
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct phy_device *phydev = hdev->hw.mac.phydev;
- int ret = 0;
-
- if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
- return -EOPNOTSUPP;
switch (status) {
case ETHTOOL_ID_ACTIVE:
- ret = hclge_set_led_status_sfp(hdev,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_ON);
- break;
+ return hclge_set_led_status(hdev, HCLGE_LED_ON);
case ETHTOOL_ID_INACTIVE:
- ret = hclge_set_led_status_sfp(hdev,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_NO_CHANGE,
- HCLGE_LED_OFF);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-enum hclge_led_port_speed {
- HCLGE_SPEED_LED_FOR_1G,
- HCLGE_SPEED_LED_FOR_10G,
- HCLGE_SPEED_LED_FOR_25G,
- HCLGE_SPEED_LED_FOR_40G,
- HCLGE_SPEED_LED_FOR_50G,
- HCLGE_SPEED_LED_FOR_100G,
-};
-
-static u8 hclge_led_get_speed_status(u32 speed)
-{
- u8 speed_led;
-
- switch (speed) {
- case HCLGE_MAC_SPEED_1G:
- speed_led = HCLGE_SPEED_LED_FOR_1G;
- break;
- case HCLGE_MAC_SPEED_10G:
- speed_led = HCLGE_SPEED_LED_FOR_10G;
- break;
- case HCLGE_MAC_SPEED_25G:
- speed_led = HCLGE_SPEED_LED_FOR_25G;
- break;
- case HCLGE_MAC_SPEED_40G:
- speed_led = HCLGE_SPEED_LED_FOR_40G;
- break;
- case HCLGE_MAC_SPEED_50G:
- speed_led = HCLGE_SPEED_LED_FOR_50G;
- break;
- case HCLGE_MAC_SPEED_100G:
- speed_led = HCLGE_SPEED_LED_FOR_100G;
- break;
+ return hclge_set_led_status(hdev, HCLGE_LED_OFF);
default:
- speed_led = HCLGE_LED_NO_CHANGE;
+ return -EINVAL;
}
-
- return speed_led;
-}
-
-static int hclge_update_led_status(struct hclge_dev *hdev)
-{
- u8 port_speed_status, link_status, activity_status;
- u64 rx_pkts, tx_pkts;
-
- if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
- return 0;
-
- port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
-
- rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
- tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
- if (rx_pkts != hdev->rx_pkts_for_led ||
- tx_pkts != hdev->tx_pkts_for_led)
- activity_status = HCLGE_LED_ON;
- else
- activity_status = HCLGE_LED_OFF;
- hdev->rx_pkts_for_led = rx_pkts;
- hdev->tx_pkts_for_led = tx_pkts;
-
- if (hdev->hw.mac.link)
- link_status = HCLGE_LED_ON;
- else
- link_status = HCLGE_LED_OFF;
-
- return hclge_set_led_status_sfp(hdev, port_speed_status,
- activity_status, link_status,
- HCLGE_LED_NO_CHANGE);
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
@@ -6190,6 +6249,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
+ .update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
@@ -6203,7 +6263,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
.enable_vlan_filter = hclge_enable_vlan_filter,
- .set_vlan_filter = hclge_set_port_vlan_filter,
+ .set_vlan_filter = hclge_set_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
@@ -6228,7 +6288,9 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- return hnae3_register_ae_algo(&ae_algo);
+ hnae3_register_ae_algo(&ae_algo);
+
+ return 0;
}
static void hclge_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0f4157e712821..7488534528cdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,10 +12,12 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
+#include <linux/if_vlan.h>
+
#include "hclge_cmd.h"
#include "hnae3.h"
-#define HCLGE_MOD_VERSION "v1.0"
+#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_INVALID_VPORT 0xffff
@@ -59,6 +61,8 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
+#define HCLGE_MTA_TBL_SIZE 4096
+
#define HCLGE_TQP_RESET_TRY_TIMES 10
#define HCLGE_PHY_PAGE_MDIX 0
@@ -406,9 +410,9 @@ struct hclge_mac_stats {
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
- u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
- u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 rsv0;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
u64 mac_tx_1519_max_good_oct_pkt_num;
u64 mac_tx_1519_max_bad_oct_pkt_num;
@@ -433,9 +437,9 @@ struct hclge_mac_stats {
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */
- u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
- u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 rsv1;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
u64 mac_rx_1519_max_good_oct_pkt_num;
u64 mac_rx_1519_max_bad_oct_pkt_num;
@@ -471,6 +475,7 @@ struct hclge_vlan_type_cfg {
u16 tx_in_vlan_type;
};
+#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -556,18 +561,18 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
 bool enable_mta; /* Multicast filter enable */
- bool accept_mta_mc; /* Whether accept mta filter multicast */
struct hclge_vlan_type_cfg vlan_type_cfg;
- u64 rx_pkts_for_led;
- u64 tx_pkts_for_led;
+ unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
- bool accept_tag; /* Whether accept tagged packet from host */
- bool accept_untag; /* Whether accept untagged packet from host */
+ bool accept_tag1; /* Whether accept tag1 packet from host */
+ bool accept_untag1; /* Whether accept untag1 packet from host */
+ bool accept_tag2;
+ bool accept_untag2;
bool insert_tag1_en; /* Whether insert inner vlan tag */
bool insert_tag2_en; /* Whether insert outer vlan tag */
u16 default_tag1; /* The default inner vlan tag to insert */
@@ -616,6 +621,9 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ bool accept_mta_mc; /* whether to accept mta filter multicast */
+ unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -633,6 +641,12 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
+int hclge_update_mta_status_common(struct hclge_vport *vport,
+ unsigned long *status,
+ u16 idx,
+ u16 count,
+ bool update_filter);
+
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
@@ -646,8 +660,9 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
-int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
- bool is_kill, u16 vlan, u8 qos, __be16 proto);
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a6f7ffa9c2597..7541cb9b71ce2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -190,11 +190,12 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en = req->msg[1] ? true : false;
+ bool en_uc = req->msg[1] ? true : false;
+ bool en_mc = req->msg[2] ? true : false;
struct hclge_promisc_param param;
/* always enable broadcast promisc bit */
- hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
return hclge_cmd_set_promisc_mode(vport->back, &param);
}
@@ -230,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
+static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
+ u8 *msg, u8 idx, bool is_end)
+{
+#define HCLGE_MTA_STATUS_MSG_SIZE 13
+#define HCLGE_MTA_STATUS_MSG_BITS \
+ (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
+#define HCLGE_MTA_STATUS_MSG_END_BITS \
+ (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
+ unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
+ u16 tbl_cnt;
+ u16 tbl_idx;
+ u8 msg_ofs;
+ u8 msg_bit;
+
+ tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
+ HCLGE_MTA_STATUS_MSG_BITS;
+
+ /* set msg field */
+ msg_ofs = 0;
+ msg_bit = 0;
+ memset(status, 0, sizeof(status));
+ for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
+ if (msg[msg_ofs] & BIT(msg_bit))
+ set_bit(tbl_idx, status);
+
+ msg_bit++;
+ if (msg_bit == BITS_PER_BYTE) {
+ msg_bit = 0;
+ msg_ofs++;
+ }
+ }
+
+ return hclge_update_mta_status_common(vport,
+ status, idx * HCLGE_MTA_STATUS_MSG_BITS,
+ tbl_cnt, is_end);
+}
+
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
+ u8 resp_len = 0;
+ u8 resp_data;
int status;
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
@@ -247,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
bool enable = mbx_req->msg[2];
status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
+ resp_data = hdev->mta_mac_sel_type;
+ resp_len = sizeof(u8);
+ gen_resp = true;
+ status = 0;
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
+ /* mta status update msg format
+ * msg[2.6 : 2.0] msg index
+ * msg[2.7] msg is end
+ * msg[15 : 3] mta status bits[103 : 0]
+ */
+ bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
+
+ status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
+ mbx_req->msg[2] & 0x7F,
+ is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
@@ -255,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
}
if (gen_resp)
- hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+ hclge_gen_resp_to_vf(vport, mbx_req, status,
+ &resp_data, resp_len);
return 0;
}
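The msg[2] layout noted in the comment above packs a 7-bit chunk index and an end flag into one byte, with 13 bytes (104 bits) of status following in msg[3..15]. A small stand-alone decode sketch matching the masks used in this hunk:

	u8 meta     = mbx_req->msg[2];
	bool is_end = !!(meta & 0x80);	/* bit 7: last chunk of this update */
	u8 idx      = meta & 0x7F;	/* bits 6:0: chunk index */
	/* this chunk covers table bits [idx * 104, idx * 104 + 104) */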
@@ -264,19 +321,23 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
- struct hclge_dev *hdev = vport->back;
int status = 0;
if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
bool is_kill;
is_kill = !!mbx_req->msg[2];
memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
- status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- is_kill, vlan, 0,
- cpu_to_be16(proto));
+ status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
+ vlan, is_kill);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ struct hnae3_handle *handle = &vport->nic;
+ bool en = mbx_req->msg[2] ? true : false;
+
+ status = hclge_en_hw_strip_rxvtag(handle, en);
}
if (gen_resp)
@@ -378,6 +439,13 @@ static void hclge_reset_vf(struct hclge_vport *vport,
hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
}
+static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
+{
+ u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->cmq.crq.next_to_use;
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -386,12 +454,23 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_desc *desc;
int ret, flag;
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
/* handle all the mailbox requests in the queue */
- while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) {
+ while (!hclge_cmd_crq_empty(&hdev->hw)) {
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %d\n",
+ req->msg[0]);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
vport = &hdev->vport[req->mbx_src_vfid];
switch (req->msg[0]) {
@@ -466,7 +545,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
}
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
}
 /* Write back CMDQ_RQ header pointer, M7 needs this pointer */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 682c2d6618e7b..9f7932e423b5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -140,8 +140,11 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
struct mii_bus *mdio_bus;
int ret;
- if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR)
- return 0;
+ if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ hdev->hw.mac.phy_addr);
+ return -EINVAL;
+ }
mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
if (!mdio_bus)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 885f25cd7be49..262c125f81375 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -134,11 +134,8 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get pfc pause stats fail, ret = %d.\n", ret);
+ if (ret)
return ret;
- }
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
struct hclge_pfc_stats_cmd *pfc_stats =
@@ -503,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
+static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
+ u32 bit_map)
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_desc desc;
@@ -514,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
bp_to_qs_map_cmd->tc_id = tc;
-
- /* Qset and tc is one by one mapping */
- bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
+ bp_to_qs_map_cmd->qs_group_id = grp_id;
+ bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1170,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map);
}
+/* Each TC has 1024 queue sets to back-pressure; they are divided into
+ * 32 groups, each group containing 32 queue sets, which can be
+ * represented by a u32 bitmap.
+ */
+static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u32 i, k, qs_bitmap;
+ int ret;
+
+ for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+ qs_bitmap = 0;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ u16 qs_id = vport->qs_offset + tc;
+ u8 grp, sub_grp;
+
+ grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
+ if (i == grp)
+ qs_bitmap |= (1 << sub_grp);
+
+ vport++;
+ }
+
+ ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
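With the field layout added to hclge_tm.h below (bits 9:5 of the qset id select the group, bits 4:0 the position inside it), the mapping is a plain shift and mask; a small worked example, assuming qs_id 70:

	/* qs_id = 70 = 0b00010_00110
	 *   grp     = (70 >> 5) & 0x1f = 2  -> third u32 bitmap
	 *   sub_grp =  70       & 0x1f = 6  -> bit 6 inside that bitmap
	 * so the loop ends up calling hclge_tm_qs_bp_cfg(hdev, tc, 2, BIT(6))
	 */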
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
bool tx_en, rx_en;
@@ -1221,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_qs_bp_cfg(hdev, i);
+ ret = hclge_bp_setup_hw(hdev, i);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 2dbe177581e98..c2b6e8a6700f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para;
};
+#define HCLGE_BP_GRP_NUM 32
+#define HCLGE_BP_SUB_GRP_ID_S 0
+#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
+#define HCLGE_BP_GRP_ID_S 5
+#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
struct hclge_bp_to_qs_map_cmd {
u8 tc_id;
u8 rsvd[2];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2b8426412cc9a..a17872aab1681 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -654,7 +654,8 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
@@ -664,7 +665,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
- req->msg[1] = en;
+ req->msg[1] = en_uc_pmc ? 1 : 0;
+ req->msg[2] = en_mc_pmc ? 1 : 0;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -674,11 +676,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
return status;
}
-static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- hclgevf_cmd_set_promisc_mode(hdev, en);
+ hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -725,15 +728,124 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
-static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
+ u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
+ NULL, 0, true, &resp_msg, sizeof(u8));
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Read mta type fail, ret=%d.\n", ret);
+ return ret;
+ }
+
+ if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
+ dev_err(&hdev->pdev->dev,
+ "Read mta type invalid, resp=%d.\n", resp_msg);
+ return -EINVAL;
+ }
+
+ hdev->mta_mac_sel_type = resp_msg;
+
+ return 0;
+}
+
+static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
+ const u8 *addr)
+{
+ u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
+ u16 high_val = addr[1] | (addr[0] << 8);
+
+ return (high_val >> rsh) & 0xfff;
+}
+
+static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
+ unsigned long *status)
+{
+#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
+#define HCLGEVF_MTA_STATUS_MSG_BITS \
+ (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
+#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
+ (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
+ u16 tbl_cnt;
+ u16 tbl_idx;
+ u8 msg_cnt;
+ u8 msg_idx;
+ int ret;
+
+ msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
+ HCLGEVF_MTA_STATUS_MSG_BITS);
+ tbl_idx = 0;
+ msg_idx = 0;
+ while (msg_cnt--) {
+ u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
+ u8 *p = &msg[1];
+ u8 msg_ofs;
+ u8 msg_bit;
+
+ memset(msg, 0, sizeof(msg));
+
+ /* set index field */
+ msg[0] = 0x7F & msg_idx;
+
+ /* set end flag field */
+ if (msg_cnt == 0) {
+ msg[0] |= 0x80;
+ tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
+ } else {
+ tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
+ }
+
+ /* set status field */
+ msg_ofs = 0;
+ msg_bit = 0;
+ while (tbl_cnt--) {
+ if (test_bit(tbl_idx, status))
+ p[msg_ofs] |= BIT(msg_bit);
+
+ tbl_idx++;
+
+ msg_bit++;
+ if (msg_bit == BITS_PER_BYTE) {
+ msg_bit = 0;
+ msg_ofs++;
+ }
+ }
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
+ msg, sizeof(msg), false, NULL, 0);
+ if (ret)
+ break;
+
+ msg_idx++;
+ }
+
+ return ret;
+}
+
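As a quick check on the chunking arithmetic above:

	/* HCLGEVF_MTA_TBL_SIZE        = 4096 bits to report
	 * HCLGEVF_MTA_STATUS_MSG_BITS = 13 bytes * 8 = 104 bits per message
	 * msg_cnt  = DIV_ROUND_UP(4096, 104) = 40 messages
	 * END_BITS = 4096 % 104              = 40 bits carried by the last one
	 */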
+static int hclgevf_update_mta_status(struct hnae3_handle *handle)
+{
+ unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg[2] = {0};
+ struct net_device *netdev = hdev->nic.kinfo.netdev;
+ struct netdev_hw_addr *ha;
+ u16 tbl_idx;
- msg[0] = en;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
- msg, 1, false, NULL, 0);
+ /* clear status */
+ memset(mta_status, 0, sizeof(mta_status));
+
+ /* update status from mc addr list */
+ netdev_for_each_mc_addr(ha, netdev) {
+ tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
+ set_bit(tbl_idx, mta_status);
+ }
+
+ return hclgevf_do_update_mta_status(hdev, mta_status);
}
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
@@ -830,6 +942,17 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
+ 1, false, NULL, 0);
+}
+
static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1323,6 +1446,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
hclgevf_reset_tqp_stats(handle);
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
hclgevf_update_link_status(hdev, 0);
}
@@ -1441,6 +1565,8 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
return ret;
}
+ hclgevf_clear_event_cause(hdev, 0);
+
/* enable misc. vector(vector 0) */
hclgevf_enable_vector(&hdev->misc_vector, true);
@@ -1451,6 +1577,7 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
/* disable misc vector(vector 0) */
hclgevf_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
free_irq(hdev->misc_vector.vector_irq, hdev);
hclgevf_free_vector(hdev, 0);
}
@@ -1489,10 +1616,12 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return ret;
break;
case HNAE3_CLIENT_ROCE:
- hdev->roce_client = client;
- hdev->roce.client = client;
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+ }
- if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ if (hdev->roce_client && hdev->nic_client) {
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
return ret;
@@ -1552,7 +1681,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
- goto err_no_drvdata;
+ return ret;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -1584,8 +1713,7 @@ err_clr_master:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_no_drvdata:
- pci_set_drvdata(pdev, NULL);
+
return ret;
}
@@ -1597,7 +1725,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
@@ -1625,6 +1752,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_init(hdev);
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1632,10 +1763,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -1654,12 +1781,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* Initialize VF's MTA */
- hdev->accept_mta_mc = true;
- ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+ /* Initialize mta type for this VF */
+ ret = hclgevf_cfg_func_mta_type(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to set mta filter mode\n", ret);
+ "failed(%d) to initialize MTA type\n", ret);
goto err_config;
}
@@ -1683,10 +1809,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return 0;
err_config:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_irq_init:
@@ -1696,9 +1822,9 @@ err_irq_init:
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
- hclgevf_cmd_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
hclgevf_state_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_cmd_uninit(hdev);
hclgevf_uninit_msi(hdev);
hclgevf_pci_uninit(hdev);
}
@@ -1814,6 +1940,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
+ .update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
@@ -1825,6 +1952,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
@@ -1842,7 +1970,9 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- return hnae3_register_ae_algo(&ae_algovf);
+ hnae3_register_ae_algo(&ae_algovf);
+
+ return 0;
}
static void hclgevf_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a477a7c36bbd3..0656e8e5c5f0a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -9,7 +9,7 @@
#include "hclgevf_cmd.h"
#include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_ROCEE_VECTOR_NUM 0
@@ -48,6 +48,9 @@
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+#define HCLGEVF_MTA_TBL_SIZE 4096
+#define HCLGEVF_MTA_TYPE_SEL_MAX 4
+
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
@@ -152,6 +155,7 @@ struct hclgevf_dev {
int *vector_irq;
bool accept_mta_mc; /* whether to accept mta filter multicast */
+ u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a286184283384..b598c06af8e09 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -126,6 +126,13 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
return status;
}
+static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
+{
+ u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->cmq.crq.next_to_use;
+}
+
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
struct hclgevf_mbx_resp_status *resp;
@@ -140,11 +147,22 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
resp = &hdev->mbx_resp;
crq = &hdev->hw.cmq.crq;
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+ while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %d\n",
+ req->msg[0]);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
/* synchronous messages are time critical and need preferential
* treatment. Therefore, we need to acknowledge all the sync
* responses as quickly as possible so that waiting tasks do not
@@ -205,7 +223,6 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
}
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
- flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
}
 /* Write back CMDQ_RQ header pointer, M7 needs this pointer */