Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_main.c')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c  170
1 file changed, 160 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 7870ae2a6f7e8..b04d57ca5176e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -64,6 +64,7 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
+#define QED_RDMA_SRQS QED_ROCE_QPS
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -264,7 +265,6 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
- dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
@@ -273,7 +273,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
+ &cdev->mf_bits);
dev_info->tx_switching = true;
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
@@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
if (IS_ENABLED(CONFIG_QED_RDMA)) {
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
/* divide the MRs by 3 to avoid MF ILT overflow */
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
@@ -946,6 +948,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
+static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+{
+ int i;
+
+ if (IS_VF(cdev))
+ return;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].slowpath_wq)
+ continue;
+
+ flush_workqueue(cdev->hwfns[i].slowpath_wq);
+ destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+ }
+}
+
+static void qed_slowpath_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ slowpath_task.work);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+ return;
+ }
+
+ if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
+ &hwfn->slowpath_task_flags))
+ qed_mfw_process_tlv_req(hwfn, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_slowpath_wq_start(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ char name[NAME_SIZE];
+ int i;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+
+ snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+
+ hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ if (!hwfn->slowpath_wq) {
+ DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+ }
+
+ return 0;
+}
+
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
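The qed_slowpath_wq_start()/qed_slowpath_task() pair added above is an instance of a common kernel pattern: a per-hwfn workqueue plus a delayed work item that callers kick by setting a request bit, and that re-queues itself with zero delay when the resource it needs (here the PTT) cannot be acquired. A minimal, self-contained sketch of that pattern follows; it is a hypothetical demo module, not qed code, and all demo_* names are made up (the mutex stands in for the PTT):

/* Hypothetical demo module illustrating the flag + delayed-workqueue
 * pattern used by qed_slowpath_task() above; not part of qed.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/bitops.h>

#define DEMO_FLAG_REQ	0

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;
static unsigned long demo_flags;
static DEFINE_MUTEX(demo_lock);		/* stand-in for the PTT */

static void demo_task(struct work_struct *work)
{
	/* Resource busy (cf. qed_ptt_acquire() failing): retry later. */
	if (!mutex_trylock(&demo_lock)) {
		queue_delayed_work(demo_wq, &demo_work, 0);
		return;
	}

	if (test_and_clear_bit(DEMO_FLAG_REQ, &demo_flags))
		pr_info("demo: handling deferred request\n");

	mutex_unlock(&demo_lock);
}

/* Caller side (cf. qed_mfw_tlv_req()): record the request, kick the work. */
static void demo_request(void)
{
	set_bit(DEMO_FLAG_REQ, &demo_flags);
	queue_delayed_work(demo_wq, &demo_work, 0);
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo-wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_work, demo_task);
	demo_request();
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Using a delayed work item lets the handler simply re-queue itself instead of spinning while the PTT is held elsewhere, which is why the patch re-queues with a zero delay on acquire failure.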
@@ -961,6 +1025,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (qed_iov_wq_start(cdev))
goto err;
+ if (qed_slowpath_wq_start(cdev))
+ goto err;
+
if (IS_PF(cdev)) {
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
@@ -1095,6 +1162,8 @@ err:
qed_iov_wq_stop(cdev, false);
+ qed_slowpath_wq_stop(cdev);
+
return rc;
}
@@ -1103,6 +1172,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_slowpath_wq_stop(cdev);
+
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
@@ -1894,15 +1965,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
u8 *buf, u16 len)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
- int rc;
- if (!ptt)
- return -EAGAIN;
-
- rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
- qed_ptt_release(hwfn, ptt);
- return rc;
+ return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
@@ -2095,3 +2159,89 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
return;
}
}
+
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
+{
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Scheduling slowpath task [Flag: %d]\n",
+ QED_SLOWPATH_MFW_TLV_REQ);
+ smp_mb__before_atomic();
+ set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+
+ return 0;
+}
+
+static void
+qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
+{
+ struct qed_common_cb_ops *op = cdev->protocol_ops.common;
+ struct qed_eth_stats_common *p_common;
+ struct qed_generic_tlvs gen_tlvs;
+ struct qed_eth_stats stats;
+ int i;
+
+ memset(&gen_tlvs, 0, sizeof(gen_tlvs));
+ op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
+
+ if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
+ tlv->flags.ipv4_csum_offload = true;
+ if (gen_tlvs.feat_flags & QED_TLV_LSO)
+ tlv->flags.lso_supported = true;
+ tlv->flags.b_set = true;
+
+ for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
+ if (is_valid_ether_addr(gen_tlvs.mac[i])) {
+ ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
+ tlv->mac_set[i] = true;
+ }
+ }
+
+ qed_get_vport_stats(cdev, &stats);
+ p_common = &stats.common;
+ tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ tlv->rx_frames_set = true;
+ tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ tlv->rx_bytes_set = true;
+ tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
+ tlv->tx_frames_set = true;
+ tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
+ tlv->tx_bytes_set = true;
+}
+
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_buf)
+{
+ struct qed_dev *cdev = hwfn->cdev;
+ struct qed_common_cb_ops *ops;
+
+ ops = cdev->protocol_ops.common;
+ if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
+ DP_NOTICE(hwfn, "Can't collect TLV management info\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case QED_MFW_TLV_GENERIC:
+ qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
+ break;
+ case QED_MFW_TLV_ETH:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
+ break;
+ case QED_MFW_TLV_FCOE:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
+ break;
+ case QED_MFW_TLV_ISCSI:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
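qed_mfw_fill_tlv_data() above only relays values; the data itself comes from the protocol driver through the get_generic_tlv_data/get_protocol_tlv_data callbacks in qed_common_cb_ops. As a rough sketch of the producer side, with the callback signature inferred from the call site above (cookie plus a struct qed_generic_tlvs pointer), assuming the definitions live in <linux/qed/qed_if.h> and using a made-up demo_dev structure (this is not the qede implementation):

/* Hypothetical callback a protocol driver could register so that
 * qed_fill_generic_tlv_data() above has feature flags and MACs to report.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_if.h>

struct demo_dev {			/* made-up upper-driver state */
	struct net_device *ndev;
};

static void demo_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct demo_dev *edev = dev;
	struct net_device *ndev = edev->ndev;

	/* Mirror the checks consumed by qed_fill_generic_tlv_data(). */
	if (ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	/* Report the primary MAC; remaining slots stay zeroed/invalid. */
	ether_addr_copy(data->mac[0], ndev->dev_addr);
}

The driver would then point .get_generic_tlv_data in its qed_common_cb_ops at such a function when registering with qed, which is what makes the !ops->get_generic_tlv_data check in qed_mfw_fill_tlv_data() pass.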