Diffstat (limited to 'drivers/infiniband/hw/mlx5/cq.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 83
1 file changed, 78 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5ece9a89f7c2..2a9ad8401750 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -568,18 +568,44 @@ repoll:
 	return 0;
 }
 
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+			struct ib_wc *wc)
+{
+	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+	struct mlx5_ib_wc *soft_wc, *next;
+	int npolled = 0;
+
+	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+		if (npolled >= num_entries)
+			break;
+
+		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+			    cq->mcq.cqn);
+
+		wc[npolled++] = soft_wc->wc;
+		list_del(&soft_wc->list);
+		kfree(soft_wc);
+	}
+
+	return npolled;
+}
+
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 	struct mlx5_ib_qp *cur_qp = NULL;
 	unsigned long flags;
+	int soft_polled = 0;
 	int npolled;
 	int err = 0;
 
 	spin_lock_irqsave(&cq->lock, flags);
 
-	for (npolled = 0; npolled < num_entries; npolled++) {
-		err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
+	if (unlikely(!list_empty(&cq->wc_list)))
+		soft_polled = poll_soft_wc(cq, num_entries, wc);
+
+	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+		err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
 		if (err)
 			break;
 	}
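
Note on the hunk above: poll_soft_wc() walks cq->wc_list with list_for_each_entry_safe() because every visited entry is unlinked and freed mid-walk; the plain list_for_each_entry() would then follow a freed pointer. A minimal standalone sketch of that idiom (illustrative type and function names, not part of this patch):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {				/* hypothetical element type */
		struct list_head node;
		int val;
	};

	static void drain_items(struct list_head *head)
	{
		struct item *it, *tmp;

		/* the "safe" variant caches the next node in tmp, so
		 * list_del() + kfree() on the current entry cannot
		 * break the walk */
		list_for_each_entry_safe(it, tmp, head, node) {
			list_del(&it->node);
			kfree(it);
		}
	}
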
@@ -590,7 +616,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	spin_unlock_irqrestore(&cq->lock, flags);
 
 	if (err == 0 || err == -EAGAIN)
-		return npolled;
+		return soft_polled + npolled;
 	else
 		return err;
 }
@@ -598,16 +624,27 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+	unsigned long irq_flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&cq->lock, irq_flags);
+	if (cq->notify_flags != IB_CQ_NEXT_COMP)
+		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
 
-	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+		ret = 1;
+	spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+	mlx5_cq_arm(&cq->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
 		    uar_page,
 		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
 		    to_mcq(ibcq)->mcq.cons_index);
 
-	return 0;
+	return ret;
 }
 
 static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
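
mlx5_ib_arm_cq() may now return 1 when IB_CQ_REPORT_MISSED_EVENTS is requested and software completions are already queued, which follows the ib_req_notify_cq() contract: a positive return means events may have been missed, so the caller should poll again rather than sleep. The usual consumer idiom looks roughly like this (standard verbs pattern, not part of this diff):

	int n;

	do {
		n = ib_poll_cq(cq, ARRAY_SIZE(wcs), wcs);
		/* ... process the n returned entries ... */
	} while (n > 0 ||
		 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
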
@@ -760,6 +797,14 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 	mlx5_db_free(dev->mdev, &cq->db);
 }
 
+static void notify_soft_wc_handler(struct work_struct *work)
+{
+	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+					     notify_work);
+
+	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
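
notify_soft_wc_handler() fires the CQ completion callback from a work item rather than from the point where a software completion is queued. A plausible reason, though the diff does not state it, is that mlx5_ib_generate_wc() below runs in atomic context holding cq->lock, and a comp_handler that immediately re-polls would recurse into that same lock. The container_of()/INIT_WORK()/schedule_work() pattern in isolation (illustrative names):

	#include <linux/workqueue.h>

	struct my_ctx {				/* hypothetical container */
		struct work_struct work;
		int payload;
	};

	static void my_handler(struct work_struct *work)
	{
		/* recover the enclosing struct from the member pointer */
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		pr_info("deferred payload: %d\n", ctx->payload);
	}

	/* setup:  INIT_WORK(&ctx->work, my_handler);
	 * later, even from atomic context:  schedule_work(&ctx->work); */
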
@@ -810,6 +855,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				       &index, &inlen);
 		if (err)
 			goto err_create;
+
+		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
 	cq->cqe_size = cqe_size;
@@ -835,6 +882,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	cq->mcq.comp  = mlx5_ib_cq_comp;
 	cq->mcq.event = mlx5_ib_cq_event;
 
+	INIT_LIST_HEAD(&cq->wc_list);
+
 	if (context)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
 			err = -EFAULT;
@@ -1222,3 +1271,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
 	cq = to_mcq(ibcq);
 	return cq->cqe_size;
 }
+
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+	struct mlx5_ib_wc *soft_wc;
+	struct mlx5_ib_cq *cq = to_mcq(ibcq);
+	unsigned long flags;
+
+	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+	if (!soft_wc)
+		return -ENOMEM;
+
+	soft_wc->wc = *wc;
+	spin_lock_irqsave(&cq->lock, flags);
+	list_add_tail(&soft_wc->list, &cq->wc_list);
+	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+	    wc->status != IB_WC_SUCCESS) {
+		cq->notify_flags = 0;
+		schedule_work(&cq->notify_work);
+	}
+	spin_unlock_irqrestore(&cq->lock, flags);
+
+	return 0;
+}
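
mlx5_ib_generate_wc() is the entry point the rest of the driver can use to inject a completion that never touched hardware; its only failure mode is the GFP_ATOMIC allocation. A hedged sketch of a caller (assumed shape; the real user is added elsewhere in the series and is not visible in this diff):

	static void complete_in_software(struct ib_cq *ibcq,
					 struct ib_qp *qp, u64 wr_id)
	{
		struct ib_wc wc = {
			.wr_id	= wr_id,
			.status	= IB_WC_SUCCESS,
			.opcode	= IB_WC_SEND,
			.qp	= qp,
		};

		/* queues the WC on cq->wc_list and, if the CQ is armed,
		 * schedules notify_work to fire the completion handler */
		if (mlx5_ib_generate_wc(ibcq, &wc))
			pr_warn("dropped software completion\n");
	}
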