author		Shaobo Xu <xushaobo2@huawei.com>	2017-08-30 17:23:09 +0800
committer	Doug Ledford <dledford@redhat.com>	2017-09-27 08:34:56 -0400
commit		9766edc34ea17a8264b76696367aeb88a52ab108 (patch)
tree		96a9fb63f353e536d50bf7d7e56c0ff0220e8967
parent		6a93c77afe088225363f6941a29fff415b1f7172 (diff)
RDMA/hns: Split CQE from MTT in hip08
In hip08, the SQWQE/SGE/RQWQE and CQE have different hop numbers and page
sizes, so the base address tables of the SQWQE/SGE/RQWQE and CQE need to be
managed separately. This patch splits the CQE from the MTT (SQWQE/SGE/RQWQE).

Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
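For orientation before the hunks: the whole patch reduces to a two-way
dispatch keyed on the new mtt_type field of struct hns_roce_mtt. The sketch
below condenses that pattern into one place; hns_roce_pick_mtt_backing() is
a hypothetical helper for illustration only and is not in the patch, which
instead open-codes the same if/else in hns_roce_alloc_mtt_range(),
hns_roce_mtt_cleanup() and hns_roce_write_mtt_chunk().

/*
 * Illustration only -- not part of the patch. It condenses the if/else
 * dispatch that the patch adds at each MTT consumer, using the types and
 * fields introduced in hns_roce_device.h below.
 */
static void hns_roce_pick_mtt_backing(struct hns_roce_dev *hr_dev,
				      enum hns_roce_mtt_type mtt_type,
				      struct hns_roce_buddy **buddy,
				      struct hns_roce_hem_table **table)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt_type == MTT_TYPE_WQE) {
		/* SQWQE/SGE/RQWQE keep the original MTT buddy and HEM table */
		*buddy = &mr_table->mtt_buddy;
		*table = &mr_table->mtt_table;
	} else {
		/* CQE gets its own buddy allocator and HEM table */
		*buddy = &mr_table->mtt_cqe_buddy;
		*table = &mr_table->mtt_cqe_table;
	}
}

Callers tag the MTT before calling hns_roce_mtt_init(): QP buffers are always
MTT_TYPE_WQE, while the CQ paths choose MTT_TYPE_CQE only when
hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE) reports that the CQE table
uses multi-hop addressing (hip08), and otherwise fall back to MTT_TYPE_WQE so
the single shared table keeps working as before.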
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_cq.c		| 15
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_device.h	| 14
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hem.c	|  3
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_main.c	| 17
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_mr.c		| 66
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_qp.c		|  2
6 files changed, 98 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 362aeecc1a086..9cf213c53f4cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -96,7 +96,11 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
	cq_table = &hr_dev->cq_table;

	/* Get the physical address of cq buf */
-	mtt_table = &hr_dev->mr_table.mtt_table;
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
+	else
+		mtt_table = &hr_dev->mr_table.mtt_table;
+
	mtts = hns_roce_table_find(hr_dev, mtt_table,
				   hr_mtt->first_seg, &dma_handle);
	if (!mtts) {
@@ -221,6 +225,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
				(*umem)->page_shift, &buf->hr_mtt);
	if (ret)
@@ -250,6 +258,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
	if (ret)
		goto out;

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
				buf->hr_buf.page_shift, &buf->hr_mtt);
	if (ret)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 7dd8fbb2b9f20..319c7054905dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -170,6 +170,11 @@ enum {
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};

+enum hns_roce_mtt_type {
+	MTT_TYPE_WQE	= 0,
+	MTT_TYPE_CQE,
+};
+
#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_PORT_DOWN			0
@@ -241,9 +246,10 @@ struct hns_roce_hem_table {
};

struct hns_roce_mtt {
-	unsigned long	first_seg;
-	int		order;
-	int		page_shift;
+	unsigned long		first_seg;
+	int			order;
+	int			page_shift;
+	enum hns_roce_mtt_type	mtt_type;
};

/* Only support 4K page size for mr register */
@@ -268,6 +274,8 @@ struct hns_roce_mr_table {
	struct hns_roce_buddy		mtt_buddy;
	struct hns_roce_hem_table	mtt_table;
	struct hns_roce_hem_table	mtpt_table;
+	struct hns_roce_buddy		mtt_cqe_buddy;
+	struct hns_roce_hem_table	mtt_cqe_table;
};

struct hns_roce_wq {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ac2d671ed2f9a..8a3e174f5b5fa 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1041,4 +1041,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index db6593e15e026..a110f968987f2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -546,6 +546,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
		return ret;
	}

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_init_hem_table(hr_dev,
+				&hr_dev->mr_table.mtt_cqe_table,
+				HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+				hr_dev->caps.num_cqe_segs, 1);
+		if (ret) {
+			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+			goto err_unmap_cqe;
+		}
+	}
+
	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
@@ -593,6 +604,12 @@ err_unmap_dmpt:
err_unmap_mtt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

	return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index d8f7b41148d18..88b436fb92ee9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -177,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
-				    unsigned long *seg)
+				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	struct hns_roce_hem_table *table;
+	struct hns_roce_buddy *buddy;
+	int ret;
+
+	if (mtt_type == MTT_TYPE_WQE) {
+		buddy = &mr_table->mtt_buddy;
+		table = &mr_table->mtt_table;
+	} else {
+		buddy = &mr_table->mtt_cqe_buddy;
+		table = &mr_table->mtt_cqe_table;
+	}

-	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret == -1)
		return -1;

-	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
-		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+		hns_roce_buddy_free(buddy, *seg, order);
		return -1;
	}
@@ -198,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
-	int ret = 0;
+	int ret;
	int i;

	/* Page num is zero, correspond to DMA memory register */
@@ -217,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		++mtt->order;

	/* Allocate MTT entry */
-	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+				       mtt->mtt_type);
	if (ret == -1)
		return -ENOMEM;
@@ -231,9 +242,19 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
	if (mtt->order < 0)
		return;

-	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
-				 mtt->first_seg + (1 << mtt->order) - 1);
+	if (mtt->mtt_type == MTT_TYPE_WQE) {
+		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+					 mtt->first_seg,
+					 mtt->first_seg + (1 << mtt->order) - 1);
+	} else {
+		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+					 mtt->first_seg,
+					 mtt->first_seg + (1 << mtt->order) - 1);
+	}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
@@ -362,7 +383,11 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

-	table = &hr_dev->mr_table.mtt_table;
+	if (mtt->mtt_type == MTT_TYPE_WQE)
+		table = &hr_dev->mr_table.mtt_table;
+	else
+		table = &hr_dev->mr_table.mtt_cqe_table;
+
	mtts = hns_roce_table_find(hr_dev, table,
			mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
			&dma_handle);
@@ -409,9 +434,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
-	u32 i = 0;
-	int ret = 0;
-	u64 *page_list = NULL;
+	u64 *page_list;
+	int ret;
+	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
@@ -434,7 +459,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
@@ -448,8 +473,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
	if (ret)
		goto err_buddy;

+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+					  ilog2(hr_dev->caps.num_cqe_segs));
+		if (ret)
+			goto err_buddy_cqe;
+	}
	return 0;

+err_buddy_cqe:
+	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
@@ -460,6 +494,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index b512ecc6df225..881ea67e6bac6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -440,6 +440,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
			goto err_out;
		}

+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
					hr_qp->umem->page_shift, &hr_qp->mtt);
		if (ret) {
@@ -490,6 +491,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
			goto err_out;
		}

+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);