Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/uar.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 351
1 file changed, 224 insertions(+), 127 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index ab0b896621a0..2e6b0f290ddc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -37,11 +37,6 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-enum {
- NUM_DRIVER_UARS = 4,
- NUM_LOW_LAT_UUARS = 4,
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
@@ -67,167 +62,269 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
-static int need_uuar_lock(int uuarn)
+static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
- int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
-
- if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
- return 0;
+ if (MLX5_CAP_GEN(mdev, uar_4k))
+ return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
return 1;
}
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
- int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
- struct mlx5_bf *bf;
- phys_addr_t addr;
- int err;
+ u32 system_page_index;
+
+ if (MLX5_CAP_GEN(mdev, uar_4k))
+ system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
+ else
+ system_page_index = index;
+
+ return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+}
+
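
With 4K UAR support, several hardware UAR indices share one kernel system page, so uar2pfn() scales the index down before adding it to the BAR 0 base pfn. Below is a minimal userspace sketch of that arithmetic, assuming 64K kernel pages (PAGE_SHIFT 16) against the 4K adapter page (MLX5_ADAPTER_PAGE_SHIFT 12); the real values come from the kernel configuration and device capabilities.

/* Sketch of the uar2pfn() index scaling; all constants here are
 * assumptions for illustration, not read from a real device.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT         16   /* assumed: 64K kernel pages */
#define EX_ADAPTER_PAGE_SHIFT 12   /* MLX5_ADAPTER_PAGE_SHIFT: 4K UARs */

int main(void)
{
	uint64_t bar0_pfn = 0x3f000;   /* hypothetical BAR 0 start >> PAGE_SHIFT */
	uint32_t index = 35;           /* hardware UAR index */

	/* 64K / 4K = 16 UARs per system page, so UAR 35 sits in system page 2 */
	uint32_t sys_page = index >> (EX_PAGE_SHIFT - EX_ADAPTER_PAGE_SHIFT);

	printf("pfn = 0x%llx\n", (unsigned long long)(bar0_pfn + sys_page));
	return 0;
}
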
+static void up_rel_func(struct kref *kref)
+{
+ struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
+
+ list_del(&up->list);
+ if (mlx5_cmd_free_uar(up->mdev, up->index))
+ mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
+ kfree(up->reg_bitmap);
+ kfree(up->fp_bitmap);
+ kfree(up);
+}
+
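
up_rel_func() is the standard kref release-callback idiom: the last kref_put() unlinks the page and frees everything, and because every kref_put() in this file runs under the owning list's mutex, the bare list_del() is safe. A stripped-down sketch of the same pattern follows, with illustrative names that are not part of the driver:

/* kref release idiom as used by up_rel_func(); 'demo_page' and the
 * helpers are illustrative stand-ins, not driver API.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_page {
	struct kref ref_count;
	struct list_head list;
};

static void demo_release(struct kref *kref)
{
	struct demo_page *p = container_of(kref, struct demo_page, ref_count);

	list_del(&p->list);	/* safe: callers hold the list mutex */
	kfree(p);
}

static void demo_put(struct demo_page *p)
{
	/* demo_release() runs only when the last reference drops */
	kref_put(&p->ref_count, demo_release);
}
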
+static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
+ bool map_wc)
+{
+ struct mlx5_uars_page *up;
+ int err = -ENOMEM;
+ phys_addr_t pfn;
+ int bfregs;
int i;
- uuari->num_uars = NUM_DRIVER_UARS;
- uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
+ bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+ return ERR_PTR(err);
- mutex_init(&uuari->lock);
- uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
- if (!uuari->uars)
- return -ENOMEM;
+ up->mdev = mdev;
+ up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+ if (!up->reg_bitmap)
+ goto error1;
- uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
- if (!uuari->bfs) {
- err = -ENOMEM;
- goto out_uars;
- }
+ up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+ if (!up->fp_bitmap)
+ goto error1;
- uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
- GFP_KERNEL);
- if (!uuari->bitmap) {
- err = -ENOMEM;
- goto out_bfs;
- }
+ for (i = 0; i < bfregs; i++)
+ if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
+ set_bit(i, up->reg_bitmap);
+ else
+ set_bit(i, up->fp_bitmap);
- uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
- if (!uuari->count) {
- err = -ENOMEM;
- goto out_bitmap;
- }
+ up->bfregs = bfregs;
+ up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+ up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
- for (i = 0; i < uuari->num_uars; i++) {
- err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
- if (err)
- goto out_count;
+ err = mlx5_cmd_alloc_uar(mdev, &up->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ goto error1;
+ }
- addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
- uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
- if (!uuari->uars[i].map) {
- mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ pfn = uar2pfn(mdev, up->index);
+ if (map_wc) {
+ up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!up->map) {
+ err = -EAGAIN;
+ goto error2;
+ }
+ } else {
+ up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!up->map) {
err = -ENOMEM;
- goto out_count;
+ goto error2;
}
- mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
- uuari->uars[i].index, uuari->uars[i].map);
- }
-
- for (i = 0; i < tot_uuars; i++) {
- bf = &uuari->bfs[i];
-
- bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
- bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
- bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
- bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
- (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
- MLX5_BF_OFFSET;
- bf->need_lock = need_uuar_lock(i);
- spin_lock_init(&bf->lock);
- spin_lock_init(&bf->lock32);
- bf->uuarn = i;
}
+ kref_init(&up->ref_count);
+ mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
+ up->index, up->bfregs);
+ return up;
+
+error2:
+ if (mlx5_cmd_free_uar(mdev, up->index))
+ mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
+error1:
+ kfree(up->fp_bitmap);
+ kfree(up->reg_bitmap);
+ kfree(up);
+ return ERR_PTR(err);
+}
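
The seeding loop above splits each UAR's bfregs between the two bitmaps: within every UAR, slots below MLX5_NON_FP_BFREGS_PER_UAR are regular and the rest are fast path. A small userspace illustration, assuming two UARs per system page (bfregs == 8) and plain printf in place of the kernel bitmap API:

/* Illustration of the reg/fp bitmap split in alloc_uars_page(); bfregs == 8
 * is an assumed configuration, the constants mirror MLX5_BFREGS_PER_UAR
 * and MLX5_NON_FP_BFREGS_PER_UAR.
 */
#include <stdio.h>

#define BFREGS_PER_UAR        4
#define NON_FP_BFREGS_PER_UAR 2

int main(void)
{
	int bfregs = 8;	/* assumed: two 4K UARs in one system page */

	for (int i = 0; i < bfregs; i++)
		printf("bfreg %d -> %s\n", i,
		       (i % BFREGS_PER_UAR) < NON_FP_BFREGS_PER_UAR ?
		       "reg_bitmap" : "fp_bitmap");
	/* prints reg, reg, fp, fp for each UAR in turn */
	return 0;
}
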
- return 0;
-
-out_count:
- for (i--; i >= 0; i--) {
- iounmap(uuari->uars[i].map);
- mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_uars_page *ret;
+
+ mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+ if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
+ ret = alloc_uars_page(mdev, false);
+ if (IS_ERR(ret)) {
+ ret = NULL;
+ goto out;
+ }
+ list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
+ } else {
+ ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
+ struct mlx5_uars_page, list);
+ kref_get(&ret->ref_count);
}
- kfree(uuari->count);
+out:
+ mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
-out_bitmap:
- kfree(uuari->bitmap);
-
-out_bfs:
- kfree(uuari->bfs);
+ return ret;
+}
+EXPORT_SYMBOL(mlx5_get_uars_page);
-out_uars:
- kfree(uuari->uars);
- return err;
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
+{
+ mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+ kref_put(&up->ref_count, up_rel_func);
+ mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
+EXPORT_SYMBOL(mlx5_put_uars_page);
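
The exported get/put pair hands other mlx5 consumers a refcounted handle on the shared non-WC UAR page; note that in this version mlx5_get_uars_page() returns NULL on failure. A hedged usage sketch with hypothetical caller names:

/* Usage sketch for mlx5_get_uars_page()/mlx5_put_uars_page(); the
 * 'my_ctx' names are hypothetical.
 */
static int my_ctx_init(struct mlx5_core_dev *mdev,
		       struct mlx5_uars_page **uarp)
{
	struct mlx5_uars_page *up;

	up = mlx5_get_uars_page(mdev);	/* takes a reference */
	if (!up)
		return -ENOMEM;

	*uarp = up;	/* doorbells target offsets within up->map */
	return 0;
}

static void my_ctx_cleanup(struct mlx5_core_dev *mdev,
			   struct mlx5_uars_page *up)
{
	mlx5_put_uars_page(mdev, up);	/* drops the init-time reference */
}
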
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
- int i = uuari->num_uars;
+ /* return the offset in bytes from the start of the page to the
+ * blue flame area of the UAR
+ */
+ return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
+ (dbi % MLX5_BFREGS_PER_UAR) *
+ (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
+}
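
map_offset() walks from the start of the system-page mapping: whole 4K adapter pages for the UAR that holds dbi, then bf-reg-sized slots within that UAR, then MLX5_BF_OFFSET past the doorbell registers. A worked example, assuming log_bf_reg_size == 9 (512-byte blue flame registers; the real value is a device capability):

/* Worked map_offset() arithmetic; log_bf_reg_size == 9 is an assumed
 * capability value, the other constants mirror the driver's macros.
 */
#include <stdio.h>

#define BFREGS_PER_UAR    4       /* MLX5_BFREGS_PER_UAR */
#define ADAPTER_PAGE_SIZE 4096    /* MLX5_ADAPTER_PAGE_SIZE */
#define BF_OFFSET         0x800   /* MLX5_BF_OFFSET */

int main(void)
{
	int log_bf_reg_size = 9;   /* assumption: 512-byte bf registers */
	int dbi = 5;               /* second slot of the second UAR */

	unsigned long off = dbi / BFREGS_PER_UAR * ADAPTER_PAGE_SIZE +
			    (dbi % BFREGS_PER_UAR) * (1 << log_bf_reg_size) +
			    BF_OFFSET;

	printf("offset for dbi %d: %lu\n", dbi, off);	/* 4096 + 512 + 2048 = 6656 */
	return 0;
}
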
- for (i--; i >= 0; i--) {
- iounmap(uuari->uars[i].map);
- mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path)
+{
+ struct mlx5_bfreg_data *bfregs;
+ struct mlx5_uars_page *up;
+ struct list_head *head;
+ unsigned long *bitmap;
+ unsigned int *avail;
+ struct mutex *lock; /* pointer to right mutex */
+ int dbi;
+
+ bfregs = &mdev->priv.bfregs;
+ if (map_wc) {
+ head = &bfregs->wc_head.list;
+ lock = &bfregs->wc_head.lock;
+ } else {
+ head = &bfregs->reg_head.list;
+ lock = &bfregs->reg_head.lock;
}
-
- kfree(uuari->count);
- kfree(uuari->bitmap);
- kfree(uuari->bfs);
- kfree(uuari->uars);
+ mutex_lock(lock);
+ if (list_empty(head)) {
+ up = alloc_uars_page(mdev, map_wc);
+ if (IS_ERR(up)) {
+ mutex_unlock(lock);
+ return PTR_ERR(up);
+ }
+ list_add(&up->list, head);
+ } else {
+ up = list_entry(head->next, struct mlx5_uars_page, list);
+ kref_get(&up->ref_count);
+ }
+ if (fast_path) {
+ bitmap = up->fp_bitmap;
+ avail = &up->fp_avail;
+ } else {
+ bitmap = up->reg_bitmap;
+ avail = &up->reg_avail;
+ }
+ dbi = find_first_bit(bitmap, up->bfregs);
+ clear_bit(dbi, bitmap);
+ (*avail)--;
+ if (!(*avail))
+ list_del(&up->list);
+
+ bfreg->map = up->map + map_offset(mdev, dbi);
+ bfreg->up = up;
+ bfreg->wc = map_wc;
+ bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
+ mutex_unlock(lock);
return 0;
}
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
- bool map_wc)
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path)
{
- phys_addr_t pfn;
- phys_addr_t uar_bar_start;
int err;
- err = mlx5_cmd_alloc_uar(mdev, &uar->index);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
- return err;
- }
+ err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
+ if (!err)
+ return 0;
- uar_bar_start = pci_resource_start(mdev->pdev, 0);
- pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ if (err == -EAGAIN && map_wc)
+ return alloc_bfreg(mdev, bfreg, false, fast_path);
- if (map_wc) {
- uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->bf_map) {
- mlx5_core_warn(mdev, "ioremap_wc() failed\n");
- uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->map)
- goto err_free_uar;
- }
- } else {
- uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->map)
- goto err_free_uar;
- }
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_bfreg);
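
mlx5_alloc_bfreg() only propagates an error after already retrying without write combining on -EAGAIN (which alloc_uars_page() returns when ioremap_wc() fails), so callers see a plain success/failure; bfreg->wc records which mapping they actually got. A hedged caller sketch follows (the function name is hypothetical):

/* Usage sketch for mlx5_alloc_bfreg(); 'setup_sq' is hypothetical. */
static int setup_sq(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	int err;

	/* map_wc = true, fast_path = false; on -EAGAIN the helper has
	 * already retried with a regular (non-WC) mapping internally.
	 */
	err = mlx5_alloc_bfreg(mdev, bfreg, true, false);
	if (err)
		return err;

	/* bfreg->wc tells the caller whether blue flame may be used */
	return 0;
}
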
- return 0;
+static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
+ struct mlx5_uars_page *up,
+ struct mlx5_sq_bfreg *bfreg)
+{
+ unsigned int uar_idx;
+ unsigned int bfreg_idx;
+ unsigned int bf_reg_size;
-err_free_uar:
- mlx5_core_warn(mdev, "ioremap() failed\n");
- err = -ENOMEM;
- mlx5_cmd_free_uar(mdev, uar->index);
+ bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
- return err;
+ uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
+ bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
+
+ return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}
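
addr_to_dbi_in_syspage() is the inverse of map_offset(): because up->map is page-aligned, taking the absolute address modulo MLX5_ADAPTER_PAGE_SIZE yields the offset within the 4K adapter page. Continuing the earlier worked example (dbi 5, offset 6656, assumed 512-byte bf registers):

/* Inverse of the map_offset() example above; same assumed constants,
 * using the byte offset directly since up->map is page-aligned.
 */
#include <stdio.h>

#define BFREGS_PER_UAR     4
#define ADAPTER_PAGE_SHIFT 12
#define ADAPTER_PAGE_SIZE  (1 << ADAPTER_PAGE_SHIFT)
#define BF_OFFSET          0x800

int main(void)
{
	int bf_reg_size = 1 << 9;   /* assumed log_bf_reg_size == 9 */
	unsigned long off = 6656;   /* map_offset() result for dbi == 5 */

	int uar_idx = off >> ADAPTER_PAGE_SHIFT;                             /* 1 */
	int bfreg_idx = (off % ADAPTER_PAGE_SIZE - BF_OFFSET) / bf_reg_size; /* 1 */

	printf("dbi = %d\n", uar_idx * BFREGS_PER_UAR + bfreg_idx);	/* 5 */
	return 0;
}
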
-EXPORT_SYMBOL(mlx5_alloc_map_uar);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
- if (uar->map)
- iounmap(uar->map);
- else
- iounmap(uar->bf_map);
- mlx5_cmd_free_uar(mdev, uar->index);
+ struct mlx5_bfreg_data *bfregs;
+ struct mlx5_uars_page *up;
+ struct mutex *lock; /* pointer to right mutex */
+ unsigned int dbi;
+ bool fp;
+ unsigned int *avail;
+ unsigned long *bitmap;
+ struct list_head *head;
+
+ bfregs = &mdev->priv.bfregs;
+ if (bfreg->wc) {
+ head = &bfregs->wc_head.list;
+ lock = &bfregs->wc_head.lock;
+ } else {
+ head = &bfregs->reg_head.list;
+ lock = &bfregs->reg_head.lock;
+ }
+ up = bfreg->up;
+ dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
+ fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
+ if (fp) {
+ avail = &up->fp_avail;
+ bitmap = up->fp_bitmap;
+ } else {
+ avail = &up->reg_avail;
+ bitmap = up->reg_bitmap;
+ }
+ mutex_lock(lock);
+ (*avail)++;
+ set_bit(dbi, bitmap);
+ if (*avail == 1)
+ list_add_tail(&up->list, head);
+
+ kref_put(&up->ref_count, up_rel_func);
+ mutex_unlock(lock);
}
-EXPORT_SYMBOL(mlx5_unmap_free_uar);
+EXPORT_SYMBOL(mlx5_free_bfreg);
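
Taken together, the exported pair gives a simple lifecycle: allocate a bfreg, ring doorbells through bfreg->map, then free it, which returns the slot to the right bitmap, relinks the page into its free list when it regains availability, and drops the page reference. A hedged end-to-end sketch with an illustrative function name:

/* Lifecycle sketch for the exported bfreg API; 'demo_use_bfreg' is
 * illustrative only.
 */
static int demo_use_bfreg(struct mlx5_core_dev *mdev)
{
	struct mlx5_sq_bfreg bfreg;
	int err;

	err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
	if (err)
		return err;

	/* ... ring doorbells through bfreg.map, honouring bfreg.wc ... */

	mlx5_free_bfreg(mdev, &bfreg);	/* returns the slot, drops the page ref */
	return 0;
}
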