RDMA/mlx5: Send UAR page index as ioctl attribute
Add the UAR page index as a driver ioctl attribute to increase the number
of supported indices, which were previously limited to 16 bits by the
uar_page_index field of struct mlx5_ib_create_cq.

Link: https://lore.kernel.org/r/0e18b34d7ec3b1ae02d694b0d545aed7413c0ef7.1719512393.git.leon@kernel.org
Signed-off-by: Akiva Goldberger <agoldberger@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent dd6d7f8574
commit 589b844f1b
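Some context on the limit being lifted: struct mlx5_ib_create_cq in include/uapi/rdma/mlx5-abi.h carries the UAR index in a __u16 uar_page_index field, while DEVX hands userspace a full 32-bit page id. A minimal sketch, assuming rdma-core's mlx5dv DEVX API; show_uar_page_id is a hypothetical helper, not part of this patch, and the point at which page ids exceed 0xffff is an illustration, not a documented threshold.

/* Sketch: DEVX UAR allocation returns a 32-bit page id that the legacy
 * 16-bit uar_page_index field cannot always carry. */
#include <stdio.h>
#include <infiniband/mlx5dv.h>

static int show_uar_page_id(struct ibv_context *ctx)
{
	struct mlx5dv_devx_uar *uar =
		mlx5dv_devx_alloc_uar(ctx, MLX5DV_UAR_ALLOC_TYPE_NC);

	if (!uar)
		return -1;

	/* page_id is uint32_t; once it no longer fits in 16 bits, only the
	 * new ioctl attribute can convey it to CQ creation intact. */
	printf("UAR page id %u (16-bit safe: %s)\n", uar->page_id,
	       uar->page_id <= 0xffff ? "yes" : "no");

	mlx5dv_devx_free_uar(uar);
	return 0;
}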
drivers/infiniband/hw/mlx5/cq.c
@@ -38,6 +38,9 @@
 #include "srq.h"
 #include "qp.h"
 
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 {
 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
@@ -714,7 +717,8 @@ static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
 
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
-			  int *cqe_size, int *index, int *inlen)
+			  int *cqe_size, int *index, int *inlen,
+			  struct uverbs_attr_bundle *attrs)
 {
 	struct mlx5_ib_create_cq ucmd = {};
 	unsigned long page_size;
@@ -788,7 +792,11 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
 
-	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
+	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX)) {
+		err = uverbs_copy_from(index, attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX);
+		if (err)
+			goto err_cqb;
+	} else if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
 		*index = ucmd.uar_page_index;
 	} else if (context->bfregi.lib_uar_dyn) {
 		err = -EINVAL;
@@ -981,7 +989,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 	if (udata) {
 		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
-				     &index, &inlen);
+				     &index, &inlen, attrs);
 		if (err)
 			return err;
 	} else {
@@ -1443,3 +1451,17 @@ int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
 
 	return 0;
 }
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+	mlx5_ib_cq_create,
+	UVERBS_OBJECT_CQ,
+	UVERBS_METHOD_CQ_CREATE,
+	UVERBS_ATTR_PTR_IN(
+		MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX,
+		UVERBS_ATTR_TYPE(u32),
+		UA_OPTIONAL));
+
+const struct uapi_definition mlx5_ib_create_cq_defs[] = {
+	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_CQ, &mlx5_ib_cq_create),
+	{},
+};
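For readers unfamiliar with the uverbs ioctl helpers used in the hunks above: an optional attribute is probed with uverbs_attr_is_valid() and fetched with uverbs_copy_from(), which copies sizeof(*dst) bytes and handles the kernel's inline-data encoding for small attributes. A minimal standalone sketch of that pattern follows; get_uar_index is a hypothetical helper, not code from this patch.

/* Hypothetical helper illustrating the optional-attribute read pattern. */
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

static int get_uar_index(struct uverbs_attr_bundle *attrs, u32 *index)
{
	/* Optional attributes may be absent; probe before reading. */
	if (!uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX))
		return -ENOENT;	/* caller falls back to the legacy path */

	/* Copies sizeof(*index) bytes from the attribute payload. */
	return uverbs_copy_from(index, attrs, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX);
}

Making the attribute UA_OPTIONAL keeps the legacy MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX path working for existing binaries, with the new attribute taking precedence when both are supplied.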
drivers/infiniband/hw/mlx5/main.c
@@ -3750,6 +3750,7 @@ static const struct uapi_definition mlx5_ib_defs[] = {
 	UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
 	UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
 	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
+	UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),
 
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1528,6 +1528,7 @@ extern const struct uapi_definition mlx5_ib_devx_defs[];
 extern const struct uapi_definition mlx5_ib_flow_defs[];
 extern const struct uapi_definition mlx5_ib_qos_defs[];
 extern const struct uapi_definition mlx5_ib_std_types_defs[];
+extern const struct uapi_definition mlx5_ib_create_cq_defs[];
 
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -270,6 +270,10 @@ enum mlx5_ib_device_query_context_attrs {
 	MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
 };
 
+enum mlx5_ib_create_cq_attrs {
+	MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX = UVERBS_ID_DRIVER_NS_WITH_UHW,
+};
+
 #define MLX5_IB_DW_MATCH_PARAM 0xA0
 
 struct mlx5_ib_match_params {
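On the user-space side, a provider can now pass the index through the new 32-bit attribute instead of the 16-bit struct field. A hedged sketch using rdma-core's command-buffer helpers (cmd_ioctl.h is an rdma-core-internal header available to in-tree providers); create_cq_with_uar_index is an invented helper, and a real provider must also fill the mandatory generic UVERBS_METHOD_CQ_CREATE attributes before executing the ioctl.

/* Illustrative only: filling the new driver attribute from userspace. */
#include <stdint.h>
#include <infiniband/verbs.h>
#include <rdma/ib_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include "cmd_ioctl.h"	/* rdma-core-internal helpers for in-tree providers */

static int create_cq_with_uar_index(struct ibv_context *ctx, uint32_t uar_index)
{
	DECLARE_COMMAND_BUFFER(cmd, UVERBS_OBJECT_CQ, UVERBS_METHOD_CQ_CREATE, 1);

	/* The attribute carries a full u32, unlike the __u16 uar_page_index
	 * field of struct mlx5_ib_create_cq, so ids above 0xffff now fit. */
	fill_attr_in_uint32(cmd, MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX, uar_index);

	/* ... mandatory generic CQ-create attributes (handle, CQE count,
	 * response buffer) would be filled here in a real provider ... */
	return execute_ioctl(ctx, cmd);
}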