RDMA/bnxt_re: Remove roundup_pow_of_two depth for all hardware queue resources
Rounding up the queue depth to a power of two is not a hardware requirement. In order to optimize per-connection memory usage, remove the driver's implementation that rounds the queue depths up to the next power of 2. Implement a mask to maintain backward compatibility with older libraries. Signed-off-by: Chandramohan Akula <chandramohan.akula@broadcom.com> Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com> Link: https://lore.kernel.org/r/1698069803-1787-3-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
parent
3a4304d826
commit
48f996d4ad
@ -1184,7 +1184,8 @@ fail:
|
||||
}
|
||||
|
||||
static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
|
||||
struct ib_qp_init_attr *init_attr)
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct bnxt_re_ucontext *uctx)
|
||||
{
|
||||
struct bnxt_qplib_dev_attr *dev_attr;
|
||||
struct bnxt_qplib_qp *qplqp;
|
||||
@ -1213,7 +1214,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
|
||||
/* Allocate 1 more than what's provided so posting max doesn't
|
||||
* mean empty.
|
||||
*/
|
||||
entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
|
||||
entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
|
||||
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
|
||||
rq->q_full_delta = 0;
|
||||
rq->sg_info.pgsize = PAGE_SIZE;
|
||||
@ -1243,7 +1244,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
|
||||
|
||||
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct ib_udata *udata)
|
||||
struct bnxt_re_ucontext *uctx)
|
||||
{
|
||||
struct bnxt_qplib_dev_attr *dev_attr;
|
||||
struct bnxt_qplib_qp *qplqp;
|
||||
@ -1272,7 +1273,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
|
||||
/* Allocate 128 + 1 more than what's provided */
|
||||
diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
|
||||
0 : BNXT_QPLIB_RESERVED_QP_WRS;
|
||||
entries = roundup_pow_of_two(entries + diff + 1);
|
||||
entries = bnxt_re_init_depth(entries + diff + 1, uctx);
|
||||
sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
|
||||
sq->q_full_delta = diff + 1;
|
||||
/*
|
||||
@ -1288,7 +1289,8 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
|
||||
}
|
||||
|
||||
static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
|
||||
struct ib_qp_init_attr *init_attr)
|
||||
struct ib_qp_init_attr *init_attr,
|
||||
struct bnxt_re_ucontext *uctx)
|
||||
{
|
||||
struct bnxt_qplib_dev_attr *dev_attr;
|
||||
struct bnxt_qplib_qp *qplqp;
|
||||
@ -1300,7 +1302,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
|
||||
dev_attr = &rdev->dev_attr;
|
||||
|
||||
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
|
||||
entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
|
||||
entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
|
||||
qplqp->sq.max_wqe = min_t(u32, entries,
|
||||
dev_attr->max_qp_wqes + 1);
|
||||
qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
|
||||
@ -1338,6 +1340,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct bnxt_qplib_dev_attr *dev_attr;
|
||||
struct bnxt_re_ucontext *uctx;
|
||||
struct bnxt_qplib_qp *qplqp;
|
||||
struct bnxt_re_dev *rdev;
|
||||
struct bnxt_re_cq *cq;
|
||||
@ -1347,6 +1350,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
|
||||
qplqp = &qp->qplib_qp;
|
||||
dev_attr = &rdev->dev_attr;
|
||||
|
||||
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
/* Setup misc params */
|
||||
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
|
||||
qplqp->pd = &pd->qplib_pd;
|
||||
@ -1388,18 +1392,18 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
|
||||
}
|
||||
|
||||
/* Setup RQ/SRQ */
|
||||
rc = bnxt_re_init_rq_attr(qp, init_attr);
|
||||
rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
|
||||
if (rc)
|
||||
goto out;
|
||||
if (init_attr->qp_type == IB_QPT_GSI)
|
||||
bnxt_re_adjust_gsi_rq_attr(qp);
|
||||
|
||||
/* Setup SQ */
|
||||
rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
|
||||
rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
|
||||
if (rc)
|
||||
goto out;
|
||||
if (init_attr->qp_type == IB_QPT_GSI)
|
||||
bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
|
||||
bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
|
||||
|
||||
if (udata) /* This will update DPI and qp_handle */
|
||||
rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
|
||||
@ -1715,6 +1719,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
|
||||
{
|
||||
struct bnxt_qplib_dev_attr *dev_attr;
|
||||
struct bnxt_qplib_nq *nq = NULL;
|
||||
struct bnxt_re_ucontext *uctx;
|
||||
struct bnxt_re_dev *rdev;
|
||||
struct bnxt_re_srq *srq;
|
||||
struct bnxt_re_pd *pd;
|
||||
@ -1739,13 +1744,14 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
|
||||
goto exit;
|
||||
}
|
||||
|
||||
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
srq->rdev = rdev;
|
||||
srq->qplib_srq.pd = &pd->qplib_pd;
|
||||
srq->qplib_srq.dpi = &rdev->dpi_privileged;
|
||||
/* Allocate 1 more than what's provided so posting max doesn't
|
||||
* mean empty
|
||||
*/
|
||||
entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
|
||||
entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
|
||||
if (entries > dev_attr->max_srq_wqes + 1)
|
||||
entries = dev_attr->max_srq_wqes + 1;
|
||||
srq->qplib_srq.max_wqe = entries;
|
||||
@ -2103,6 +2109,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
||||
qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
|
||||
}
|
||||
if (qp_attr_mask & IB_QP_CAP) {
|
||||
struct bnxt_re_ucontext *uctx =
|
||||
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
|
||||
qp->qplib_qp.modify_flags |=
|
||||
CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
|
||||
CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
|
||||
@ -2119,7 +2128,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
||||
"Create QP failed - max exceeded");
|
||||
return -EINVAL;
|
||||
}
|
||||
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
|
||||
entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
|
||||
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
|
||||
dev_attr->max_qp_wqes + 1);
|
||||
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
|
||||
@ -2132,7 +2141,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
||||
qp->qplib_qp.sq.q_full_delta -= 1;
|
||||
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
|
||||
if (qp->qplib_qp.rq.max_wqe) {
|
||||
entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
|
||||
entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
|
||||
qp->qplib_qp.rq.max_wqe =
|
||||
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
|
||||
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
|
||||
@ -2920,9 +2929,11 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
||||
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
|
||||
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
|
||||
struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
|
||||
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
|
||||
struct bnxt_re_ucontext *uctx =
|
||||
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
|
||||
int rc, entries;
|
||||
int cqe = attr->cqe;
|
||||
struct bnxt_qplib_nq *nq = NULL;
|
||||
@ -2941,7 +2952,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
cq->rdev = rdev;
|
||||
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
|
||||
|
||||
entries = roundup_pow_of_two(cqe + 1);
|
||||
entries = bnxt_re_init_depth(cqe + 1, uctx);
|
||||
if (entries > dev_attr->max_cq_wqes + 1)
|
||||
entries = dev_attr->max_cq_wqes + 1;
|
||||
|
||||
@ -2949,8 +2960,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
|
||||
if (udata) {
|
||||
struct bnxt_re_cq_req req;
|
||||
struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
|
||||
udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
|
||||
rc = -EFAULT;
|
||||
goto fail;
|
||||
@ -3072,12 +3081,11 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
entries = roundup_pow_of_two(cqe + 1);
|
||||
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
entries = bnxt_re_init_depth(cqe + 1, uctx);
|
||||
if (entries > dev_attr->max_cq_wqes + 1)
|
||||
entries = dev_attr->max_cq_wqes + 1;
|
||||
|
||||
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
|
||||
ib_uctx);
|
||||
/* uverbs consumer */
|
||||
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
|
||||
rc = -EFAULT;
|
||||
@ -4108,6 +4116,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
|
||||
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
|
||||
struct bnxt_re_user_mmap_entry *entry;
|
||||
struct bnxt_re_uctx_resp resp = {};
|
||||
struct bnxt_re_uctx_req ureq = {};
|
||||
u32 chip_met_rev_num = 0;
|
||||
int rc;
|
||||
|
||||
@ -4157,6 +4166,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
|
||||
if (rdev->pacing.dbr_pacing)
|
||||
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
|
||||
|
||||
if (udata->inlen >= sizeof(ureq)) {
|
||||
rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
|
||||
if (rc)
|
||||
goto cfail;
|
||||
if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
|
||||
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
|
||||
uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
|
||||
}
|
||||
}
|
||||
|
||||
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
|
||||
if (rc) {
|
||||
ibdev_err(ibdev, "Failed to copy user context");
|
||||
|
@ -140,6 +140,7 @@ struct bnxt_re_ucontext {
|
||||
void *shpg;
|
||||
spinlock_t sh_lock; /* protect shpg */
|
||||
struct rdma_user_mmap_entry *shpage_mmap;
|
||||
u64 cmask;
|
||||
};
|
||||
|
||||
enum bnxt_re_mmap_flag {
|
||||
@ -167,6 +168,12 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
|
||||
return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
|
||||
}
|
||||
|
||||
/* bnxt_re_init_depth() - compute the effective depth for a queue resource.
 *
 * Historically the driver rounded every requested depth up to the next
 * power of two even though the hardware does not require it.  A newer
 * userspace library can opt out of that rounding by setting
 * BNXT_RE_UCNTX_CMASK_POW2_DISABLED in its ucontext comp mask; kernel
 * consumers (uctx == NULL) never round.  Older libraries that do not set
 * the flag keep the legacy power-of-two behaviour.
 *
 * @ent:  requested number of entries
 * @uctx: user context of the caller, or NULL for kernel consumers
 *
 * Return: @ent unchanged, or @ent rounded up to a power of two for
 *         backward compatibility with older userspace.
 */
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
	if (!uctx)
		return ent;
	if (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED)
		return ent;
	return roundup_pow_of_two(ent);
}
|
||||
|
||||
int bnxt_re_query_device(struct ib_device *ibdev,
|
||||
struct ib_device_attr *ib_attr,
|
||||
struct ib_udata *udata);
|
||||
|
@ -54,6 +54,7 @@ enum {
|
||||
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
|
||||
BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
|
||||
BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
|
||||
BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
|
||||
};
|
||||
|
||||
enum bnxt_re_wqe_mode {
|
||||
@ -62,6 +63,14 @@ enum bnxt_re_wqe_mode {
|
||||
BNXT_QPLIB_WQE_MODE_INVALID = 0x02,
|
||||
};
|
||||
|
||||
enum {
|
||||
BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
|
||||
};
|
||||
|
||||
struct bnxt_re_uctx_req {
|
||||
__aligned_u64 comp_mask;
|
||||
};
|
||||
|
||||
struct bnxt_re_uctx_resp {
|
||||
__u32 dev_id;
|
||||
__u32 max_qp;
|
||||
|
Loading…
Reference in New Issue
Block a user