RDMA/srpt: Make slab cache names unique

Since commit 4c39529663 ("slab: Warn on duplicate cache names when
DEBUG_VM=y"), slab complains about duplicate cache names. Hence this
patch. The approach is as follows:
- Maintain an xarray with the slab size as index and a reference count
  and a kmem_cache pointer as contents. Use srpt-${slab_size} as kmem
  cache name.
- Use 512-byte alignment for all slabs instead of only for some of the
  slabs.
- Increment the reference count instead of calling kmem_cache_create().
- Decrement the reference count instead of calling kmem_cache_destroy().

Fixes: 5dabcd0456 ("RDMA/srpt: Add support for immediate data")
Link: https://patch.msgid.link/r/20241009210048.4122518-1-bvanassche@acm.org
Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Closes: https://lore.kernel.org/linux-block/xpe6bea7rakpyoyfvspvin2dsozjmjtjktpph7rep3h25tv7fb@ooz4cu5z6bq6/
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Author: Bart Van Assche <bvanassche@acm.org>
Date: 2024-10-09 14:00:48 -07:00
Committer: Jason Gunthorpe <jgg@nvidia.com>
Parent: 8cddfa535c
Commit: 4d784c042d
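
To illustrate the reference-counting scheme described in the commit message, here is a hypothetical usage sketch of the srpt_cache_get()/srpt_cache_put() helpers that the patch adds. The object size 4096 and the example function name are invented for illustration and do not appear in the patch:

/* Illustrative only -- not part of the patch. */
static void srpt_cache_refcount_example(void)
{
	/*
	 * First caller: no "srpt-4096" cache exists yet, so one is created
	 * with 512-byte alignment and its reference count set to 1.
	 */
	struct kmem_cache *a = srpt_cache_get(4096);

	/*
	 * Second caller asking for the same size gets the same cache back;
	 * only the reference count is incremented, so no duplicate cache
	 * name is ever registered.
	 */
	struct kmem_cache *b = srpt_cache_get(4096);

	srpt_cache_put(b);	/* ref drops to 1, cache is kept */
	srpt_cache_put(a);	/* ref drops to 0, "srpt-4096" is destroyed */
}

With this scheme, all srpt slab caches of the same object size share one kmem_cache and one name, which avoids the duplicate-name warning. The patch itself follows.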

--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 static u64 srpt_service_guid;
 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
+static DEFINE_MUTEX(srpt_mc_mutex);	/* Protects srpt_memory_caches. */
+static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
 
 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
 module_param(srp_max_req_size, int, 0444);
@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
 
+/* Type of the entries in srpt_memory_caches. */
+struct srpt_memory_cache_entry {
+	refcount_t ref;
+	struct kmem_cache *c;
+};
+
+static struct kmem_cache *srpt_cache_get(unsigned int object_size)
+{
+	struct srpt_memory_cache_entry *e;
+	char name[32];
+	void *res;
+
+	guard(mutex)(&srpt_mc_mutex);
+	e = xa_load(&srpt_memory_caches, object_size);
+	if (e) {
+		refcount_inc(&e->ref);
+		return e->c;
+	}
+	snprintf(name, sizeof(name), "srpt-%u", object_size);
+	e = kmalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return NULL;
+	refcount_set(&e->ref, 1);
+	e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
+	if (!e->c)
+		goto free_entry;
+	res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
+	if (xa_is_err(res))
+		goto destroy_cache;
+	return e->c;
+
+destroy_cache:
+	kmem_cache_destroy(e->c);
+
+free_entry:
+	kfree(e);
+	return NULL;
+}
+
+static void srpt_cache_put(struct kmem_cache *c)
+{
+	struct srpt_memory_cache_entry *e = NULL;
+	unsigned long object_size;
+
+	guard(mutex)(&srpt_mc_mutex);
+	xa_for_each(&srpt_memory_caches, object_size, e)
+		if (e->c == c)
+			break;
+	if (WARN_ON_ONCE(!e))
+		return;
+	if (!refcount_dec_and_test(&e->ref))
+		return;
+	WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
+	kmem_cache_destroy(e->c);
+	kfree(e);
+}
+
 /*
  * The only allowed channel state changes are those that change the channel
  * state into a state with a higher numerical value. Hence the new > prev test.
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
 			     ch->sport->sdev, ch->rq_size,
 			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
-	kmem_cache_destroy(ch->rsp_buf_cache);
+	srpt_cache_put(ch->rsp_buf_cache);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
 			     sdev, ch->rq_size,
 			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
-	kmem_cache_destroy(ch->req_buf_cache);
+	srpt_cache_put(ch->req_buf_cache);
 
 	kref_put(&ch->kref, srpt_free_ch);
 }
@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 
-	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
-					      512, 0, NULL);
+	ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
 	if (!ch->rsp_buf_cache)
 		goto free_ch;
 
@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	alignment_offset = round_up(imm_data_offset, 512) -
 		imm_data_offset;
 	req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
-	ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
-					      512, 0, NULL);
+	ch->req_buf_cache = srpt_cache_get(req_sz);
 	if (!ch->req_buf_cache)
 		goto free_rsp_ring;
 
@@ -2478,7 +2535,7 @@ free_recv_ring:
 			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
 free_recv_cache:
-	kmem_cache_destroy(ch->req_buf_cache);
+	srpt_cache_put(ch->req_buf_cache);
 
 free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2486,7 +2543,7 @@ free_rsp_ring:
 			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
 free_rsp_cache:
-	kmem_cache_destroy(ch->rsp_buf_cache);
+	srpt_cache_put(ch->rsp_buf_cache);
 
 free_ch:
 	if (rdma_cm_id)
@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
 			     sdev->srq_size, sdev->req_buf_cache,
 			     DMA_FROM_DEVICE);
-	kmem_cache_destroy(sdev->req_buf_cache);
+	srpt_cache_put(sdev->req_buf_cache);
 	sdev->srq = NULL;
 }
 
@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
 		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
 
-	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
-						srp_max_req_size, 0, 0, NULL);
+	sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
 	if (!sdev->req_buf_cache)
 		goto free_srq;
 
@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	return 0;
 
 free_cache:
-	kmem_cache_destroy(sdev->req_buf_cache);
+	srpt_cache_put(sdev->req_buf_cache);
 
 free_srq:
 	ib_destroy_srq(srq);