/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

/* Implements a simple circular buffer that is shared between the user
 * and the driver and can be resized. The requested element size is
 * rounded up to a power of 2 and the number of elements in the buffer
 * is also rounded up to a power of 2. Since the queue is empty when
 * the producer and consumer indices match, the maximum capacity of the
 * queue is one less than the number of element slots.
 *
 * Notes:
 *   - The driver indices are always masked off to q->index_mask
 *     before storing, so they do not need to be checked on reads.
 *   - The user, whether user space or kernel, is generally
 *     not trusted, so its parameters are masked to make sure
 *     they do not access the queue out of bounds on reads.
 *   - The driver indices for queues must not be written by the
 *     user, so a local copy is used and a shared copy is
 *     stored when the local copy is changed.
 *   - By passing the type in the parameter list separate from q,
 *     the compiler can eliminate the switch statement when the
 *     actual queue type is known at compile time at the call site.
 *   - These queues are lock free. The user and driver must protect
 *     changes to their end of a queue with locks if more than one
 *     CPU can be accessing it at the same time.
 */
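
/* A worked example of the sizing rules above (illustrative numbers, not
 * taken from the code): a caller asking rxe_queue_init() for 5 elements
 * of 24 bytes each would get 8 slots of 32 bytes, so
 *
 *	q->log2_elem_size == 5;		// 32 == 1 << 5
 *	q->index_mask == 7;		// 8 slots, indices 0..7
 *
 * and at most 7 elements can be queued at once, since one slot always
 * stays empty to keep the full and empty states distinguishable.
 */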

/**
 * enum queue_type - type of queue
 * @QUEUE_TYPE_TO_CLIENT: Queue is written by rxe driver and
 *			read by client which may be a user space
 *			application or a kernel ulp.
 *			Used by rxe internals only.
 * @QUEUE_TYPE_FROM_CLIENT: Queue is written by client and
 *			read by rxe driver.
 *			Used by rxe internals only.
 * @QUEUE_TYPE_FROM_ULP: Queue is written by kernel ulp and
 *			read by rxe driver.
 *			Used by kernel verbs APIs only on
 *			behalf of ulps.
 * @QUEUE_TYPE_TO_ULP: Queue is written by rxe driver and
 *			read by kernel ulp.
 *			Used by kernel verbs APIs only on
 *			behalf of ulps.
 */
enum queue_type {
	QUEUE_TYPE_TO_CLIENT,
	QUEUE_TYPE_FROM_CLIENT,
	QUEUE_TYPE_FROM_ULP,
	QUEUE_TYPE_TO_ULP,
};
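
/* How the types pair up in practice (a sketch based on the descriptions
 * above, not a rule enforced by this header): the two CLIENT types are
 * the rxe driver's view of a shared queue, while the two ULP types are
 * the kernel verbs side's view of the same queue. For example, a send
 * queue filled by a kernel ulp (QUEUE_TYPE_FROM_ULP on the posting side)
 * is drained by rxe internals with QUEUE_TYPE_FROM_CLIENT.
 */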

struct rxe_queue_buf;

struct rxe_queue {
	struct rxe_dev		*rxe;
	struct rxe_queue_buf	*buf;
	struct rxe_mmap_info	*ip;
	size_t			buf_size;
	size_t			elem_size;
	unsigned int		log2_elem_size;
	u32			index_mask;
	enum queue_type		type;
	/* private copy of index for shared queues between
	 * driver and clients. Driver reads and writes
	 * this copy and then replicates it to rxe_queue_buf
	 * for read access by clients.
	 */
	u32			index;
};

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
				 unsigned int elem_size, enum queue_type type);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     spinlock_t *producer_lock, spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->index_mask;
}

static inline u32 queue_get_producer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe, client owns the index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe which owns the index */
		prod = q->index;
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp which owns the index */
		prod = q->buf->producer_index;
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp, rxe owns the index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	}

	return prod;
}

static inline u32 queue_get_consumer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe which owns the index */
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe, client owns the index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp, rxe owns the index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp which owns the index */
		cons = q->buf->consumer_index;
		break;
	}

	return cons;
}

static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod + 1 - cons) & q->index_mask) == 0;
}

static inline u32 queue_count(const struct rxe_queue *q,
			      enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return (prod - cons) & q->index_mask;
}
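
/* A worked example of the modular arithmetic above (values assumed for
 * illustration): with q->index_mask == 7, prod == 1 and cons == 6 after
 * the producer has wrapped around, queue_count() returns
 * (1 - 6) & 7 == 3. queue_full() adds one to the producer first, so the
 * queue reports full once 7 of the 8 slots hold elements.
 */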

static inline void queue_advance_producer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe, client owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance client index\n",
				__func__);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe which owns the index */
		prod = q->index;
		prod = (prod + 1) & q->index_mask;
		q->index = prod;
		/* release so client can read it safely */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp which owns the index */
		prod = q->buf->producer_index;
		prod = (prod + 1) & q->index_mask;
		/* release so rxe can read it safely */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp, rxe owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance driver index\n",
				__func__);
		break;
	}
}

static inline void queue_advance_consumer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe which owns the index */
		cons = (q->index + 1) & q->index_mask;
		q->index = cons;
		/* release so client can read it safely */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe, client owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance client index\n",
				__func__);
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp, rxe owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance driver index\n",
				__func__);
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp which owns the index */
		cons = q->buf->consumer_index;
		cons = (cons + 1) & q->index_mask;
		/* release so rxe can read it safely */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	}
}

static inline void *queue_producer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);

	return q->buf->data + (prod << q->log2_elem_size);
}

static inline void *queue_consumer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 cons = queue_get_consumer(q, type);

	return q->buf->data + (cons << q->log2_elem_size);
}

static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->log2_elem_size);
}

static inline u32 queue_index_from_addr(const struct rxe_queue *q,
					const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
			& q->index_mask;
}
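
/* Sketch of the addressing math (values assumed for illustration): with
 * q->log2_elem_size == 5 and q->index_mask == 7, element 3 lives at
 * q->buf->data + (3 << 5), i.e. offset 96, and queue_index_from_addr()
 * recovers 3 from that address. Masking with q->index_mask is what keeps
 * an untrusted index from reaching past the 8-slot buffer.
 */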

static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
	return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}
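
/* A minimal usage sketch (hypothetical helper, not part of this header;
 * assumes a queue created with rxe_queue_init() and external locking per
 * the notes at the top of this file):
 *
 *	static int example_post(struct rxe_queue *q, const void *elem)
 *	{
 *		if (queue_full(q, QUEUE_TYPE_FROM_ULP))
 *			return -ENOMEM;
 *		memcpy(queue_producer_addr(q, QUEUE_TYPE_FROM_ULP),
 *		       elem, q->elem_size);
 *		queue_advance_producer(q, QUEUE_TYPE_FROM_ULP);
 *		return 0;
 *	}
 *
 * The consumer side mirrors this: queue_head() to peek at the oldest
 * element (NULL when empty), then queue_advance_consumer() to retire it.
 */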

#endif /* RXE_QUEUE_H */