virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
Currently, the code waits in a busy loop on every command issued to the admin virtqueue until a reply arrives. That prevents callers from issuing multiple commands in parallel.

To overcome this limitation, introduce a virtqueue event callback for the admin virtqueue. For every issued command, use the completion mechanism to wait for a reply, and have the event callback complete the matching command for every incoming reply. Alongside that, introduce a spinlock to protect the admin virtqueue operations.

Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Message-Id: <20240716113552.80599-13-jiri@resnulli.us>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 4c3b54af90
parent 7090f2b5ad
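For orientation before reading the hunks below, here is a condensed sketch of the submit/complete flow this patch introduces. It is not the exact driver code: the my_* names are illustrative, the callback takes the queue context as an extra parameter instead of recovering it from vq->vdev as the driver does, the -ENOSPC retry path is omitted, and the descriptor allocation uses GFP_ATOMIC because the sketch holds a spinlock at that point.

/* Condensed sketch of the completion-based flow (illustrative names,
 * not the exact driver code).
 */
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct my_cmd {
	struct completion completion;	/* signalled by the vq callback */
	int ret;			/* filled in before complete() */
};

struct my_avq {
	struct virtqueue *vq;
	spinlock_t lock;		/* protects virtqueue add/get */
};

/* Virtqueue callback: pop every finished buffer and wake its issuer.
 * The driver recovers its admin vq from vq->vdev; the extra parameter
 * here only keeps the sketch self-contained.
 */
static void my_avq_done(struct virtqueue *vq, struct my_avq *avq)
{
	struct my_cmd *cmd;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&avq->lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((cmd = virtqueue_get_buf(vq, &len)))
			complete(&cmd->completion);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&avq->lock, flags);
}

/* Issuer: queue the command, kick, then sleep until the reply arrives
 * instead of spinning on virtqueue_get_buf().
 */
static int my_avq_exec(struct my_avq *avq, struct scatterlist **sgs,
		       unsigned int out_num, unsigned int in_num,
		       struct my_cmd *cmd)
{
	unsigned long flags;
	int ret;

	init_completion(&cmd->completion);

	spin_lock_irqsave(&avq->lock, flags);
	/* GFP_ATOMIC here because the sketch holds a spinlock; the patch
	 * itself keeps GFP_KERNEL and retries on -ENOSPC.
	 */
	ret = virtqueue_add_sgs(avq->vq, sgs, out_num, in_num, cmd,
				GFP_ATOMIC);
	if (ret < 0 || !virtqueue_kick(avq->vq)) {
		spin_unlock_irqrestore(&avq->lock, flags);
		return -EIO;
	}
	spin_unlock_irqrestore(&avq->lock, flags);

	wait_for_completion(&cmd->completion);
	return cmd->ret;
}

The spin_lock_irqsave() pairing matters because the done callback can run from the vring interrupt path, so the submit side must exclude it with interrupts disabled.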
@@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
 			if (vqi->name && vqi->callback)
 				++nvectors;
 		}
+		if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+			++nvectors;
 	} else {
 		/* Second best: one for change, shared for all vqs. */
 		nvectors = 2;
@@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
 	if (!avq_num)
 		return 0;
 	sprintf(avq->name, "avq.%u", avq->vq_index);
-	vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
-				 true, &allocated_vectors, vector_policy,
-				 &vp_dev->admin_vq.info);
+	vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
+				 avq->name, false, true, &allocated_vectors,
+				 vector_policy, &vp_dev->admin_vq.info);
 	if (IS_ERR(vq)) {
 		err = PTR_ERR(vq);
 		goto error_find;
@@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
 	if (!avq_num)
 		return 0;
 	sprintf(avq->name, "avq.%u", avq->vq_index);
-	vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
-			 VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
+	vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
+			 false, VIRTIO_MSI_NO_VECTOR,
+			 &vp_dev->admin_vq.info);
 	if (IS_ERR(vq)) {
 		err = PTR_ERR(vq);
 		goto out_del_vqs;
@@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
 	struct virtio_pci_vq_info *info;
 	/* serializing admin commands execution. */
 	struct mutex cmd_lock;
+	/* Protects virtqueue access. */
+	spinlock_t lock;
 	u64 supported_cmds;
 	/* Name of the admin queue: avq.$vq_index. */
 	char name[10];
@@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
 #define VIRTIO_ADMIN_CMD_BITMAP 0
 #endif
 
+void vp_modern_avq_done(struct virtqueue *vq);
 int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
 			     struct virtio_admin_cmd *cmd);
 
@@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
 	return index == vp_dev->admin_vq.vq_index;
 }
 
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+	struct virtio_admin_cmd *cmd;
+	unsigned long flags;
+	unsigned int len;
+
+	spin_lock_irqsave(&admin_vq->lock, flags);
+	do {
+		virtqueue_disable_cb(vq);
+		while ((cmd = virtqueue_get_buf(vq, &len)))
+			complete(&cmd->completion);
+	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&admin_vq->lock, flags);
+}
+
 static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
 				    u16 opcode,
 				    struct scatterlist **sgs,
@@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
 				    struct virtio_admin_cmd *cmd)
 {
 	struct virtqueue *vq;
-	int ret, len;
+	unsigned long flags;
+	int ret;
 
 	vq = admin_vq->info->vq;
 	if (!vq)
@@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
 	    !((1ULL << opcode) & admin_vq->supported_cmds))
 		return -EOPNOTSUPP;
 
-	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
-	if (ret < 0)
-		return -EIO;
-
-	if (unlikely(!virtqueue_kick(vq)))
-		return -EIO;
-
-	while (!virtqueue_get_buf(vq, &len) &&
-	       !virtqueue_is_broken(vq))
-		cpu_relax();
-
+	init_completion(&cmd->completion);
+
+again:
 	if (virtqueue_is_broken(vq))
 		return -EIO;
 
-	return 0;
+	spin_lock_irqsave(&admin_vq->lock, flags);
+	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+	if (ret < 0) {
+		if (ret == -ENOSPC) {
+			spin_unlock_irqrestore(&admin_vq->lock, flags);
+			cpu_relax();
+			goto again;
+		}
+		goto unlock_err;
+	}
+	if (!virtqueue_kick(vq))
+		goto unlock_err;
+	spin_unlock_irqrestore(&admin_vq->lock, flags);
+
+	wait_for_completion(&cmd->completion);
+
+	return cmd->ret;
+
+unlock_err:
+	spin_unlock_irqrestore(&admin_vq->lock, flags);
+	return -EIO;
 }
 
 int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
@@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
 	virtio_pci_admin_cmd_list_init(vdev);
 }
 
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_admin_cmd *cmd;
+	struct virtqueue *vq;
+
+	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+		return;
+
+	vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
+	if (!vq)
+		return;
+
+	while ((cmd = virtqueue_detach_unused_buf(vq))) {
+		cmd->ret = -EIO;
+		complete(&cmd->completion);
+	}
+}
+
 static void vp_transport_features(struct virtio_device *vdev, u64 features)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -403,6 +452,8 @@ static void vp_reset(struct virtio_device *vdev)
 	while (vp_modern_get_status(mdev))
 		msleep(1);
 
+	vp_modern_avq_cleanup(vdev);
+
 	/* Flush pending VQ/configuration callbacks. */
 	vp_synchronize_vectors(vdev);
 }
@@ -785,6 +836,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 	vp_dev->isr = mdev->isr;
 	vp_dev->vdev.id = mdev->id;
 
+	spin_lock_init(&vp_dev->admin_vq.lock);
 	mutex_init(&vp_dev->admin_vq.cmd_lock);
 	return 0;
 }
@@ -10,6 +10,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/gfp.h>
 #include <linux/dma-mapping.h>
+#include <linux/completion.h>
 
 /**
  * struct virtqueue - a queue to register buffers for sending or receiving.
@@ -109,6 +110,8 @@ struct virtio_admin_cmd {
 	__le64 group_member_id;
 	struct scatterlist *data_sg;
 	struct scatterlist *result_sg;
+	struct completion completion;
+	int ret;
 };
 
 /**