// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
 *
 * Authors: zhenwei pi <pizhenwei@bytedance.com>
 *          lei he <helei.sig11@bytedance.com>
 *
 * Copyright 2022 Bytedance CO., LTD.
 */

#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
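
/*
 * Note (editorial summary of this file's behavior): only the RSA modulus
 * is kept on the guest side; it exists solely to answer max_size()
 * queries, since the RSA math itself happens in the device.
 */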
struct virtio_crypto_rsa_ctx {
        MPI n;
};

struct virtio_crypto_akcipher_ctx {
        struct virtio_crypto *vcrypto;
        struct crypto_akcipher *tfm;
        bool session_valid;
        __u64 session_id;
        union {
                struct virtio_crypto_rsa_ctx rsa_ctx;
        };
};

struct virtio_crypto_akcipher_request {
        struct virtio_crypto_request base;
        struct virtio_crypto_akcipher_ctx *akcipher_ctx;
        struct akcipher_request *akcipher_req;
        void *src_buf;
        void *dst_buf;
        uint32_t opcode;
};

struct virtio_crypto_akcipher_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct akcipher_engine_alg algo;
};
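
/* Serializes the active_devs registration bookkeeping across devices. */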
static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_akcipher_finalize_req(
        struct virtio_crypto_akcipher_request *vc_akcipher_req,
        struct akcipher_request *req, int err)
{
        kfree(vc_akcipher_req->src_buf);
        kfree(vc_akcipher_req->dst_buf);
        vc_akcipher_req->src_buf = NULL;
        vc_akcipher_req->dst_buf = NULL;
        virtcrypto_clear_request(&vc_akcipher_req->base);

        crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}
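
/*
 * Data-queue completion callback: map the status the device wrote into
 * the request's inhdr onto an errno, copy out the result for non-verify
 * operations, and complete the request.
 */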
static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_akcipher_request *vc_akcipher_req =
                container_of(vc_req, struct virtio_crypto_akcipher_request, base);
        struct akcipher_request *akcipher_req;
        int error;

        switch (vc_req->status) {
        case VIRTIO_CRYPTO_OK:
                error = 0;
                break;
        case VIRTIO_CRYPTO_INVSESS:
        case VIRTIO_CRYPTO_ERR:
                error = -EINVAL;
                break;
        case VIRTIO_CRYPTO_BADMSG:
                error = -EBADMSG;
                break;
        case VIRTIO_CRYPTO_KEY_REJECTED:
                error = -EKEYREJECTED;
                break;
        default:
                error = -EIO;
                break;
        }

        akcipher_req = vc_akcipher_req->akcipher_req;
        if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
                /* actual length may be less than dst buffer */
                akcipher_req->dst_len = len - sizeof(vc_req->status);
                sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
                                    vc_akcipher_req->dst_buf, akcipher_req->dst_len);
        }
        virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
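
/*
 * Create an akcipher session on the device. The control request is split
 * across three descriptors: the op_ctrl_req header (out), the raw key
 * bytes (out) and the session_input (in). The request sleeps until the
 * control queue completes it rather than busy-polling the virtqueue.
 */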
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
                struct virtio_crypto_ctrl_header *header,
                struct virtio_crypto_akcipher_session_para *para,
                const uint8_t *key, unsigned int keylen)
{
        struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        uint8_t *pkey;
        int err;
        unsigned int num_out = 0, num_in = 0;
        struct virtio_crypto_op_ctrl_req *ctrl;
        struct virtio_crypto_session_input *input;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;

        pkey = kmemdup(key, keylen, GFP_KERNEL);
        if (!pkey)
                return -ENOMEM;

        vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
        if (!vc_ctrl_req) {
                err = -ENOMEM;
                goto out;
        }

        ctrl = &vc_ctrl_req->ctrl;
        memcpy(&ctrl->header, header, sizeof(ctrl->header));
        memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
        input = &vc_ctrl_req->input;
        input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

        sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr_sg;

        sg_init_one(&key_sg, pkey, keylen);
        sgs[num_out++] = &key_sg;

        sg_init_one(&inhdr_sg, input, sizeof(*input));
        sgs[num_out + num_in++] = &inhdr_sg;

        err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;

        if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Create session failed status: %u\n",
                       le32_to_cpu(input->status));
                err = -EINVAL;
                goto out;
        }

        ctx->session_id = le64_to_cpu(input->session_id);
        ctx->session_valid = true;
        err = 0;

out:
        kfree(vc_ctrl_req);
        kfree_sensitive(pkey);

        return err;
}
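
/*
 * Destroy the device-side session, if one is live, over the same
 * wait-for-completion control-queue path used at session creation.
 */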
static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
        struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        unsigned int num_out = 0, num_in = 0;
        int err;
        struct virtio_crypto_op_ctrl_req *ctrl;
        struct virtio_crypto_inhdr *ctrl_status;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;

        if (!ctx->session_valid)
                return 0;

        vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
        if (!vc_ctrl_req)
                return -ENOMEM;

        ctrl_status = &vc_ctrl_req->ctrl_status;
        ctrl_status->status = VIRTIO_CRYPTO_ERR;
        ctrl = &vc_ctrl_req->ctrl;
        ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
        ctrl->header.queue_id = 0;

        destroy_session = &ctrl->u.destroy_session;
        destroy_session->session_id = cpu_to_le64(ctx->session_id);

        sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr_sg;

        sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
        sgs[num_out + num_in++] = &inhdr_sg;

        err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;

        if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                       ctrl_status->status, destroy_session->session_id);
                err = -EINVAL;
                goto out;
        }

        err = 0;
        ctx->session_valid = false;

out:
        kfree(vc_ctrl_req);

        return err;
}
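
/*
 * Queue one akcipher operation on a data virtqueue: out header, source
 * data, (for non-verify ops) destination data, then the status inhdr.
 * The caller's scatterlists are copied into linear bounce buffers, which
 * virtio_crypto_akcipher_finalize_req() frees once the device completes.
 */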
static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
                struct akcipher_request *req, struct data_queue *data_vq)
{
        struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
        struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
        void *src_buf, *dst_buf = NULL;
        unsigned int num_out = 0, num_in = 0;
        int node = dev_to_node(&vcrypto->vdev->dev);
        unsigned long flags;
        int ret;
        bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
        unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;

        /* out header */
        sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr_sg;

        /* src data */
        src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
        if (!src_buf)
                return -ENOMEM;

        if (verify) {
                /* for verify operation, both src and dst data work as OUT direction */
                sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
                sg_init_one(&srcdata_sg, src_buf, src_len);
                sgs[num_out++] = &srcdata_sg;
        } else {
                sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
                sg_init_one(&srcdata_sg, src_buf, src_len);
                sgs[num_out++] = &srcdata_sg;

                /* dst data */
                dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
                if (!dst_buf)
                        goto free_src;

                sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
                sgs[num_out + num_in++] = &dstdata_sg;
        }

        vc_akcipher_req->src_buf = src_buf;
        vc_akcipher_req->dst_buf = dst_buf;

        /* in header */
        sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &inhdr_sg;

        spin_lock_irqsave(&data_vq->lock, flags);
        ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (ret)
                goto err;

        return 0;

err:
        kfree(dst_buf);
free_src:
        kfree(src_buf);
        return -ENOMEM;
}
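
/*
 * crypto-engine .do_one_request handler: build the virtio op header and
 * akcipher parameters, then submit the request to the data virtqueue.
 */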
static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
        struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
        struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
        struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct data_queue *data_vq = vc_req->dataq;
        struct virtio_crypto_op_header *header;
        struct virtio_crypto_akcipher_data_req *akcipher_req;
        int ret;

        vc_req->sgs = NULL;
        vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
                GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
        if (!vc_req->req_data)
                return -ENOMEM;

        /* build request header */
        header = &vc_req->req_data->header;
        header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
        header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
        header->session_id = cpu_to_le64(ctx->session_id);

        /* build request akcipher data */
        akcipher_req = &vc_req->req_data->u.akcipher_req;
        akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
        akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);

        ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
        if (ret < 0) {
                kfree_sensitive(vc_req->req_data);
                vc_req->req_data = NULL;
                return ret;
        }

        return 0;
}

static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
        struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
        struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
        vc_akcipher_req->akcipher_ctx = ctx;
        vc_akcipher_req->akcipher_req = req;
        vc_akcipher_req->opcode = opcode;

        return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}

static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}

static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}

static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
        return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}
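
/*
 * (Re)key a transform: parse the BER-encoded RSA key, cache the modulus
 * so max_size() can answer locally, bind the context to a backing device
 * on first use, and replace any existing device session with a new one.
 */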
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
                                     const void *key,
                                     unsigned int keylen,
                                     bool private,
                                     int padding_algo,
                                     int hash_algo)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
        struct virtio_crypto *vcrypto;
        struct virtio_crypto_ctrl_header header;
        struct virtio_crypto_akcipher_session_para para;
        struct rsa_key rsa_key = {0};
        int node = virtio_crypto_get_current_node();
        uint32_t keytype;
        int ret;

        /* mpi_free() tolerates a NULL pointer, so free unconditionally. */
        mpi_free(rsa_ctx->n);
        rsa_ctx->n = NULL;

        if (private) {
                keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
                ret = rsa_parse_priv_key(&rsa_key, key, keylen);
        } else {
                keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
                ret = rsa_parse_pub_key(&rsa_key, key, keylen);
        }

        if (ret)
                return ret;

        rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
        if (!rsa_ctx->n)
                return -ENOMEM;

        if (!ctx->vcrypto) {
                vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
                                                VIRTIO_CRYPTO_AKCIPHER_RSA);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                virtio_crypto_alg_akcipher_close_session(ctx);
        }

        /* set ctrl header */
        header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
        header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
        header.queue_id = 0;

        /* set RSA para */
        para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
        para.keytype = cpu_to_le32(keytype);
        para.keylen = cpu_to_le32(keylen);
        para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
        para.u.rsa.hash_algo = cpu_to_le32(hash_algo);

        return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}

static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
                                              const void *key,
                                              unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
                                         VIRTIO_CRYPTO_RSA_RAW_PADDING,
                                         VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
                                                     const void *key,
                                                     unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
                                         VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
                                         VIRTIO_CRYPTO_RSA_SHA1);
}

static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
                                             const void *key,
                                             unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
                                         VIRTIO_CRYPTO_RSA_RAW_PADDING,
                                         VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
                                                    const void *key,
                                                    unsigned int keylen)
{
        return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
                                         VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
                                         VIRTIO_CRYPTO_RSA_SHA1);
}

static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

        return mpi_get_size(rsa_ctx->n);
}

static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

        ctx->tfm = tfm;

        akcipher_set_reqsize(tfm,
                             sizeof(struct virtio_crypto_akcipher_request));

        return 0;
}

static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

        virtio_crypto_alg_akcipher_close_session(ctx);
        virtcrypto_dev_put(ctx->vcrypto);
        mpi_free(rsa_ctx->n);
        rsa_ctx->n = NULL;
}

static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
        {
                .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
                .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
                .algo.base = {
                        .encrypt = virtio_crypto_rsa_encrypt,
                        .decrypt = virtio_crypto_rsa_decrypt,
                        .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
                        .set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
                        .max_size = virtio_crypto_rsa_max_size,
                        .init = virtio_crypto_rsa_init_tfm,
                        .exit = virtio_crypto_rsa_exit_tfm,
                        .base = {
                                .cra_name = "rsa",
                                .cra_driver_name = "virtio-crypto-rsa",
                                .cra_priority = 150,
                                .cra_module = THIS_MODULE,
                                .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
                        },
                },
                .algo.op = {
                        .do_one_request = virtio_crypto_rsa_do_req,
                },
        },
        {
                .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
                .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
                .algo.base = {
                        .encrypt = virtio_crypto_rsa_encrypt,
                        .decrypt = virtio_crypto_rsa_decrypt,
                        .sign = virtio_crypto_rsa_sign,
                        .verify = virtio_crypto_rsa_verify,
                        .set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
                        .set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
                        .max_size = virtio_crypto_rsa_max_size,
                        .init = virtio_crypto_rsa_init_tfm,
                        .exit = virtio_crypto_rsa_exit_tfm,
                        .base = {
                                .cra_name = "pkcs1pad(rsa,sha1)",
                                .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
                                .cra_priority = 150,
                                .cra_module = THIS_MODULE,
                                .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
                        },
                },
                .algo.op = {
                        .do_one_request = virtio_crypto_rsa_do_req,
                },
        },
};
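
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * consumer reaches these algorithms through the generic akcipher API,
 * roughly as follows, assuming "key"/"keylen" hold a BER-encoded RSA
 * public key:
 *
 *	struct crypto_akcipher *tfm;
 *	int ret;
 *
 *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	ret = crypto_akcipher_set_pub_key(tfm, key, keylen);
 *	...
 *	crypto_free_akcipher(tfm);
 *
 * The crypto core selects "virtio-crypto-rsa" when its cra_priority (150)
 * beats the other registered "rsa" implementations.
 */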
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
                uint32_t service = virtio_crypto_akcipher_algs[i].service;
                uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
                        ret = crypto_engine_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_akcipher_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
                         virtio_crypto_akcipher_algs[i].algo.base.base.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
                uint32_t service = virtio_crypto_akcipher_algs[i].service;
                uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

                if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_akcipher_algs[i].active_devs == 1)
                        crypto_engine_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);

                virtio_crypto_akcipher_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}