// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"
#include "debugfs.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
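/*
 * Map a Linux errno returned by the backend to an NVMe status code and
 * record the error location in the request where one applies.
 */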
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
	}
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
	}
	return 0;
}
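/* Return the highest nsid currently stored in the subsystem's namespace xarray. */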
static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
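/*
 * Complete all outstanding Asynchronous Event Request commands with an
 * internal error; used when the admin queue is being torn down.
 */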
static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}
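/*
 * Queue an asynchronous event for the controller and kick the work item
 * that pairs it with an outstanding AER command.
 */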
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}
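/*
 * Enable a port: look up (and if needed request) the transport module,
 * validate transport capabilities against the port configuration, and
 * hand the port to the transport's add_port() callback.
 */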
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	/*
	 * If the transport didn't set the max_queue_size properly, then clamp
	 * it to the target limits. Also set default values in case the
	 * transport didn't set it at all.
	 */
	if (port->max_queue_size < 0)
		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
	else
		port->max_queue_size = clamp_t(int, port->max_queue_size,
					       NVMET_MIN_QUEUE_SIZE,
					       NVMET_MAX_QUEUE_SIZE);

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
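/*
 * Keep-alive expiry handler: reschedule if command traffic has been seen
 * since the last tick (TBKAS), otherwise declare a fatal controller error.
 */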
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
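/*
 * Resolve the namespace referenced by the command's NSID and take a
 * percpu reference on it; the reference is dropped on request completion.
 */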
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);

	req->ns = xa_load(&subsys->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		if (nvmet_subsys_nsid_exists(subsys, nsid))
			return NVME_SC_INTERNAL_PATH_ERROR;
		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock(). Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}
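/* Advance the submission queue head pointer reported in the completion entry. */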
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
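/*
 * Record a non-zero completion status in the CQE and, when a controller is
 * associated with the request, log it in the controller's error log slots.
 */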
static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	/*
	 * we must reference the ctrl again after waiting for inflight IO
	 * to complete. Because admin connect may have sneaked in after we
	 * store sq->ctrl locally, but before we killed the percpu_ref. the
	 * admin connect allocates and assigns sq->ctrl, which now needs a
	 * final ref put, as this ctrl is going away.
	 */
	ctrl = sq->ctrl;

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}
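/*
 * Validate and dispatch an I/O command: handle fabrics and passthru
 * commands first, then route to the block-device, file, or zoned backend
 * parser based on the namespace's command set identifier.
 */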
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			return nvmet_file_parse_io_cmd(req);
		return nvmet_bdev_parse_io_cmd(req);
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return nvmet_bdev_zns_parse_io_cmd(req);
		return NVME_SC_INVALID_IO_CMD_SET;
	default:
		return NVME_SC_INVALID_IO_CMD_SET;
	}
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->result.u64 = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->reset_tbkas = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}

	req->p2p_dev = p2p_dev;

	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
	    !req->sq->ctrl || !req->sq->qid || !req->ns)
		return NULL;
	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}
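/*
 * Allocate data (and, if needed, metadata) scatterlists for a request,
 * preferring peer-to-peer memory when a suitable P2P device was mapped
 * for this namespace.
 */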
int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
		req->p2p_dev = NULL;
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
2016-06-21 09:04:20 -07:00
|
|
|
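/*
* Helpers to extract the individual fields (EN, CSS, MPS, AMS, SHN,
* IOSQES, IOCQES) from a Controller Configuration (CC) register value.
*/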
static inline bool nvmet_cc_en(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_EN_SHIFT) & 0x1;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 nvmet_cc_css(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 nvmet_cc_mps(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 nvmet_cc_ams(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 nvmet_cc_shn(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 nvmet_cc_iosqes(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline u8 nvmet_cc_iocqes(u32 cc)
|
|
|
|
{
|
2017-08-13 09:21:06 -07:00
|
|
|
return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
2021-06-09 18:32:51 -07:00
|
|
|
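/*
* The NVME_CC_CSS_* constants are defined already shifted into register
* position, so shift the extracted field back up before comparing.
*/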
static inline bool nvmet_css_supported(u8 cc_css)
|
|
|
|
{
|
2022-03-17 18:30:14 -07:00
|
|
|
switch (cc_css << NVME_CC_CSS_SHIFT) {
|
2021-06-09 18:32:51 -07:00
|
|
|
case NVME_CC_CSS_NVM:
|
|
|
|
case NVME_CC_CSS_CSI:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&ctrl->lock);
|
|
|
|
|
2021-03-15 15:34:51 -07:00
|
|
|
/*
|
|
|
|
* Only I/O controllers should verify iosqes, iocqes.
|
|
|
|
* Strictly speaking, the spec says a discovery controller
|
|
|
|
* should verify iosqes, iocqes are zeroed, however that
|
|
|
|
* would break backwards compatibility, so don't enforce it.
|
|
|
|
*/
|
2021-09-21 23:35:21 -07:00
|
|
|
if (!nvmet_is_disc_subsys(ctrl->subsys) &&
|
2021-03-15 15:34:51 -07:00
|
|
|
(nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
|
|
|
|
nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
|
|
|
|
ctrl->csts = NVME_CSTS_CFS;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nvmet_cc_mps(ctrl->cc) != 0 ||
|
2016-06-21 09:04:20 -07:00
|
|
|
nvmet_cc_ams(ctrl->cc) != 0 ||
|
2021-06-09 18:32:51 -07:00
|
|
|
!nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
|
2016-06-21 09:04:20 -07:00
|
|
|
ctrl->csts = NVME_CSTS_CFS;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctrl->csts = NVME_CSTS_RDY;
|
2018-06-19 05:45:33 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Controllers that are not yet enabled should not really enforce the
|
|
|
|
* keep alive timeout, but we still want to track a timeout and cleanup
|
|
|
|
* in case a host died before it enabled the controller. Hence, simply
|
|
|
|
* reset the keep alive timer when the controller is enabled.
|
|
|
|
*/
|
2020-10-14 18:51:40 -07:00
|
|
|
if (ctrl->kato)
|
2022-09-27 23:39:10 -07:00
|
|
|
mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&ctrl->lock);
|
|
|
|
|
|
|
|
/* XXX: tear down queues? */
|
|
|
|
ctrl->csts &= ~NVME_CSTS_RDY;
|
|
|
|
ctrl->cc = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
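/*
* Handle a host write to the CC register: start the controller on a 0 -> 1
* EN transition, tear it down on a 1 -> 0 transition, and set or clear the
* shutdown-complete status as the shutdown notification field changes.
*/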
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
|
|
|
|
{
|
|
|
|
u32 old;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->lock);
|
|
|
|
old = ctrl->cc;
|
|
|
|
ctrl->cc = new;
|
|
|
|
|
|
|
|
if (nvmet_cc_en(new) && !nvmet_cc_en(old))
|
|
|
|
nvmet_start_ctrl(ctrl);
|
|
|
|
if (!nvmet_cc_en(new) && nvmet_cc_en(old))
|
|
|
|
nvmet_clear_ctrl(ctrl);
|
|
|
|
if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
|
|
|
|
nvmet_clear_ctrl(ctrl);
|
|
|
|
ctrl->csts |= NVME_CSTS_SHST_CMPLT;
|
|
|
|
}
|
|
|
|
if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
|
|
|
|
ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
|
|
|
|
mutex_unlock(&ctrl->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
/* command sets supported: NVMe command set */
|
|
|
|
ctrl->cap = (1ULL << 37);
|
2021-06-09 18:32:51 -07:00
|
|
|
/* Controller supports one or more I/O Command Sets */
|
|
|
|
ctrl->cap |= (1ULL << 43);
|
2016-06-21 09:04:20 -07:00
|
|
|
/* CC.EN timeout in 500msec units: */
|
|
|
|
ctrl->cap |= (15ULL << 24);
|
|
|
|
/* maximum queue entries supported: */
|
2021-09-22 14:55:36 -07:00
|
|
|
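/* CAP.MQES is a 0's based value, hence the -1 below. */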
if (ctrl->ops->get_max_queue_size)
|
2024-01-23 07:40:31 -07:00
|
|
|
ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
|
|
|
|
ctrl->port->max_queue_size) - 1;
|
2021-09-22 14:55:36 -07:00
|
|
|
else
|
2024-01-23 07:40:31 -07:00
|
|
|
ctrl->cap |= ctrl->port->max_queue_size - 1;
|
2021-08-26 14:15:45 -07:00
|
|
|
|
2021-08-26 23:11:12 -07:00
|
|
|
if (nvmet_is_passthru_subsys(ctrl->subsys))
|
2021-08-26 14:15:45 -07:00
|
|
|
nvmet_passthrough_override_cap(ctrl);
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
2021-03-09 18:16:32 -07:00
|
|
|
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
|
|
|
|
const char *hostnqn, u16 cntlid,
|
|
|
|
struct nvmet_req *req)
|
2016-06-21 09:04:20 -07:00
|
|
|
{
|
2021-03-09 18:16:32 -07:00
|
|
|
struct nvmet_ctrl *ctrl = NULL;
|
2016-06-21 09:04:20 -07:00
|
|
|
struct nvmet_subsys *subsys;
|
|
|
|
|
|
|
|
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
|
|
|
|
if (!subsys) {
|
|
|
|
pr_warn("connect request for invalid subsystem %s!\n",
|
|
|
|
subsysnqn);
|
2019-04-08 08:39:59 -07:00
|
|
|
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
|
2021-03-09 18:16:32 -07:00
|
|
|
goto out;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
|
|
|
|
if (ctrl->cntlid == cntlid) {
|
|
|
|
if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
|
|
|
|
pr_warn("hostnqn mismatch.\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (!kref_get_unless_zero(&ctrl->ref))
|
|
|
|
continue;
|
|
|
|
|
2021-03-09 18:16:32 -07:00
|
|
|
/* ctrl found */
|
|
|
|
goto found;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-09 18:16:32 -07:00
|
|
|
ctrl = NULL; /* ctrl not found */
|
2016-06-21 09:04:20 -07:00
|
|
|
pr_warn("could not find controller %d for subsys %s / host %s\n",
|
|
|
|
cntlid, subsysnqn, hostnqn);
|
2019-04-08 08:39:59 -07:00
|
|
|
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2021-03-09 18:16:32 -07:00
|
|
|
found:
|
2016-06-21 09:04:20 -07:00
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
nvmet_subsys_put(subsys);
|
2021-03-09 18:16:32 -07:00
|
|
|
out:
|
|
|
|
return ctrl;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
2021-02-24 18:56:40 -07:00
|
|
|
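/*
* Fail commands received while the controller is not enabled (CC.EN), not
* ready (CSTS.RDY), or not yet authenticated.
*/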
u16 nvmet_check_ctrl_status(struct nvmet_req *req)
|
2017-02-27 22:21:33 -07:00
|
|
|
{
|
|
|
|
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
|
2018-05-07 23:02:33 -07:00
|
|
|
pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
|
2021-02-24 18:56:40 -07:00
|
|
|
req->cmd->common.opcode, req->sq->qid);
|
2024-06-03 05:57:01 -07:00
|
|
|
return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
|
2017-02-27 22:21:33 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
|
2018-05-07 23:02:33 -07:00
|
|
|
pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
|
2021-02-24 18:56:40 -07:00
|
|
|
req->cmd->common.opcode, req->sq->qid);
|
2024-06-03 05:57:01 -07:00
|
|
|
return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
|
2017-02-27 22:21:33 -07:00
|
|
|
}
|
2022-06-27 02:52:05 -07:00
|
|
|
|
|
|
|
if (unlikely(!nvmet_check_auth_status(req))) {
|
|
|
|
pr_warn("qid %d not authenticated\n", req->sq->qid);
|
2024-06-03 05:57:01 -07:00
|
|
|
return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
|
2022-06-27 02:52:05 -07:00
|
|
|
}
|
2017-02-27 22:21:33 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-12 14:56:39 -07:00
|
|
|
bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
|
2016-06-21 09:04:20 -07:00
|
|
|
{
|
|
|
|
struct nvmet_host_link *p;
|
|
|
|
|
2018-11-12 14:56:39 -07:00
|
|
|
lockdep_assert_held(&nvmet_config_sem);
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
if (subsys->allow_any_host)
|
|
|
|
return true;
|
|
|
|
|
2021-09-21 23:35:21 -07:00
|
|
|
if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
|
2018-11-12 14:56:39 -07:00
|
|
|
return true;
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
list_for_each_entry(p, &subsys->hosts, entry) {
|
|
|
|
if (!strcmp(nvmet_host_name(p->host), hostnqn))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-10-04 14:27:47 -07:00
|
|
|
/*
|
|
|
|
* Note: ctrl->subsys->lock should be held when calling this function
|
|
|
|
*/
|
|
|
|
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
|
|
|
|
struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ns *ns;
|
2020-07-19 20:32:02 -07:00
|
|
|
unsigned long idx;
|
2018-10-04 14:27:47 -07:00
|
|
|
|
|
|
|
if (!req->p2p_client)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ctrl->p2p_client = get_device(req->p2p_client);
|
|
|
|
|
2020-07-19 20:32:02 -07:00
|
|
|
xa_for_each(&ctrl->subsys->namespaces, idx, ns)
|
2018-10-04 14:27:47 -07:00
|
|
|
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note: ctrl->subsys->lock should be held when calling this function
|
|
|
|
*/
|
|
|
|
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct radix_tree_iter iter;
|
|
|
|
void __rcu **slot;
|
|
|
|
|
|
|
|
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
|
|
|
|
pci_dev_put(radix_tree_deref_slot(slot));
|
|
|
|
|
|
|
|
put_device(ctrl->p2p_client);
|
|
|
|
}
|
|
|
|
|
2019-03-13 10:54:59 -07:00
|
|
|
static void nvmet_fatal_error_handler(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl =
|
|
|
|
container_of(work, struct nvmet_ctrl, fatal_err_work);
|
|
|
|
|
|
|
|
pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
|
|
|
|
ctrl->ops->delete_ctrl(ctrl);
|
|
|
|
}
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
|
|
|
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys;
|
|
|
|
struct nvmet_ctrl *ctrl;
|
|
|
|
int ret;
|
|
|
|
u16 status;
|
|
|
|
|
2024-06-03 05:57:01 -07:00
|
|
|
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
|
2016-06-21 09:04:20 -07:00
|
|
|
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
|
|
|
|
if (!subsys) {
|
|
|
|
pr_warn("connect request for invalid subsystem %s!\n",
|
|
|
|
subsysnqn);
|
2019-04-08 08:39:59 -07:00
|
|
|
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
|
2021-02-24 18:56:38 -07:00
|
|
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
2016-06-21 09:04:20 -07:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
down_read(&nvmet_config_sem);
|
2018-11-12 14:56:39 -07:00
|
|
|
if (!nvmet_host_allowed(subsys, hostnqn)) {
|
2016-06-21 09:04:20 -07:00
|
|
|
pr_info("connect by host %s for subsystem %s not allowed\n",
|
|
|
|
hostnqn, subsysnqn);
|
2019-04-08 08:39:59 -07:00
|
|
|
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
|
2016-06-21 09:04:20 -07:00
|
|
|
up_read(&nvmet_config_sem);
|
2024-06-03 05:57:01 -07:00
|
|
|
status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
|
2021-02-24 18:56:38 -07:00
|
|
|
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
2016-06-21 09:04:20 -07:00
|
|
|
goto out_put_subsystem;
|
|
|
|
}
|
|
|
|
up_read(&nvmet_config_sem);
|
|
|
|
|
|
|
|
status = NVME_SC_INTERNAL;
|
|
|
|
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
|
|
|
|
if (!ctrl)
|
|
|
|
goto out_put_subsystem;
|
|
|
|
mutex_init(&ctrl->lock);
|
|
|
|
|
2018-06-07 06:09:50 -07:00
|
|
|
ctrl->port = req->port;
|
2021-09-22 14:55:36 -07:00
|
|
|
ctrl->ops = req->ops;
|
2018-06-07 06:09:50 -07:00
|
|
|
|
2022-06-27 16:25:43 -07:00
|
|
|
#ifdef CONFIG_NVME_TARGET_PASSTHRU
|
|
|
|
/* Loop targets clear IDs by default */
|
|
|
|
if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
|
|
|
|
subsys->clear_ids = 1;
|
|
|
|
#endif
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
|
|
|
|
INIT_LIST_HEAD(&ctrl->async_events);
|
2018-10-04 14:27:47 -07:00
|
|
|
INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
|
2019-03-13 10:54:59 -07:00
|
|
|
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
|
2021-04-22 05:33:16 -07:00
|
|
|
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
|
2016-06-21 09:04:20 -07:00
|
|
|
|
|
|
|
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
|
|
|
|
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
|
|
|
|
|
|
|
|
kref_init(&ctrl->ref);
|
|
|
|
ctrl->subsys = subsys;
|
2024-01-23 07:40:28 -07:00
|
|
|
ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
|
2021-08-26 14:15:45 -07:00
|
|
|
nvmet_init_cap(ctrl);
|
2018-05-30 06:04:47 -07:00
|
|
|
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2018-05-25 08:16:09 -07:00
|
|
|
ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
|
|
|
|
sizeof(__le32), GFP_KERNEL);
|
|
|
|
if (!ctrl->changed_ns_list)
|
|
|
|
goto out_free_ctrl;
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
ctrl->sqs = kcalloc(subsys->max_qid + 1,
|
|
|
|
sizeof(struct nvmet_sq *),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!ctrl->sqs)
|
2020-11-15 05:19:51 -07:00
|
|
|
goto out_free_changed_ns_list;
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2022-02-14 02:07:29 -07:00
|
|
|
ret = ida_alloc_range(&cntlid_ida,
|
2020-01-30 11:29:31 -07:00
|
|
|
subsys->cntlid_min, subsys->cntlid_max,
|
2016-06-21 09:04:20 -07:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (ret < 0) {
|
2024-06-03 05:57:01 -07:00
|
|
|
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
|
2016-06-21 09:04:20 -07:00
|
|
|
goto out_free_sqs;
|
|
|
|
}
|
|
|
|
ctrl->cntlid = ret;
|
|
|
|
|
2018-11-12 14:56:35 -07:00
|
|
|
/*
|
|
|
|
* Discovery controllers may use some arbitrary high value
|
|
|
|
* in order to clean up stale discovery sessions
|
|
|
|
*/
|
2021-09-21 23:35:21 -07:00
|
|
|
if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
|
2018-11-12 14:56:35 -07:00
|
|
|
kato = NVMET_DISC_KATO_MS;
|
|
|
|
|
|
|
|
/* keep-alive timeout in seconds */
|
|
|
|
ctrl->kato = DIV_ROUND_UP(kato, 1000);
|
|
|
|
|
2018-12-12 16:11:39 -07:00
|
|
|
ctrl->err_counter = 0;
|
|
|
|
spin_lock_init(&ctrl->error_lock);
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
nvmet_start_keep_alive_timer(ctrl);
|
|
|
|
|
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
|
2018-10-04 14:27:47 -07:00
|
|
|
nvmet_setup_p2p_ns_map(ctrl, req);
|
2024-05-26 22:15:19 -07:00
|
|
|
nvmet_debugfs_ctrl_setup(ctrl);
|
2016-06-21 09:04:20 -07:00
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
|
|
|
|
*ctrlp = ctrl;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_free_sqs:
|
|
|
|
kfree(ctrl->sqs);
|
2018-05-25 08:16:09 -07:00
|
|
|
out_free_changed_ns_list:
|
|
|
|
kfree(ctrl->changed_ns_list);
|
2016-06-21 09:04:20 -07:00
|
|
|
out_free_ctrl:
|
|
|
|
kfree(ctrl);
|
|
|
|
out_put_subsystem:
|
|
|
|
nvmet_subsys_put(subsys);
|
|
|
|
out:
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nvmet_ctrl_free(struct kref *ref)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
|
|
|
|
struct nvmet_subsys *subsys = ctrl->subsys;
|
|
|
|
|
|
|
|
mutex_lock(&subsys->lock);
|
2018-10-04 14:27:47 -07:00
|
|
|
nvmet_release_p2p_ns_map(ctrl);
|
2016-06-21 09:04:20 -07:00
|
|
|
list_del(&ctrl->subsys_entry);
|
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
|
2017-11-13 05:29:41 -07:00
|
|
|
nvmet_stop_keep_alive_timer(ctrl);
|
|
|
|
|
2017-01-01 04:41:56 -07:00
|
|
|
flush_work(&ctrl->async_event_work);
|
|
|
|
cancel_work_sync(&ctrl->fatal_err_work);
|
|
|
|
|
2022-06-27 02:52:05 -07:00
|
|
|
nvmet_destroy_auth(ctrl);
|
|
|
|
|
2024-05-26 22:15:19 -07:00
|
|
|
nvmet_debugfs_ctrl_free(ctrl);
|
|
|
|
|
2022-02-14 02:07:29 -07:00
|
|
|
ida_free(&cntlid_ida, ctrl->cntlid);
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2020-05-20 12:48:12 -07:00
|
|
|
nvmet_async_events_free(ctrl);
|
2016-06-21 09:04:20 -07:00
|
|
|
kfree(ctrl->sqs);
|
2018-05-25 08:16:09 -07:00
|
|
|
kfree(ctrl->changed_ns_list);
|
2016-06-21 09:04:20 -07:00
|
|
|
kfree(ctrl);
|
2017-11-13 05:29:41 -07:00
|
|
|
|
|
|
|
nvmet_subsys_put(subsys);
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
kref_put(&ctrl->ref, nvmet_ctrl_free);
|
|
|
|
}
|
|
|
|
|
|
|
|
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
|
|
|
|
{
|
2016-11-06 02:03:30 -07:00
|
|
|
mutex_lock(&ctrl->lock);
|
|
|
|
if (!(ctrl->csts & NVME_CSTS_CFS)) {
|
|
|
|
ctrl->csts |= NVME_CSTS_CFS;
|
2022-03-21 04:57:27 -07:00
|
|
|
queue_work(nvmet_wq, &ctrl->fatal_err_work);
|
2016-11-06 02:03:30 -07:00
|
|
|
}
|
|
|
|
mutex_unlock(&ctrl->lock);
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
|
|
|
|
|
2024-05-26 22:15:20 -07:00
|
|
|
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
|
|
|
|
char *traddr, size_t traddr_len)
|
|
|
|
{
|
|
|
|
if (!ctrl->ops->host_traddr)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
return ctrl->ops->host_traddr(ctrl, traddr, traddr_len);
|
|
|
|
}
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
|
|
|
|
const char *subsysnqn)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys_link *p;
|
|
|
|
|
|
|
|
if (!port)
|
|
|
|
return NULL;
|
|
|
|
|
2022-03-15 02:14:36 -07:00
|
|
|
if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
|
2016-06-21 09:04:20 -07:00
|
|
|
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
|
|
|
|
return NULL;
|
|
|
|
return nvmet_disc_subsys;
|
|
|
|
}
|
|
|
|
|
|
|
|
down_read(&nvmet_config_sem);
|
2024-04-03 04:31:14 -07:00
|
|
|
if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn,
|
|
|
|
NVMF_NQN_SIZE)) {
|
|
|
|
if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) {
|
|
|
|
up_read(&nvmet_config_sem);
|
|
|
|
return nvmet_disc_subsys;
|
|
|
|
}
|
|
|
|
}
|
2016-06-21 09:04:20 -07:00
|
|
|
list_for_each_entry(p, &port->subsystems, entry) {
|
|
|
|
if (!strncmp(p->subsys->subsysnqn, subsysnqn,
|
|
|
|
NVMF_NQN_SIZE)) {
|
|
|
|
if (!kref_get_unless_zero(&p->subsys->ref))
|
|
|
|
break;
|
|
|
|
up_read(&nvmet_config_sem);
|
|
|
|
return p->subsys;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
up_read(&nvmet_config_sem);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
|
|
|
|
enum nvme_subsys_type type)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys;
|
2021-06-07 02:23:21 -07:00
|
|
|
char serial[NVMET_SN_MAX_SIZE / 2];
|
2021-06-07 02:23:23 -07:00
|
|
|
int ret;
|
2016-06-21 09:04:20 -07:00
|
|
|
|
|
|
|
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
|
|
|
|
if (!subsys)
|
2019-04-06 23:28:06 -07:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2020-07-24 10:25:18 -07:00
|
|
|
subsys->ver = NVMET_DEFAULT_VS;
|
2017-07-14 06:36:55 -07:00
|
|
|
/* generate a random serial number as our controllers are ephemeral: */
|
2021-06-07 02:23:21 -07:00
|
|
|
get_random_bytes(&serial, sizeof(serial));
|
|
|
|
bin2hex(subsys->serial, &serial, sizeof(serial));
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2021-06-07 02:23:23 -07:00
|
|
|
subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
|
|
|
|
if (!subsys->model_number) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_subsys;
|
|
|
|
}
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2022-11-15 04:58:09 -07:00
|
|
|
subsys->ieee_oui = 0;
|
|
|
|
|
2022-11-15 04:58:10 -07:00
|
|
|
subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
|
|
|
|
if (!subsys->firmware_rev) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_mn;
|
|
|
|
}
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
switch (type) {
|
|
|
|
case NVME_NQN_NVME:
|
|
|
|
subsys->max_qid = NVMET_NR_QUEUES;
|
|
|
|
break;
|
|
|
|
case NVME_NQN_DISC:
|
2021-10-18 08:21:38 -07:00
|
|
|
case NVME_NQN_CURR:
|
2016-06-21 09:04:20 -07:00
|
|
|
subsys->max_qid = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
|
2021-06-07 02:23:23 -07:00
|
|
|
ret = -EINVAL;
|
2022-11-15 04:58:10 -07:00
|
|
|
goto free_fr;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
subsys->type = type;
|
|
|
|
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
|
|
|
|
GFP_KERNEL);
|
2016-07-06 05:02:09 -07:00
|
|
|
if (!subsys->subsysnqn) {
|
2021-06-07 02:23:23 -07:00
|
|
|
ret = -ENOMEM;
|
2022-11-15 04:58:10 -07:00
|
|
|
goto free_fr;
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
2020-01-30 11:29:31 -07:00
|
|
|
subsys->cntlid_min = NVME_CNTLID_MIN;
|
|
|
|
subsys->cntlid_max = NVME_CNTLID_MAX;
|
2016-06-21 09:04:20 -07:00
|
|
|
kref_init(&subsys->ref);
|
|
|
|
|
|
|
|
mutex_init(&subsys->lock);
|
2020-07-19 20:32:02 -07:00
|
|
|
xa_init(&subsys->namespaces);
|
2016-06-21 09:04:20 -07:00
|
|
|
INIT_LIST_HEAD(&subsys->ctrls);
|
|
|
|
INIT_LIST_HEAD(&subsys->hosts);
|
|
|
|
|
2024-05-26 22:15:19 -07:00
|
|
|
ret = nvmet_debugfs_subsys_setup(subsys);
|
|
|
|
if (ret)
|
|
|
|
goto free_subsysnqn;
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
return subsys;
|
2021-06-07 02:23:23 -07:00
|
|
|
|
2024-05-26 22:15:19 -07:00
|
|
|
free_subsysnqn:
|
|
|
|
kfree(subsys->subsysnqn);
|
2022-11-15 04:58:10 -07:00
|
|
|
free_fr:
|
|
|
|
kfree(subsys->firmware_rev);
|
2021-06-07 02:23:23 -07:00
|
|
|
free_mn:
|
|
|
|
kfree(subsys->model_number);
|
|
|
|
free_subsys:
|
|
|
|
kfree(subsys);
|
|
|
|
return ERR_PTR(ret);
|
2016-06-21 09:04:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nvmet_subsys_free(struct kref *ref)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys =
|
|
|
|
container_of(ref, struct nvmet_subsys, ref);
|
|
|
|
|
2020-07-19 20:32:02 -07:00
|
|
|
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2024-05-26 22:15:19 -07:00
|
|
|
nvmet_debugfs_subsys_free(subsys);
|
|
|
|
|
2020-07-19 20:32:02 -07:00
|
|
|
xa_destroy(&subsys->namespaces);
|
2020-07-24 10:25:18 -07:00
|
|
|
nvmet_passthru_subsys_free(subsys);
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
kfree(subsys->subsysnqn);
|
2021-02-17 10:19:40 -07:00
|
|
|
kfree(subsys->model_number);
|
2022-11-15 04:58:10 -07:00
|
|
|
kfree(subsys->firmware_rev);
|
2016-06-21 09:04:20 -07:00
|
|
|
kfree(subsys);
|
|
|
|
}
|
|
|
|
|
2016-11-27 13:29:17 -07:00
|
|
|
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl;
|
|
|
|
|
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
|
|
|
|
ctrl->ops->delete_ctrl(ctrl);
|
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
}
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
void nvmet_subsys_put(struct nvmet_subsys *subsys)
|
|
|
|
{
|
|
|
|
kref_put(&subsys->ref, nvmet_subsys_free);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int __init nvmet_init(void)
|
|
|
|
{
|
2022-11-07 06:01:24 -07:00
|
|
|
int error = -ENOMEM;
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2018-07-19 07:35:20 -07:00
|
|
|
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
|
|
|
|
|
2022-11-07 06:01:24 -07:00
|
|
|
nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
|
|
|
|
NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
|
|
|
|
SLAB_HWCACHE_ALIGN, NULL);
|
|
|
|
if (!nvmet_bvec_cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2021-06-09 18:32:52 -07:00
|
|
|
zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
|
|
|
|
if (!zbd_wq)
|
2022-11-07 06:01:24 -07:00
|
|
|
goto out_destroy_bvec_cache;
|
2021-06-09 18:32:52 -07:00
|
|
|
|
2018-06-19 21:01:41 -07:00
|
|
|
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
|
|
|
|
WQ_MEM_RECLAIM, 0);
|
2022-11-07 06:01:24 -07:00
|
|
|
if (!buffered_io_wq)
|
2021-06-09 18:32:52 -07:00
|
|
|
goto out_free_zbd_work_queue;
|
2018-07-19 07:35:20 -07:00
|
|
|
|
2024-05-06 23:54:10 -07:00
|
|
|
nvmet_wq = alloc_workqueue("nvmet-wq",
|
|
|
|
WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
|
2022-11-07 06:01:24 -07:00
|
|
|
if (!nvmet_wq)
|
2022-03-21 04:57:27 -07:00
|
|
|
goto out_free_buffered_work_queue;
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
error = nvmet_init_discovery();
|
|
|
|
if (error)
|
2022-03-21 04:57:27 -07:00
|
|
|
goto out_free_nvmet_work_queue;
|
2016-06-21 09:04:20 -07:00
|
|
|
|
2024-05-26 22:15:19 -07:00
|
|
|
error = nvmet_init_debugfs();
|
2016-06-21 09:04:20 -07:00
|
|
|
if (error)
|
|
|
|
goto out_exit_discovery;
|
2024-05-26 22:15:19 -07:00
|
|
|
|
|
|
|
error = nvmet_init_configfs();
|
|
|
|
if (error)
|
|
|
|
goto out_exit_debugfs;
|
|
|
|
|
2016-06-21 09:04:20 -07:00
|
|
|
return 0;
|
|
|
|
|
2024-05-26 22:15:19 -07:00
|
|
|
out_exit_debugfs:
|
|
|
|
nvmet_exit_debugfs();
|
2016-06-21 09:04:20 -07:00
|
|
|
out_exit_discovery:
|
|
|
|
nvmet_exit_discovery();
|
2022-03-21 04:57:27 -07:00
|
|
|
out_free_nvmet_work_queue:
|
|
|
|
destroy_workqueue(nvmet_wq);
|
|
|
|
out_free_buffered_work_queue:
|
2018-08-15 18:48:25 -07:00
|
|
|
destroy_workqueue(buffered_io_wq);
|
2021-06-09 18:32:52 -07:00
|
|
|
out_free_zbd_work_queue:
|
|
|
|
destroy_workqueue(zbd_wq);
|
2022-11-07 06:01:24 -07:00
|
|
|
out_destroy_bvec_cache:
|
|
|
|
kmem_cache_destroy(nvmet_bvec_cache);
|
2016-06-21 09:04:20 -07:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit nvmet_exit(void)
|
|
|
|
{
|
|
|
|
nvmet_exit_configfs();
|
2024-05-26 22:15:19 -07:00
|
|
|
nvmet_exit_debugfs();
|
2016-06-21 09:04:20 -07:00
|
|
|
nvmet_exit_discovery();
|
2016-11-14 05:24:21 -07:00
|
|
|
ida_destroy(&cntlid_ida);
|
2022-03-21 04:57:27 -07:00
|
|
|
destroy_workqueue(nvmet_wq);
|
2018-06-19 21:01:41 -07:00
|
|
|
destroy_workqueue(buffered_io_wq);
|
2021-06-09 18:32:52 -07:00
|
|
|
destroy_workqueue(zbd_wq);
|
2022-11-07 06:01:24 -07:00
|
|
|
kmem_cache_destroy(nvmet_bvec_cache);
|
2016-06-21 09:04:20 -07:00
|
|
|
|
|
|
|
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
|
|
|
|
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
|
|
|
|
}
|
|
|
|
|
|
|
|
module_init(nvmet_init);
|
|
|
|
module_exit(nvmet_exit);
|
|
|
|
|
2024-01-23 15:13:41 -07:00
|
|
|
MODULE_DESCRIPTION("NVMe target core framework");
|
2016-06-21 09:04:20 -07:00
|
|
|
MODULE_LICENSE("GPL v2");
|