
ceph: use osd_req_op_extent_osd_iter for netfs reads

The netfs layer has already pinned the pages involved before calling
issue_op, so we can just pass down the iter directly instead of calling
iov_iter_get_pages_alloc.

Instead of having to allocate a page array, use CEPH_MSG_DATA_ITER and
pass it the iov_iter directly to clone.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Xiubo Li <xiubli@redhat.com>
Reviewed-and-tested-by: Luís Henriques <lhenriques@suse.de>
Reviewed-by: Milind Changire <mchangir@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Authored by Jeff Layton on 2023-01-02 12:36:14 +01:00; committed by Ilya Dryomov
commit 4de77f25fd (parent dee0c5f834)

fs/ceph/addr.c

@@ -246,7 +246,6 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 	struct netfs_io_subrequest *subreq = req->r_priv;
 	struct ceph_osd_req_op *op = &req->r_ops[0];
-	int num_pages;
 	int err = req->r_result;
 	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
@@ -268,9 +267,6 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 	netfs_subreq_terminated(subreq, err, false);
-	num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
-	ceph_put_page_vector(osd_data->pages, num_pages, false);
 	iput(req->r_inode);
 }
@@ -338,8 +334,6 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	struct ceph_osd_request *req = NULL;
 	struct ceph_vino vino = ceph_vino(inode);
 	struct iov_iter iter;
-	struct page **pages;
-	size_t page_off;
 	int err = 0;
 	u64 len = subreq->len;
 	bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);
@@ -370,18 +364,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
 	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
-	err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
-	if (err < 0) {
-		dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
-		goto out;
-	}
-
-	/* should always give us a page-aligned read */
-	WARN_ON_ONCE(page_off);
-	len = err;
-	err = 0;
-
-	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
+	osd_req_op_extent_osd_iter(req, 0, &iter);
 	req->r_callback = finish_netfs_read;
 	req->r_priv = subreq;
 	req->r_inode = inode;
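
For illustration, a minimal sketch of what the change amounts to, written outside the diff context. The helper names attach_read_buffer_old() and attach_read_buffer_new() are hypothetical, introduced only to contrast the two approaches; the libceph calls themselves (iov_iter_get_pages_alloc2(), osd_req_op_extent_osd_data_pages(), osd_req_op_extent_osd_iter()) are the APIs used before and after the patch.

/*
 * Sketch only: contrast the old page-vector wiring with the new
 * iterator-based wiring. The helper names are hypothetical.
 */
#include <linux/uio.h>
#include <linux/ceph/osd_client.h>

/*
 * Old approach: pin the pages behind the iterator and attach them to the
 * read op as a page array; the completion callback then has to release
 * them again with ceph_put_page_vector().
 */
static int attach_read_buffer_old(struct ceph_osd_request *req,
				  struct iov_iter *iter, size_t len)
{
	struct page **pages;
	size_t page_off;
	ssize_t got;

	got = iov_iter_get_pages_alloc2(iter, &pages, len, &page_off);
	if (got < 0)
		return got;
	/* netfs reads are page-aligned, so page_off should be 0 */
	WARN_ON_ONCE(page_off);
	osd_req_op_extent_osd_data_pages(req, 0, pages, got, 0, false, false);
	return 0;
}

/*
 * New approach: hand the xarray-backed iterator to the request; it is
 * cloned into a CEPH_MSG_DATA_ITER data item, so no page array is
 * allocated and nothing extra needs freeing on completion.
 */
static void attach_read_buffer_new(struct ceph_osd_request *req,
				   struct iov_iter *iter)
{
	osd_req_op_extent_osd_iter(req, 0, iter);
}

The payoff shows up in finish_netfs_read(): with the iterator variant there is no page vector to size with calc_pages_for() and drop with ceph_put_page_vector(), since the messenger consumes the cloned iterator directly.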