1

netfs: Switch to using unsigned long long rather than loff_t

Switch to using unsigned long long rather than loff_t in netfslib to avoid
problems with the sign flipping in the maths when we're dealing with the
byte at position 0x7fffffffffffffff.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: netfs@lists.linux.dev
cc: ceph-devel@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
This commit is contained in:
David Howells 2024-03-18 16:57:31 +00:00
parent 5a550a0c60
commit 7ba167c4c7
9 changed files with 24 additions and 20 deletions

View File

@@ -493,7 +493,7 @@ out_no_object:
  * boundary as appropriate.
  */
 static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
-						    loff_t i_size)
+						    unsigned long long i_size)
 {
 	return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
 					  subreq->start, &subreq->len, i_size,

View File

@@ -193,7 +193,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
	 * block, but do not exceed the file size, unless the original
	 * request already exceeds it.
	 */
-	new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
+	new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
	if (new_end > end && new_end <= rreq->start + max_len)
		rreq->len = new_end - rreq->start;

View File

@@ -130,7 +130,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 }

 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
-					 loff_t *_start, size_t *_len, loff_t i_size)
+					 unsigned long long *_start,
+					 unsigned long long *_len,
+					 unsigned long long i_size)
 {
 	struct netfs_cache_resources *cres = &rreq->cache_resources;

View File

@@ -664,7 +664,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
	last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, last) {
		WARN(!folio_test_writeback(folio),
-		     "bad %zx @%llx page %lx %lx\n",
+		     "bad %llx @%llx page %lx %lx\n",
		     wreq->len, wreq->start, folio->index, last);

		if ((finfo = netfs_folio_info(folio))) {

View File

@@ -476,7 +476,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
set:
	if (subreq->len > rreq->len)
-		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
			rreq->debug_id, subreq->debug_index,
			subreq->len, rreq->len);
@@ -513,7 +513,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
	subreq->start = rreq->start + rreq->submitted;
	subreq->len = io_iter->count;

-	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
@@ -588,7 +588,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
	atomic_set(&rreq->nr_outstanding, 1);
	io_iter = rreq->io_iter;
	do {
-		_debug("submit %llx + %zx >= %llx",
+		_debug("submit %llx + %llx >= %llx",
		       rreq->start, rreq->submitted, rreq->i_size);
		if (rreq->origin == NETFS_DIO_READ &&
		    rreq->start + rreq->submitted >= rreq->i_size)

View File

@@ -62,7 +62,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
	rreq = list_entry(v, struct netfs_io_request, proc_link);
	seq_printf(m,
-		   "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
+		   "%08x %s %3d %2lx %4d %3d @%04llx %llx/%llx",
		   rreq->debug_id,
		   netfs_origins[rreq->origin],
		   refcount_read(&rreq->ref),

View File

@@ -439,7 +439,7 @@ static void netfs_submit_writethrough(struct netfs_io_request *wreq, bool final)
 */
int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end)
{
-	_enter("ic=%zu sb=%zu ws=%u cp=%zu tp=%u",
+	_enter("ic=%zu sb=%llu ws=%u cp=%zu tp=%u",
	       wreq->iter.count, wreq->submitted, wreq->wsize, copied, to_page_end);

	wreq->iter.count += copied;
@@ -457,7 +457,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb)
{
	int ret = -EIOCBQUEUED;

-	_enter("ic=%zu sb=%zu ws=%u",
+	_enter("ic=%zu sb=%llu ws=%u",
	       wreq->iter.count, wreq->submitted, wreq->wsize);

	if (wreq->submitted < wreq->io_iter.count)

View File

@@ -149,7 +149,7 @@ struct netfs_io_subrequest {
	struct work_struct	work;
	struct list_head	rreq_link;	/* Link in rreq->subrequests */
	struct iov_iter		io_iter;	/* Iterator for this subrequest */
-	loff_t			start;		/* Where to start the I/O */
+	unsigned long long	start;		/* Where to start the I/O */
	size_t			len;		/* Size of the I/O */
	size_t			transferred;	/* Amount of data transferred */
	refcount_t		ref;
@@ -205,15 +205,15 @@ struct netfs_io_request {
	atomic_t		subreq_counter;	/* Next subreq->debug_index */
	atomic_t		nr_outstanding;	/* Number of ops in progress */
	atomic_t		nr_copy_ops;	/* Number of copy-to-cache ops in progress */
-	size_t			submitted;	/* Amount submitted for I/O so far */
-	size_t			len;		/* Length of the request */
	size_t			upper_len;	/* Length can be extended to here */
+	unsigned long long	submitted;	/* Amount submitted for I/O so far */
+	unsigned long long	len;		/* Length of the request */
	size_t			transferred;	/* Amount to be indicated as transferred */
	short			error;		/* 0 or error that occurred */
	enum netfs_io_origin	origin;		/* Origin of the request */
	bool			direct_bv_unpin; /* T if direct_bv[] must be unpinned */
-	loff_t			i_size;		/* Size of the file */
-	loff_t			start;		/* Start position */
+	unsigned long long	i_size;		/* Size of the file */
+	unsigned long long	start;		/* Start position */
	pgoff_t			no_unlock_folio; /* Don't unlock this folio after read */
	refcount_t		ref;
	unsigned long		flags;
@@ -294,13 +294,15 @@ struct netfs_cache_ops {
	/* Expand readahead request */
	void (*expand_readahead)(struct netfs_cache_resources *cres,
-				 loff_t *_start, size_t *_len, loff_t i_size);
+				 unsigned long long *_start,
+				 unsigned long long *_len,
+				 unsigned long long i_size);

	/* Prepare a read operation, shortening it to a cached/uncached
	 * boundary as appropriate.
	 */
	enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
-					     loff_t i_size);
+					     unsigned long long i_size);

	/* Prepare a write operation, working out what part of the write we can
	 * actually do.

View File

@@ -280,7 +280,7 @@ TRACE_EVENT(netfs_sreq,
		    __entry->start	= sreq->start;
			   ),

-	    TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+	    TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d",
		      __entry->rreq, __entry->index,
		      __print_symbolic(__entry->source, netfs_sreq_sources),
		      __print_symbolic(__entry->what, netfs_sreq_traces),
@@ -320,7 +320,7 @@ TRACE_EVENT(netfs_failure,
		    __entry->start	= sreq ? sreq->start : 0;
			   ),

-	    TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+	    TP_printk("R=%08x[%x] %s f=%02x s=%llx %zx/%zx %s e=%d",
		      __entry->rreq, __entry->index,
		      __print_symbolic(__entry->source, netfs_sreq_sources),
		      __entry->flags,
@@ -436,7 +436,7 @@ TRACE_EVENT(netfs_write,
		    __field(unsigned int,		cookie		)
		    __field(enum netfs_write_trace,	what		)
		    __field(unsigned long long,		start		)
-		    __field(size_t,			len		)
+		    __field(unsigned long long,		len		)
			     ),

	    TP_fast_assign(