
block: convert features and flags to __bitwise types

... and let sparse help us catch mismatches or abuses.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20240626142637.300624-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig, 2024-06-26 16:26:25 +02:00, committed by Jens Axboe
commit fcf865e357, parent ec9b1cf0b0
2 changed files with 46 additions and 45 deletions
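For context, the pattern this commit applies: __bitwise is a sparse annotation that turns a typedef into a restricted type, and a __force cast is the only sanctioned way to convert between it and plain integers. A minimal standalone sketch of the idea (the demo_* names and the __CHECKER__ fallbacks are illustrative, not taken from this patch); it builds with gcc as usual, and `sparse demo.c` reports the bad line:

/* demo.c - illustrative sketch, not kernel code */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise demo_features_t;

#define DEMO_FEAT_WRITE_CACHE	((__force demo_features_t)(1u << 0))
#define DEMO_FEAT_FUA		((__force demo_features_t)(1u << 1))

int main(void)
{
	demo_features_t features = DEMO_FEAT_WRITE_CACHE | DEMO_FEAT_FUA;

	/* ok: "!!" collapses the masked restricted value to a plain int */
	int has_fua = !!(features & DEMO_FEAT_FUA);

	/* sparse warns here: bare integer mixed into a __bitwise type */
	features |= 1u << 2;

	return has_fua;
}

OR-ing values of the same restricted type is fine; mixing in a bare integer, as in the last assignment, is exactly the kind of abuse the commit message refers to.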

block/blk-sysfs.c

@@ -288,7 +288,7 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
 }
 
 static ssize_t queue_feature_store(struct request_queue *q, const char *page,
-		size_t count, unsigned int feature)
+		size_t count, blk_features_t feature)
 {
 	struct queue_limits lim;
 	unsigned long val;
@@ -418,7 +418,7 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_poll_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(q->limits.features & BLK_FEAT_POLL, page);
+	return queue_var_show(!!(q->limits.features & BLK_FEAT_POLL), page);
 }
 
 static ssize_t queue_poll_store(struct request_queue *q, const char *page,
@@ -492,7 +492,7 @@ static ssize_t queue_fua_show(struct request_queue *q, char *page)
 
 static ssize_t queue_dax_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(blk_queue_dax(q), page);
+	return queue_var_show(!!blk_queue_dax(q), page);
 }
 
 #define QUEUE_RO_ENTRY(_prefix, _name)				\

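A note on the two "!!" additions above: queue_var_show() takes a plain unsigned long, so once ->features is a restricted __bitwise type, passing the masked value straight through would have sparse warn about the restricted type degrading to an integer. Sketched side by side (assuming a show helper of this shape; the snippet is illustrative):

/* assumed helper shape, as used by the queue sysfs attributes */
static ssize_t queue_var_show(unsigned long var, char *page);

/* old form: sparse would now warn, restricted type degrades to integer */
return queue_var_show(q->limits.features & BLK_FEAT_POLL, page);

/* new form: "!!" reduces the mask result to a plain int 0 or 1 */
return queue_var_show(!!(q->limits.features & BLK_FEAT_POLL), page);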
include/linux/blkdev.h

@@ -283,55 +283,56 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
 }
 
 /* flags set by the driver in queue_limits.features */
-enum {
-	/* supports a volatile write cache */
-	BLK_FEAT_WRITE_CACHE		= (1u << 0),
+typedef unsigned int __bitwise blk_features_t;
 
-	/* supports passing on the FUA bit */
-	BLK_FEAT_FUA			= (1u << 1),
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))
 
-	/* rotational device (hard drive or floppy) */
-	BLK_FEAT_ROTATIONAL		= (1u << 2),
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))
 
-	/* contributes to the random number pool */
-	BLK_FEAT_ADD_RANDOM		= (1u << 3),
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))
 
-	/* do disk/partitions IO accounting */
-	BLK_FEAT_IO_STAT		= (1u << 4),
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))
 
-	/* don't modify data until writeback is done */
-	BLK_FEAT_STABLE_WRITES		= (1u << 5),
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))
 
-	/* always completes in submit context */
-	BLK_FEAT_SYNCHRONOUS		= (1u << 6),
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))
 
-	/* supports REQ_NOWAIT */
-	BLK_FEAT_NOWAIT			= (1u << 7),
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))
 
-	/* supports DAX */
-	BLK_FEAT_DAX			= (1u << 8),
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))
 
-	/* supports I/O polling */
-	BLK_FEAT_POLL			= (1u << 9),
+/* supports DAX */
+#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))
 
-	/* is a zoned device */
-	BLK_FEAT_ZONED			= (1u << 10),
+/* supports I/O polling */
+#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))
 
-	/* supports Zone Reset All */
-	BLK_FEAT_ZONE_RESETALL		= (1u << 11),
+/* is a zoned device */
+#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))
 
-	/* supports PCI(e) p2p requests */
-	BLK_FEAT_PCI_P2PDMA		= (1u << 12),
+/* supports Zone Reset All */
+#define BLK_FEAT_ZONE_RESETALL		((__force blk_features_t)(1u << 11))
 
-	/* skip this queue in blk_mq_(un)quiesce_tagset */
-	BLK_FEAT_SKIP_TAGSET_QUIESCE	= (1u << 13),
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))
 
-	/* bounce all highmem pages */
-	BLK_FEAT_BOUNCE_HIGH		= (1u << 14),
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))
 
-	/* undocumented magic for bcache */
-	BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE = (1u << 15),
-};
+/* bounce all highmem pages */
+#define BLK_FEAT_BOUNCE_HIGH		((__force blk_features_t)(1u << 14))
+
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+	((__force blk_features_t)(1u << 15))
 
 /*
  * Flags automatically inherited when stacking limits.
@@ -342,17 +343,17 @@ enum {
 	BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
 
 /* internal flags in queue_limits.flags */
-enum {
-	/* do not send FLUSH/FUA commands despite advertising a write cache */
-	BLK_FLAG_WRITE_CACHE_DISABLED	= (1u << 0),
+typedef unsigned int __bitwise blk_flags_t;
 
-	/* I/O topology is misaligned */
-	BLK_FLAG_MISALIGNED		= (1u << 1),
-};
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))
 
 struct queue_limits {
-	unsigned int		features;
-	unsigned int		flags;
+	blk_features_t		features;
+	blk_flags_t		flags;
 	unsigned long		seg_boundary_mask;
 	unsigned long		virt_boundary_mask;
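With the two queue_limits fields now carrying distinct restricted types, sparse also catches them being confused with each other. A hypothetical driver-side sketch (the assignments are invented here; the macros and fields are from this patch):

struct queue_limits lim = { };

lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;	/* ok: blk_features_t */
lim.flags |= BLK_FLAG_MISALIGNED;			/* ok: blk_flags_t */

/* sparse warns: a blk_flags_t value stored into the blk_features_t field */
lim.features |= BLK_FLAG_WRITE_CACHE_DISABLED;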