linux/fs/bcachefs/varint.c
Latest commit 5f60d5f6bb ("move asm/unaligned.h to linux/unaligned.h") by Al Viro:
asm/unaligned.h is always an include of asm-generic/unaligned.h;
might as well move that thing to linux/unaligned.h and include
that - there's nothing arch-specific in that header.

auto-generated by the following:

for i in `git grep -l -w asm/unaligned.h`; do
	sed -i -e "s/asm\/unaligned.h/linux\/unaligned.h/" $i
done
for i in `git grep -l -w asm-generic/unaligned.h`; do
	sed -i -e "s/asm-generic\/unaligned.h/linux\/unaligned.h/" $i
done
git mv include/asm-generic/unaligned.h include/linux/unaligned.h
git mv tools/include/asm-generic/unaligned.h tools/include/linux/unaligned.h
sed -i -e "/unaligned.h/d" include/asm-generic/Kbuild
sed -i -e "s/__ASM_GENERIC/__LINUX/" include/linux/unaligned.h tools/include/linux/unaligned.h
Committed: 2024-10-02 17:23:23 -04:00
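For this file, the script's effect is the single mechanical change visible below: its former #include <asm/unaligned.h> became #include <linux/unaligned.h>.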

// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/math.h>
#include <linux/string.h>
#include <linux/unaligned.h>

#ifdef CONFIG_VALGRIND
#include <valgrind/memcheck.h>
#endif

#include "varint.h"

/**
 * bch2_varint_encode - encode a variable length integer
 * @out:	destination to encode to
 * @v:		unsigned integer to encode
 * Returns:	size in bytes of the encoded integer - at most 9 bytes
 */
int bch2_varint_encode(u8 *out, u64 v)
{
	unsigned bits = fls64(v|1);
	unsigned bytes = DIV_ROUND_UP(bits, 7);
	__le64 v_le;

	if (likely(bytes < 9)) {
		v <<= bytes;
		v |= ~(~0 << (bytes - 1));
		v_le = cpu_to_le64(v);
		memcpy(out, &v_le, bytes);
	} else {
		*out++ = 255;
		bytes = 9;
		put_unaligned_le64(v, out);
	}

	return bytes;
}
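
/*
 * Worked example: v = 1000 is 0b1111101000, so fls64(v|1) == 10 and
 * bytes == DIV_ROUND_UP(10, 7) == 2.  (1000 << 2) | 0b1 == 0xfa1,
 * stored little endian as 0xa1 0x0f.  Since ffz(0xa1) == 1, the first
 * byte alone tells a decoder that this varint is 2 bytes long.
 */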

/**
 * bch2_varint_decode - decode a variable length integer
 * @in:		varint to decode
 * @end:	end of buffer to decode from
 * @out:	on success, decoded integer
 * Returns:	size in bytes of the decoded integer - or -1 on failure (would
 * have read past the end of the buffer)
 */
int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
{
	unsigned bytes = likely(in < end)
		? ffz(*in & 255) + 1
		: 1;
	u64 v;

	if (unlikely(in + bytes > end))
		return -1;

	if (likely(bytes < 9)) {
		__le64 v_le = 0;

		memcpy(&v_le, in, bytes);
		v = le64_to_cpu(v_le);
		v >>= bytes;
	} else {
		v = get_unaligned_le64(++in);
	}

	*out = v;
	return bytes;
}
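
/*
 * Continuing the example above: decoding 0xa1 0x0f computes
 * bytes == ffz(0xa1 & 255) + 1 == 2, loads v_le == 0x0fa1 little
 * endian, and 0x0fa1 >> 2 == 1000 recovers the original value.
 */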

/**
 * bch2_varint_encode_fast - fast version of bch2_varint_encode
 * @out:	destination to encode to
 * @v:		unsigned integer to encode
 * Returns:	size in bytes of the encoded integer - at most 9 bytes
 *
 * This version assumes it's always safe to write 8 bytes to @out, even if the
 * encoded integer would be smaller.
 */
int bch2_varint_encode_fast(u8 *out, u64 v)
{
	unsigned bits = fls64(v|1);
	unsigned bytes = DIV_ROUND_UP(bits, 7);

	if (likely(bytes < 9)) {
		v <<= bytes;
		v |= ~(~0U << (bytes - 1));
	} else {
		*out++ = 255;
		bytes = 9;
	}

	put_unaligned_le64(v, out);
	return bytes;
}
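
/*
 * Note the unconditional put_unaligned_le64(): even a 1-byte varint
 * stores a full 8 bytes at @out (9 bytes total in the v >= 2^56 case,
 * counting the 255 marker byte), trading the variable-length memcpy()
 * of bch2_varint_encode() for a single unaligned store.
 */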

/**
 * bch2_varint_decode_fast - fast version of bch2_varint_decode
 * @in:		varint to decode
 * @end:	end of buffer to decode from
 * @out:	on success, decoded integer
 * Returns:	size in bytes of the decoded integer - or -1 on failure (would
 * have read past the end of the buffer)
 *
 * This version assumes that it is safe to read at most 8 bytes past the end of
 * @end (we still return an error if the varint extends past @end).
 */
int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
{
#ifdef CONFIG_VALGRIND
	/* the speculative 8-byte load below may touch bytes that were
	 * never written; tell valgrind not to flag them as undefined */
	VALGRIND_MAKE_MEM_DEFINED(in, 8);
#endif
	u64 v = get_unaligned_le64(in);
	unsigned bytes = ffz(*in) + 1;

	if (unlikely(in + bytes > end))
		return -1;

	if (likely(bytes < 9)) {
		v >>= bytes;
		v &= ~(~0ULL << (7 * bytes));
	} else {
		v = get_unaligned_le64(++in);
	}

	*out = v;
	return bytes;
}
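
/*
 * A minimal round-trip sketch, for illustration only: the function
 * name, the #if 0 guard, and the BUG_ON()-based checking are this
 * sketch's own choices, not part of bcachefs.  It assumes ARRAY_SIZE()
 * and BUG_ON() are available (from <linux/kernel.h>, which this file
 * does not include).
 */
#if 0
static void varint_roundtrip_check(void)
{
	static const u64 cases[] = {
		0, 1, 127, 128, 1000, (1ULL << 56) - 1, 1ULL << 56, ~0ULL,
	};

	for (unsigned int i = 0; i < ARRAY_SIZE(cases); i++) {
		u8 buf[9];
		u64 v;
		int enc = bch2_varint_encode(buf, cases[i]);
		int dec = bch2_varint_decode(buf, buf + enc, &v);

		/* encode and decode must agree on both length and value */
		BUG_ON(enc < 1 || enc > 9);
		BUG_ON(dec != enc);
		BUG_ON(v != cases[i]);
	}
}
#endif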