refactor: enable formatting for files under lib

Dundar Göc 2021-10-12 17:23:50 +02:00
parent 29b718d04c
commit f98b8d2d44
6 changed files with 986 additions and 964 deletions


@ -1,5 +1,3 @@
// uncrustify:off
/*-
* Copyright 1997-1999, 2001, John-Mark Gurney.
* 2008-2009, Attractive Chaos <attractor@live.co.uk>
@ -36,386 +34,419 @@
#ifndef NVIM_LIB_KBTREE_H
#define NVIM_LIB_KBTREE_H
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "nvim/memory.h"
#define KB_MAX_DEPTH 64
#define __KB_KEY(type, x) (x->key)
#define __KB_PTR(btr, x) (x->ptr)
#define __KB_TREE_T(name, key_t, T) \
typedef struct kbnode_##name##_s kbnode_##name##_t; \
struct kbnode_##name##_s { \
int32_t n; \
bool is_internal; \
key_t key[2*T-1]; \
kbnode_##name##_t *ptr[]; \
} ; \
\
typedef struct { \
kbnode_##name##_t *root; \
int n_keys, n_nodes; \
} kbtree_##name##_t; \
\
typedef struct { \
kbnode_##name##_t *x; \
int i; \
} kbpos_##name##_t; \
typedef struct { \
kbpos_##name##_t stack[KB_MAX_DEPTH], *p; \
} kbitr_##name##_t; \
#define __kb_destroy(kbnode_t, b) do { \
int i; \
unsigned int max = 8; \
kbnode_t *x, **top, **stack = 0; \
if (b->root) { \
top = stack = (kbnode_t **)xcalloc(max, sizeof(kbnode_t *)); \
*top++ = (b)->root; \
while (top != stack) { \
x = *--top; \
if (x->is_internal == 0) { XFREE_CLEAR(x); continue; } \
for (i = 0; i <= x->n; ++i) \
if (__KB_PTR(b, x)[i]) { \
if (top - stack == (int)max) { \
max <<= 1; \
stack = (kbnode_t **)xrealloc(stack, max * sizeof(kbnode_t *)); \
top = stack + (max>>1); \
} \
*top++ = __KB_PTR(b, x)[i]; \
} \
XFREE_CLEAR(x); \
} \
} \
XFREE_CLEAR(stack); \
} while (0)
#define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, \
int *r) \
{ \
int tr, *rr, begin = 0, end = x->n; \
if (x->n == 0) return -1; \
rr = r? r : &tr; \
while (begin < end) { \
int mid = (begin + end) >> 1; \
if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
else end = mid; \
} \
if (begin == x->n) { *rr = 1; return x->n - 1; } \
if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
return begin; \
}
#define __KB_GET(name, key_t, kbnode_t) \
static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
{ \
if (!b->root) { \
return 0; \
} \
int i, r = 0; \
kbnode_t *x = b->root; \
while (x) { \
i = __kb_getp_aux_##name(x, k, &r); \
if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
if (x->is_internal == 0) return 0; \
x = __KB_PTR(b, x)[i + 1]; \
} \
return 0; \
} \
static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
{ \
return kb_getp_##name(b, &k); \
}
#define __KB_INTERVAL(name, key_t, kbnode_t) \
static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, \
key_t **upper) \
{ \
if (!b->root) { \
return; \
} \
int i, r = 0; \
kbnode_t *x = b->root; \
*lower = *upper = 0; \
while (x) { \
i = __kb_getp_aux_##name(x, k, &r); \
if (i >= 0 && r == 0) { \
*lower = *upper = &__KB_KEY(key_t, x)[i]; \
return; \
} \
if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
if (x->is_internal == 0) return; \
x = __KB_PTR(b, x)[i + 1]; \
} \
} \
static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
{ \
kb_intervalp_##name(b, &k, lower, upper); \
}
#define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
/* x must be an internal node */ \
static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
{ \
kbnode_t *z; \
z = (kbnode_t *)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
++b->n_nodes; \
z->is_internal = y->is_internal; \
z->n = T - 1; \
memcpy(__KB_KEY(key_t, z), &__KB_KEY(key_t, y)[T], sizeof(key_t) * (T - 1)); \
if (y->is_internal) memcpy(__KB_PTR(b, z), &__KB_PTR(b, y)[T], sizeof(void *) * T); \
y->n = T - 1; \
memmove(&__KB_PTR(b, x)[i + 2], &__KB_PTR(b, \
x)[i + 1], sizeof(void *) * (unsigned int)(x->n - i)); \
__KB_PTR(b, x)[i + 1] = z; \
memmove(&__KB_KEY(key_t, x)[i + 1], &__KB_KEY(key_t, x)[i], \
sizeof(key_t) * (unsigned int)(x->n - i)); \
__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; \
++x->n; \
} \
static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
{ \
int i = x->n - 1; \
key_t *ret; \
if (x->is_internal == 0) { \
i = __kb_getp_aux_##name(x, k, 0); \
if (i != x->n - 1) \
memmove(&__KB_KEY(key_t, x)[i + 2], &__KB_KEY(key_t, \
x)[i + 1], \
(unsigned int)(x->n - i - 1) * sizeof(key_t)); \
ret = &__KB_KEY(key_t, x)[i + 1]; \
*ret = *k; \
++x->n; \
} else { \
i = __kb_getp_aux_##name(x, k, 0) + 1; \
if (__KB_PTR(b, x)[i]->n == 2 * T - 1) { \
__kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
} \
ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
} \
return ret; \
} \
static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
{ \
if (!b->root) { \
b->root = (kbnode_t *)xcalloc(1, ILEN); \
++b->n_nodes; \
} \
kbnode_t *r, *s; \
++b->n_keys; \
r = b->root; \
if (r->n == 2 * T - 1) { \
++b->n_nodes; \
s = (kbnode_t *)xcalloc(1, ILEN); \
b->root = s; s->is_internal = 1; s->n = 0; \
__KB_PTR(b, s)[0] = r; \
__kb_split_##name(b, s, 0, r); \
r = s; \
} \
return __kb_putp_aux_##name(b, r, k); \
} \
static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
{ \
kb_putp_##name(b, &k); \
}
#define __KB_DEL(name, key_t, kbnode_t, T) \
static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, \
int s) \
{ \
int yn, zn, i, r = 0; \
kbnode_t *xp, *y, *z; \
key_t kp; \
if (x == 0) return *k; \
if (s) { /* s can only be 0, 1 or 2 */ \
r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
i = s == 1? x->n - 1 : -1; \
} else i = __kb_getp_aux_##name(x, k, &r); \
if (x->is_internal == 0) { \
if (s == 2) ++i; \
kp = __KB_KEY(key_t, x)[i]; \
memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
x)[i + 1], \
(unsigned int)(x->n - i - 1) * sizeof(key_t)); \
--x->n; \
return kp; \
} \
if (r == 0) { \
if ((yn = __KB_PTR(b, x)[i]->n) >= T) { \
xp = __KB_PTR(b, x)[i]; \
kp = __KB_KEY(key_t, x)[i]; \
__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
return kp; \
} else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= T) { \
xp = __KB_PTR(b, x)[i + 1]; \
kp = __KB_KEY(key_t, x)[i]; \
__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
return kp; \
} else if (yn == T - 1 && zn == T - 1) { \
y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
__KB_KEY(key_t, y)[y->n++] = *k; \
memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, z), (unsigned int)z->n * sizeof(key_t)); \
if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, \
z), \
(unsigned int)(z->n + 1) * sizeof(void *)); \
y->n += z->n; \
memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
x)[i + 1], \
(unsigned int)(x->n - i - 1) * sizeof(key_t)); \
memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, \
x)[i + 2], \
(unsigned int)(x->n - i - 1) * sizeof(void *)); \
--x->n; \
XFREE_CLEAR(z); \
return __kb_delp_aux_##name(b, y, k, s); \
} \
} \
++i; \
if ((xp = __KB_PTR(b, x)[i])->n == T - 1) { \
if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= T) { \
memmove(&__KB_KEY(key_t, xp)[1], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
if (xp->is_internal) memmove(&__KB_PTR(b, xp)[1], __KB_PTR(b, \
xp), \
(unsigned int)(xp->n + 1) * sizeof(void *)); \
__KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
__KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
--y->n; ++xp->n; \
} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= T) { \
__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
--y->n; \
memmove(__KB_KEY(key_t, y), &__KB_KEY(key_t, y)[1], (unsigned int)y->n * sizeof(key_t)); \
if (y->is_internal) memmove(__KB_PTR(b, y), &__KB_PTR(b, \
y)[1], \
(unsigned int)(y->n + 1) * sizeof(void *)); \
} else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == T - 1) { \
__KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, xp), \
(unsigned int)xp->n * sizeof(key_t)); \
if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, \
xp), \
(unsigned int)(xp->n + 1) * sizeof(void *)); \
y->n += xp->n; \
memmove(&__KB_KEY(key_t, x)[i - 1], &__KB_KEY(key_t, \
x)[i], \
(unsigned int)(x->n - i) * sizeof(key_t)); \
memmove(&__KB_PTR(b, x)[i], &__KB_PTR(b, \
x)[i + 1], (unsigned int)(x->n - i) * sizeof(void *)); \
--x->n; \
XFREE_CLEAR(xp); \
xp = y; \
} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == T - 1) { \
__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
memmove(&__KB_KEY(key_t, xp)[xp->n], __KB_KEY(key_t, y), \
(unsigned int)y->n * sizeof(key_t)); \
if (xp->is_internal) memmove(&__KB_PTR(b, xp)[xp->n], __KB_PTR(b, y), \
(unsigned int)(y->n + 1) * sizeof(void *)); \
xp->n += y->n; \
memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
x)[i + 1], \
(unsigned int)(x->n - i - 1) * sizeof(key_t)); \
memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, \
x)[i + 2], \
(unsigned int)(x->n - i - 1) * sizeof(void *)); \
--x->n; \
XFREE_CLEAR(y); \
} \
} \
return __kb_delp_aux_##name(b, xp, k, s); \
} \
static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
{ \
kbnode_t *x; \
key_t ret; \
ret = __kb_delp_aux_##name(b, b->root, k, 0); \
--b->n_keys; \
if (b->root->n == 0 && b->root->is_internal) { \
--b->n_nodes; \
x = b->root; \
b->root = __KB_PTR(b, x)[0]; \
XFREE_CLEAR(x); \
} \
return ret; \
} \
static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
{ \
return kb_delp_##name(b, &k); \
}
#define __KB_ITR(name, key_t, kbnode_t) \
static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
{ \
itr->p = NULL; \
if (b->n_keys == 0) return; \
itr->p = itr->stack; \
itr->p->x = b->root; itr->p->i = 0; \
while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
kbnode_t *x = itr->p->x; \
++itr->p; \
itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
} \
} \
static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
{ \
if (itr->p == NULL) return 0; \
for (;;) { \
++itr->p->i; \
assert(itr->p->i <= 21); \
while (itr->p->x && itr->p->i <= itr->p->x->n) { \
itr->p[1].i = 0; \
itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
++itr->p; \
} \
if (itr->p == itr->stack) { \
itr->p = NULL; \
return 0; \
} \
--itr->p; \
if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
} \
} \
static inline int kb_itr_prev_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
{ \
if (itr->p == NULL) return 0; \
for (;;) { \
while (itr->p->x && itr->p->i >= 0) { \
itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
itr->p[1].i = itr->p[1].x ? itr->p[1].x->n : -1; \
++itr->p; \
} \
if (itr->p == itr->stack) { \
itr->p = NULL; \
return 0; \
} \
--itr->p; \
--itr->p->i; \
if (itr->p->x && itr->p->i >= 0) return 1; \
} \
} \
static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, \
kbitr_##name##_t *itr) \
{ \
if (b->n_keys == 0) { \
itr->p = NULL; \
return 0; \
} \
int i, r = 0; \
itr->p = itr->stack; \
itr->p->x = b->root; \
while (itr->p->x) { \
i = __kb_getp_aux_##name(itr->p->x, k, &r); \
itr->p->i = i; \
if (i >= 0 && r == 0) return 1; \
++itr->p->i; \
assert(itr->p->i <= 21); \
itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[i + 1] : 0; \
++itr->p; \
} \
itr->p->i = 0; \
return 0; \
} \
static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
{ \
return kb_itr_getp_##name(b, &k, itr); \
} \
static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
{ \
key_t k = kb_itr_key(itr); \
kb_delp_##name(b, &k); \
kb_itr_getp_##name(b, &k, itr); \
}
#define KBTREE_INIT(name, key_t, __cmp, T) \
KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, \
(sizeof(kbnode_##name##_t)+(2*T)*sizeof(void *)))
#define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
__KB_TREE_T(name, key_t, T) \
__KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
__KB_GET(name, key_t, kbnode_t) \
__KB_INTERVAL(name, key_t, kbnode_t) \
__KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
__KB_DEL(name, key_t, kbnode_t, T) \
__KB_ITR(name, key_t, kbnode_t)
#define KB_DEFAULT_SIZE 512
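Aside (not part of this commit): a minimal usage sketch of the macros above. The comparison macro, the tree name "uint", the order T and the include path are illustrative assumptions, not taken from this diff.

#include <stdint.h>
#include <stdio.h>
#include "nvim/lib/kbtree.h"                         // assumed include path (guard: NVIM_LIB_KBTREE_H)

#define uint_cmp(a, b) (((a) > (b)) - ((a) < (b)))   // illustrative three-way comparison
KBTREE_INIT(uint, uint32_t, uint_cmp, 10)            // generates kbtree_uint_t plus the kb_*_uint() helpers

static void kbtree_demo(void)
{
  kbtree_uint_t b = { 0 };              // root is allocated lazily by the first kb_put_uint()
  kb_put_uint(&b, 42);                  // insert by value
  kb_put_uint(&b, 7);
  uint32_t *p = kb_get_uint(&b, 42);    // returns 0 when the key is absent
  if (p != NULL) {
    kb_del_uint(&b, *p);                // delete by key
  }
  kbitr_uint_t itr;
  kb_itr_first_uint(&b, &itr);          // position at the smallest key; itr.p stays NULL for an empty tree
  while (itr.p != NULL) {
    printf("%u\n", (unsigned)kb_itr_key(itr));   // kb_itr_key() is defined elsewhere in kbtree.h
    if (!kb_itr_next_uint(&b, &itr)) {
      break;
    }
  }
  __kb_destroy(kbnode_uint_t, (&b));    // free every node
}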


@ -1,5 +1,3 @@
// uncrustify:off
/* The MIT License
Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
@ -23,14 +21,14 @@
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*
Example:
#include "nvim/khash.h"
KHASH_MAP_INIT_INT(32, char)
int main() {
int ret, is_missing;
khiter_t k;
khash_t(32) *h = kh_init(32);
@ -44,99 +42,98 @@ int main() {
if (kh_exist(h, k)) kh_value(h, k) = 1;
kh_destroy(32, h);
return 0;
}
*/
/*
  2013-05-02 (0.2.8):

    * Use quadratic probing. When the capacity is power of 2, stepping function
      i*(i+1)/2 guarantees to traverse each bucket. It is better than double
      hashing on cache performance and is more robust than linear probing.

      In theory, double hashing should be more robust than quadratic probing.
      However, my implementation is probably not for large hash tables, because
      the second hash function is closely tied to the first hash function,
      which reduce the effectiveness of double hashing.

    Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php

  2011-12-29 (0.2.7):

    * Minor code clean up; no actual effect.

  2011-09-16 (0.2.6):

    * The capacity is a power of 2. This seems to dramatically improve the
      speed for simple keys. Thank Zilong Tan for the suggestion. Reference:

       - http://code.google.com/p/ulib/
       - http://nothings.org/computer/judy/

    * Allow to optionally use linear probing which usually has better
      performance for random input. Double hashing is still the default as it
      is more robust to certain non-random input.

    * Added Wang's integer hash function (not used by default). This hash
      function is more robust to certain non-random input.

  2011-02-14 (0.2.5):

    * Allow to declare global functions.

  2009-09-26 (0.2.4):

    * Improve portability

  2008-09-19 (0.2.3):

    * Corrected the example
    * Improved interfaces

  2008-09-11 (0.2.2):

    * Improved speed a little in kh_put()

  2008-09-10 (0.2.1):

    * Added kh_clear()
    * Fixed a compiling error

  2008-09-02 (0.2.0):

    * Changed to token concatenation which increases flexibility.

  2008-08-31 (0.1.2):

    * Fixed a bug in kh_get(), which has not been tested previously.

  2008-08-31 (0.1.1):

    * Added destructor
*/
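Aside (not part of this header): the 0.2.8 note above relies on the triangular numbers i*(i+1)/2 covering every residue modulo a power of two. A small self-contained check of that property, using the same incremental stepping khash applies while probing (the displacement grows by one after each collision); the table size 64 and the start bucket are arbitrary choices:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
  enum { N = 64 };                 // number of buckets; must be a power of two
  bool seen[N] = { false };
  unsigned i = 17 & (N - 1);       // arbitrary starting bucket (hash & mask)
  for (unsigned step = 0; step < N; step++) {
    seen[i] = true;
    i = (i + step + 1) & (N - 1);  // cumulative displacement after k steps is k*(k+1)/2
  }
  for (unsigned k = 0; k < N; k++) {
    if (!seen[k]) {
      printf("bucket %u was never probed\n", k);
      return 1;
    }
  }
  printf("all %d buckets probed\n", N);
  return 0;
}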
#ifndef NVIM_LIB_KHASH_H
#define NVIM_LIB_KHASH_H
/*!
@header
Generic hash table library.
*/
#define AC_VERSION_KHASH_H "0.2.8"
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include "nvim/func_attr.h"
#include "nvim/memory.h"
// compiler specific configuration
#if UINT_MAX == 0xffffffffu
typedef unsigned int khint32_t;
@ -151,9 +148,9 @@ typedef unsigned long long khint64_t;
#endif
#ifdef _MSC_VER
# define kh_inline __inline
#else
# define kh_inline inline
#endif
typedef khint32_t khint_t;
@ -170,31 +167,32 @@ typedef khint_t khiter_t;
#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
#ifndef kroundup32
# define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, \
++(x))
#endif
#ifndef kcalloc
# define kcalloc(N, Z) xcalloc(N, Z)
#endif
#ifndef kmalloc
# define kmalloc(Z) xmalloc(Z)
#endif
#ifndef krealloc
# define krealloc(P, Z) xrealloc(P, Z)
#endif
#ifndef kfree
# define kfree(P) XFREE_CLEAR(P)
#endif
#define __ac_HASH_UPPER 0.77
#define __KHASH_TYPE(name, khkey_t, khval_t) \
typedef struct { \
khint_t n_buckets, size, n_occupied, upper_bound; \
khint32_t *flags; \
khkey_t *keys; \
khval_t *vals; \
} kh_##name##_t;
#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
extern kh_##name##_t *kh_init_##name(void); \
@ -209,12 +207,12 @@ typedef khint_t khiter_t;
#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, \
__hash_equal) \
SCOPE kh_##name##_t *kh_init_##name(void) \
REAL_FATTR_UNUSED; \
SCOPE kh_##name##_t *kh_init_##name(void) { \
return (kh_##name##_t *)kcalloc(1, sizeof(kh_##name##_t)); \
} \
SCOPE void kh_dealloc_##name(kh_##name##_t *h) \
REAL_FATTR_UNUSED; \
SCOPE void kh_dealloc_##name(kh_##name##_t *h) \
{ \
kfree(h->keys); \
@ -222,7 +220,7 @@ typedef khint_t khiter_t;
kfree(h->vals); \
} \
SCOPE void kh_destroy_##name(kh_##name##_t *h) \
REAL_FATTR_UNUSED; \
SCOPE void kh_destroy_##name(kh_##name##_t *h) \
{ \
if (h) { \
@ -231,7 +229,7 @@ typedef khint_t khiter_t;
} \
} \
SCOPE void kh_clear_##name(kh_##name##_t *h) \
REAL_FATTR_UNUSED; \
SCOPE void kh_clear_##name(kh_##name##_t *h) \
{ \
if (h && h->flags) { \
@ -240,7 +238,7 @@ typedef khint_t khiter_t;
} \
} \
SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
REAL_FATTR_UNUSED; \
SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
{ \
if (h->n_buckets) { \
@ -261,10 +259,10 @@ typedef khint_t khiter_t;
} \
} \
SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
REAL_FATTR_UNUSED; \
SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
{ /* This function uses 0.25*n_buckets bytes of working space instead of */ \
/* [sizeof(key_t+val_t)+.25]*n_buckets. */ \
khint32_t *new_flags = 0; \
khint_t j = 1; \
{ \
@ -275,24 +273,23 @@ typedef khint_t khiter_t;
/* requested size is too small */ \
if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) { \
j = 0; \
} else { /* hash table size to be changed (shrink or expand); rehash */ \
new_flags = (khint32_t *)kmalloc(__ac_fsize(new_n_buckets) \
* sizeof(khint32_t)); \
memset(new_flags, 0xaa, \
__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
if (h->n_buckets < new_n_buckets) { /* expand */ \
khkey_t *new_keys = (khkey_t *)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
h->keys = new_keys; \
if (kh_is_map) { \
khval_t *new_vals = \
(khval_t *)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
h->vals = new_vals; \
} \
} /* otherwise shrink */ \
} \
} \
if (j) { /* rehashing is needed */ \
for (j = 0; j != h->n_buckets; ++j) { \
if (__ac_iseither(h->flags, j) == 0) { \
khkey_t key = h->keys[j]; \
@ -326,7 +323,7 @@ typedef khint_t khiter_t;
} \
/* mark it as deleted in the old hash table */ \
__ac_set_isdel_true(h->flags, i); \
} else { /* write the element and jump out of the loop */ \
h->keys[i] = key; \
if (kh_is_map) { \
h->vals[i] = val; \
@ -336,15 +333,15 @@ typedef khint_t khiter_t;
} \
} \
} \
if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
h->keys = (khkey_t *)krealloc((void *)h->keys, \
new_n_buckets * sizeof(khkey_t)); \
if (kh_is_map) { \
h->vals = (khval_t *)krealloc((void *)h->vals, \
new_n_buckets * sizeof(khval_t)); \
} \
} \
kfree(h->flags); /* free the working space */ \
h->flags = new_flags; \
h->n_buckets = new_n_buckets; \
h->n_occupied = h->size; \
@ -352,25 +349,25 @@ typedef khint_t khiter_t;
} \
} \
SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
REAL_FATTR_UNUSED; \
SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
{ \
khint_t x; \
if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
if (h->n_buckets > (h->size << 1)) { \
kh_resize_##name(h, h->n_buckets - 1); /* clear "deleted" elements */ \
} else { \
kh_resize_##name(h, h->n_buckets + 1); /* expand the hash table */ \
} \
} /* TODO: implement automatically shrinking; */ \
/* resize() already support shrinking */ \
{ \
khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
x = site = h->n_buckets; \
k = __hash_func(key); \
i = k & mask; \
if (__ac_isempty(h->flags, i)) { \
x = i; /* for speed up */ \
} else { \
last = i; \
while (!__ac_isempty(h->flags, i) \
@ -394,24 +391,24 @@ typedef khint_t khiter_t;
} \
} \
} \
if (__ac_isempty(h->flags, x)) { /* not present at all */ \
h->keys[x] = key; \
__ac_set_isboth_false(h->flags, x); \
h->size++; \
h->n_occupied++; \
*ret = 1; \
} else if (__ac_isdel(h->flags, x)) { /* deleted */ \
h->keys[x] = key; \
__ac_set_isboth_false(h->flags, x); \
h->size++; \
*ret = 2; \
} else { \
*ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
} \
return x; \
} \
SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
REAL_FATTR_UNUSED; \
SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
{ \
if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
@ -420,240 +417,242 @@ typedef khint_t khiter_t;
} \
}
#define KHASH_DECLARE(name, khkey_t, khval_t) \
__KHASH_TYPE(name, khkey_t, khval_t) \
__KHASH_PROTOTYPES(name, khkey_t, khval_t)
#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
__KHASH_TYPE(name, khkey_t, khval_t) \
__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
KHASH_INIT2(name, static kh_inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
// --- BEGIN OF HASH FUNCTIONS ---
/*! @function
@abstract Integer hash function
@param key The integer [khint32_t]
@return The hash value [khint_t]
*/
#define kh_int_hash_func(key) (khint32_t)(key)
/*! @function
@abstract Integer comparison function
*/
#define kh_int_hash_equal(a, b) ((a) == (b))
/*! @function
@abstract 64-bit integer hash function
@param key The integer [khint64_t]
@return The hash value [khint_t]
*/
#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
/*! @function
@abstract 64-bit integer comparison function
*/
#define kh_int64_hash_equal(a, b) ((a) == (b))
/*! @function
@abstract const char* hash function
@param s Pointer to a null terminated string
@return The hash value
*/
static kh_inline khint_t __ac_X31_hash_string(const char *s)
{
khint_t h = (khint_t)*s;
if (h) {
for (++s ; *s; ++s) { h = (h << 5) - h + (uint8_t)*s; }
}
return h;
}
/*! @function
@abstract Another interface to const char* hash function
@param key Pointer to a null terminated string [const char*]
@return The hash value [khint_t]
*/
#define kh_str_hash_func(key) __ac_X31_hash_string(key)
/*! @function
@abstract Const char* comparison function
*/
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
static kh_inline khint_t __ac_Wang_hash(khint_t key)
{
key += ~(key << 15);
key ^= (key >> 10);
key += (key << 3);
key ^= (key >> 6);
key += ~(key << 11);
key ^= (key >> 16);
return key;
}
#define kh_int_hash_func2(k) __ac_Wang_hash((khint_t)key)
// --- END OF HASH FUNCTIONS ---
// Other convenient macros...
/*!
@abstract Type of the hash table.
@param name Name of the hash table [symbol]
*/
#define khash_t(name) kh_##name##_t
/*! @function
@abstract Initiate a hash table.
@param name Name of the hash table [symbol]
@return Pointer to the hash table [khash_t(name)*]
*/
#define kh_init(name) kh_init_##name()
/*! @function
@abstract Destroy a hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
*/
#define kh_destroy(name, h) kh_destroy_##name(h)
/*! @function
@abstract Free memory referenced directly inside a hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
*/
#define kh_dealloc(name, h) kh_dealloc_##name(h)
/*! @function
@abstract Reset a hash table without deallocating memory.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
*/
#define kh_clear(name, h) kh_clear_##name(h)
/*! @function
@abstract Resize a hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param s New size [khint_t]
*/
#define kh_resize(name, h, s) kh_resize_##name(h, s)
/*! @function
@abstract Insert a key to the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys]
@param r Extra return code: -1 if the operation failed;
0 if the key is present in the hash table;
1 if the bucket is empty (never used); 2 if the element in
the bucket has been deleted [int*]
@return Iterator to the inserted element [khint_t]
*/
#define kh_put(name, h, k, r) kh_put_##name(h, k, r)
/*! @function
@abstract Retrieve a key from the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys]
@return Iterator to the found element, or kh_end(h) if the element is absent [khint_t]
*/
#define kh_get(name, h, k) kh_get_##name(h, k)
/*! @function
@abstract Remove a key from the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Iterator to the element to be deleted [khint_t]
*/
#define kh_del(name, h, k) kh_del_##name(h, k)
/*! @function
@abstract Test whether a bucket contains data.
@param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t]
@return 1 if containing data; 0 otherwise [int]
*/
#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
/*! @function
@abstract Get key given an iterator
@param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t]
@return Key [type of keys]
*/
#define kh_key(h, x) ((h)->keys[x])
/*! @function
@abstract Get value given an iterator
@param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t]
@return Value [type of values]
@discussion For hash sets, calling this results in segfault.
*/
#define kh_val(h, x) ((h)->vals[x])
/*! @function
@abstract Alias of kh_val()
*/
#define kh_value(h, x) ((h)->vals[x])
/*! @function
@abstract Get the start iterator
@param h Pointer to the hash table [khash_t(name)*]
@return The start iterator [khint_t]
*/
#define kh_begin(h) (khint_t)(0)
/*! @function
@abstract Get the end iterator
@param h Pointer to the hash table [khash_t(name)*]
@return The end iterator [khint_t]
*/
#define kh_end(h) ((h)->n_buckets)
/*! @function
@abstract Get the number of elements in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@return Number of elements in the hash table [khint_t]
*/
#define kh_size(h) ((h)->size)
/*! @function
@abstract Get the number of buckets in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@return Number of buckets in the hash table [khint_t]
*/
#define kh_n_buckets(h) ((h)->n_buckets)
/*! @function
@abstract Iterate over the entries in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@param kvar Variable to which key will be assigned
@param vvar Variable to which value will be assigned
@param code Block of code to execute
*/
#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
if (!kh_exist(h, __i)) continue; \
(kvar) = kh_key(h, __i); \
(vvar) = kh_val(h, __i); \
code; \
} }
/*! @function
@abstract Iterate over the values in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@param vvar Variable to which value will be assigned
@param code Block of code to execute
*/
#define kh_foreach_value(h, vvar, code) { khint_t __i; \
for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
if (!kh_exist(h, __i)) continue; \
(vvar) = kh_val(h, __i); \
code; \
} }
/*! @function
@abstract Iterate over the keys in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@param kvar Variable to which value will be assigned
@param code Block of code to execute
*/
#define kh_foreach_key(h, kvar, code) \
{ \
@ -667,57 +666,57 @@ static kh_inline khint_t __ac_Wang_hash(khint_t key)
} \
}
// More conenient interfaces
/*! @function
@abstract Instantiate a hash set containing integer keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_INT(name) \
KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
/*! @function
@abstract Instantiate a hash map containing integer keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT(name, khval_t) \
KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
/*! @function
@abstract Instantiate a hash map containing 64-bit integer keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_INT64(name) \
KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
/*! @function
@abstract Instantiate a hash map containing 64-bit integer keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT64(name, khval_t) \
KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
typedef const char *kh_cstr_t;
/*! @function
@abstract Instantiate a hash map containing const char* keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_STR(name) \
KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
/*! @function
@abstract Instantiate a hash map containing const char* keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_STR(name, khval_t) \
KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
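Aside (not part of this diff): a sketch of how the string-keyed map above is typically used, mirroring the int-keyed example at the top of this header; the table name "strmap" is illustrative:

#include <stdio.h>
#include "nvim/khash.h"               // same include path as the example at the top of this file

KHASH_MAP_INIT_STR(strmap, int)       // khash_t(strmap): const char * keys, int values

static void strmap_demo(void)
{
  khash_t(strmap) *h = kh_init(strmap);
  int ret;
  khiter_t k = kh_put(strmap, h, "hello", &ret);  // the key pointer is stored as-is and must outlive the table
  if (ret != 0) {                     // 1 or 2: the bucket was empty or previously deleted
    kh_value(h, k) = 0;               // initialize the value of a fresh key
  }
  kh_value(h, k)++;
  k = kh_get(strmap, h, "hello");
  if (k != kh_end(h)) {               // kh_end() means "not found"
    printf("hello -> %d\n", kh_value(h, k));
  }
  const char *key;
  int val;
  kh_foreach(h, key, val, {           // visits only occupied buckets
    printf("%s: %d\n", key, val);
  });
  kh_destroy(strmap, h);
}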
/*! @function
@abstract Return a literal for an empty hash table.
@param name Name of the hash table [symbol]
*/
#define KHASH_EMPTY_TABLE(name) \
((kh_##name##_t) { \


@ -1,5 +1,3 @@
// uncrustify:off
/* The MIT License
Copyright (c) 2008-2009, by Attractive Chaos <attractor@live.co.uk>
@ -23,50 +21,50 @@
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef _AC_KLIST_H
#define _AC_KLIST_H
#include <assert.h>
#include <stdlib.h>
#include "nvim/func_attr.h"
#include "nvim/memory.h"
#define KMEMPOOL_INIT(name, kmptype_t, kmpfree_f) \
typedef struct { \
size_t cnt, n, max; \
kmptype_t **buf; \
} kmp_##name##_t; \
static inline kmp_##name##_t *kmp_init_##name(void) { \
return xcalloc(1, sizeof(kmp_##name##_t)); \
} \
static inline void kmp_destroy_##name(kmp_##name##_t *mp) \
REAL_FATTR_UNUSED; \
static inline void kmp_destroy_##name(kmp_##name##_t *mp) { \
size_t k; \
for (k = 0; k < mp->n; k++) { \
kmpfree_f(mp->buf[k]); XFREE_CLEAR(mp->buf[k]); \
} \
XFREE_CLEAR(mp->buf); XFREE_CLEAR(mp); \
} \
static inline kmptype_t *kmp_alloc_##name(kmp_##name##_t *mp) { \
mp->cnt++; \
if (mp->n == 0) { \
return xcalloc(1, sizeof(kmptype_t)); \
} \
return mp->buf[--mp->n]; \
} \
static inline void kmp_free_##name(kmp_##name##_t *mp, kmptype_t *p) { \
mp->cnt--; \
if (mp->n == mp->max) { \
mp->max = mp->max ? (mp->max << 1) : 16; \
mp->buf = xrealloc(mp->buf, sizeof(kmptype_t *) * mp->max); \
} \
mp->buf[mp->n++] = p; \
}
#define kmempool_t(name) kmp_##name##_t
#define kmp_init(name) kmp_init_##name()
@ -75,55 +73,55 @@
#define kmp_free(name, mp, p) kmp_free_##name(mp, p)
#define KLIST_INIT(name, kltype_t, kmpfree_t) \
struct __kl1_##name { \
kltype_t data; \
struct __kl1_##name *next; \
}; \
typedef struct __kl1_##name kl1_##name; \
KMEMPOOL_INIT(name, kl1_##name, kmpfree_t) \
typedef struct { \
kl1_##name *head, *tail; \
kmp_##name##_t *mp; \
size_t size; \
} kl_##name##_t; \
static inline kl_##name##_t *kl_init_##name(void) { \
kl_##name##_t *kl = xcalloc(1, sizeof(kl_##name##_t)); \
kl->mp = kmp_init(name); \
kl->head = kl->tail = kmp_alloc(name, kl->mp); \
kl->head->next = 0; \
return kl; \
} \
static inline void kl_destroy_##name(kl_##name##_t *kl) \
REAL_FATTR_UNUSED; \
static inline void kl_destroy_##name(kl_##name##_t *kl) { \
kl1_##name *p; \
for (p = kl->head; p != kl->tail; p = p->next) { \
kmp_free(name, kl->mp, p); \
} \
kmp_free(name, kl->mp, p); \
kmp_destroy(name, kl->mp); \
XFREE_CLEAR(kl); \
} \
static inline void kl_push_##name(kl_##name##_t *kl, kltype_t d) { \
kl1_##name *q, *p = kmp_alloc(name, kl->mp); \
q = kl->tail; p->next = 0; kl->tail->next = p; kl->tail = p; \
kl->size++; \
q->data = d; \
} \
static inline kltype_t kl_shift_at_##name(kl_##name##_t *kl, \
kl1_##name **n) { \
assert((*n)->next); \
kl1_##name *p; \
kl->size--; \
p = *n; \
*n = (*n)->next; \
if (p == kl->head) { \
kl->head = *n; \
} \
kltype_t d = p->data; \
kmp_free(name, kl->mp, p); \
return d; \
}
#define kliter_t(name) kl1_##name
#define klist_t(name) kl_##name##_t
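Aside (not part of this commit): a minimal sketch of the functions generated above, assuming an element type that needs no per-element destructor; the list name, the no-op free macro and the include path are illustrative:

#include <stdio.h>
#include "nvim/lib/klist.h"                    // assumed include path

#define _noop_free(x)                          // int payloads need no cleanup
KLIST_INIT(intlist, int, _noop_free)           // generates klist_t(intlist) and the kl_*_intlist() helpers

static void klist_demo(void)
{
  klist_t(intlist) *kl = kl_init_intlist();    // allocates the list plus its memory pool
  kl_push_intlist(kl, 1);
  kl_push_intlist(kl, 2);
  while (kl->size > 0) {
    int v = kl_shift_at_intlist(kl, &kl->head);  // pop from the front; valid while size > 0
    printf("%d\n", v);
  }
  kl_destroy_intlist(kl);                      // frees the nodes, the pool and the list itself
}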


@ -1,5 +1,3 @@
// uncrustify:off
// The MIT License
//
// Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk>
@ -46,25 +44,25 @@
#include "nvim/os/os_defs.h"
#define kv_roundup32(x) \
((--(x)), \
((x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16), \
(++(x)))
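// Editor's note: kv_roundup32() rounds a value up to the next power of two in
// place (for values that fit in 32 bits): the decrement keeps exact powers of
// two unchanged, the shifted ORs smear the highest set bit into every lower
// bit, and the final increment carries into a single higher bit, e.g. 5 -> 8
// and 8 -> 8. kv_a() below relies on this to grow capacity geometrically.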
#define KV_INITIAL_VALUE { .size = 0, .capacity = 0, .items = NULL }
#define kvec_t(type) \
struct { \
size_t size; \
size_t capacity; \
type *items; \
}
#define kv_init(v) ((v).size = (v).capacity = 0, (v).items = 0)
#define kv_destroy(v) \
do { \
xfree((v).items); \
kv_init(v); \
} while (0)
#define kv_A(v, i) ((v).items[(i)])
#define kv_pop(v) ((v).items[--(v).size])
#define kv_size(v) ((v).size)
@ -81,34 +79,34 @@
#define kv_drop(v, n) ((v).size -= (n))
#define kv_resize(v, s) \
((v).capacity = (s), \
(v).items = xrealloc((v).items, sizeof((v).items[0]) * (v).capacity))
#define kv_resize_full(v) \
kv_resize(v, (v).capacity ? (v).capacity << 1 : 8)
#define kv_copy(v1, v0) \
do { \
if ((v1).capacity < (v0).size) { \
kv_resize(v1, (v0).size); \
} \
(v1).size = (v0).size; \
memcpy((v1).items, (v0).items, sizeof((v1).items[0]) * (v0).size); \
} while (0)
#define kv_pushp(v) \
((((v).size == (v).capacity) ? (kv_resize_full(v), 0) : 0), \
((v).items + ((v).size++)))
#define kv_push(v, x) \
(*kv_pushp(v) = (x))
#define kv_a(v, i) \
(*(((v).capacity <= (size_t)(i) \
? ((v).capacity = (v).size = (i) + 1, \
kv_roundup32((v).capacity), \
kv_resize((v), (v).capacity), 0UL) \
: ((v).size <= (size_t)(i) \
? (v).size = (i) + 1 \
: 0UL)), \
&(v).items[(i)]))
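// Illustrative usage sketch (editor's addition, not part of the original
// header). A plain vector of ints built only from the macros above; use() is
// a stand-in for real code:
//
//   kvec_t(int) v = KV_INITIAL_VALUE;
//   kv_push(v, 7);              // grows the array via kv_resize_full()
//   kv_a(v, 15) = 3;            // indexed write, resizing as needed
//   for (size_t i = 0; i < kv_size(v); i++) {
//     use(kv_A(v, i));
//   }
//   kv_destroy(v);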
@ -122,24 +120,23 @@
/// @param[in] type Type of vector elements.
/// @param[in] init_size Number of the elements in the initial array.
#define kvec_withinit_t(type, INIT_SIZE) \
struct { \
size_t size; \
size_t capacity; \
type *items; \
type init_array[INIT_SIZE]; \
}
/// Initialize vector with preallocated array
///
/// @param[out] v Vector to initialize.
#define kvi_init(v) \
((v).capacity = ARRAY_SIZE((v).init_array), \
(v).size = 0, \
(v).items = (v).init_array)
/// Move data to a new destination and free source
static inline void *_memcpy_free(void *const restrict dest, void *const restrict src,
const size_t size)
FUNC_ATTR_NONNULL_ALL FUNC_ATTR_NONNULL_RET FUNC_ATTR_ALWAYS_INLINE
{
@ -158,15 +155,15 @@ static inline void *_memcpy_free(void *const restrict dest,
/// @param[out] v Vector to resize.
/// @param[in] s New size.
#define kvi_resize(v, s) \
((v).capacity = ((s) > ARRAY_SIZE((v).init_array) \
? (s) \
: ARRAY_SIZE((v).init_array)), \
(v).items = ((v).capacity == ARRAY_SIZE((v).init_array) \
? ((v).items == (v).init_array \
? (v).items \
: _memcpy_free((v).init_array, (v).items, \
(v).size * sizeof((v).items[0]))) \
: ((v).items == (v).init_array \
? memcpy(xmalloc((v).capacity * sizeof((v).items[0])), \
(v).items, \
(v).size * sizeof((v).items[0])) \
@ -177,13 +174,13 @@ static inline void *_memcpy_free(void *const restrict dest,
///
/// @param[out] v Vector to resize.
#define kvi_resize_full(v) \
/* ARRAY_SIZE((v).init_array) is the minimal capacity of this vector. */ \
/* Thus when vector is full capacity may not be zero and it is safe */ \
/* not to bother with checking whether (v).capacity is 0. But now */ \
/* capacity is not guaranteed to have size that is a power of 2, it is */ \
/* hard to fix this here and is not very necessary if users will use */ \
/* 2^x initial array size. */ \
kvi_resize(v, (v).capacity << 1)
/// Get location where to store new element to a vector with preallocated array
///
@ -191,24 +188,24 @@ static inline void *_memcpy_free(void *const restrict dest,
///
/// @return Pointer to the place where new value should be stored.
#define kvi_pushp(v) \
((((v).size == (v).capacity) ? (kvi_resize_full(v), 0) : 0), \
((v).items + ((v).size++)))
/// Push value to a vector with preallocated array
///
/// @param[out] v Vector to push to.
/// @param[in] x Value to push.
#define kvi_push(v, x) \
(*kvi_pushp(v) = (x))
/// Free array of elements of a vector with preallocated array if needed
///
/// @param[out] v Vector to free.
#define kvi_destroy(v) \
do { \
if ((v).items != (v).init_array) { \
XFREE_CLEAR((v).items); \
} \
} while (0)
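// Illustrative usage sketch (editor's addition, not part of the original
// header). A vector that lives in its preallocated array until it outgrows
// it; the initial size of 32 is arbitrary:
//
//   kvec_withinit_t(int, 32) w;
//   kvi_init(w);                 // items points at w.init_array
//   for (int i = 0; i < 100; i++) {
//     kvi_push(w, i);            // spills to the heap past 32 elements
//   }
//   kvi_destroy(w);              // frees only if the heap copy was made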
#endif // NVIM_LIB_KVEC_H
@ -1,5 +1,3 @@
// uncrustify:off
// Queue implemented by circularly-linked list.
//
// Adapted from libuv. Simpler and more efficient than klist.h for implementing
@ -43,7 +41,7 @@ typedef struct _queue {
while((q) != (h)) { \
QUEUE *next = q->next; \
code \
(q) = next; \
}
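// Editor's note: the loop above saves q->next before expanding `code` so that
// the body may unlink or free the node currently pointed to by `q` without
// breaking the traversal.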
@ -1,5 +1,3 @@
// uncrustify:off
/// Macros-based ring buffer implementation.
///
/// Supported functions:
@ -17,24 +15,29 @@
#ifndef NVIM_LIB_RINGBUF_H
#define NVIM_LIB_RINGBUF_H
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "nvim/memory.h"
#include "nvim/func_attr.h"
#include "nvim/memory.h"
#define _RINGBUF_LENGTH(rb) \
  ((rb)->first == NULL ? 0 \
   : ((rb)->next == (rb)->first) ? (size_t)((rb)->buf_end - (rb)->buf) + 1 \
   : ((rb)->next > (rb)->first) ? (size_t)((rb)->next - (rb)->first) \
   : (size_t)((rb)->next - (rb)->buf + (rb)->buf_end - (rb)->first + 1))
#define _RINGBUF_NEXT(rb, var) \
((var) == (rb)->buf_end ? (rb)->buf : (var) + 1)
#define _RINGBUF_PREV(rb, var) \
((var) == (rb)->buf ? (rb)->buf_end : (var) - 1)
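// Editor's note on the helpers above: _RINGBUF_LENGTH() distinguishes an
// empty buffer (first == NULL, length 0), a full one (next == first, length
// equal to the capacity), and a partially filled one whose occupied region is
// either contiguous (next > first) or wraps past the end of the backing
// array. _RINGBUF_NEXT() and _RINGBUF_PREV() step one slot with that same
// wrap-around.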
/// Iterate over all ringbuf values
///
@ -42,11 +45,11 @@
/// @param RBType Type of the ring buffer element.
/// @param varname Variable name.
#define RINGBUF_FORALL(rb, RBType, varname) \
size_t varname##_length_fa_ = _RINGBUF_LENGTH(rb); \
for (RBType *varname = ((rb)->first == NULL ? (rb)->next : (rb)->first); \
varname##_length_fa_; \
(varname = _RINGBUF_NEXT(rb, varname)), \
varname##_length_fa_--)
/// Iterate over all ringbuf values, from end to the beginning
///
@ -57,11 +60,11 @@
/// @param RBType Type of the ring buffer element.
/// @param varname Variable name.
#define RINGBUF_ITER_BACK(rb, RBType, varname) \
size_t varname##_length_ib_ = _RINGBUF_LENGTH(rb); \
for (varname = ((rb)->next == (rb)->buf ? (rb)->buf_end : (rb)->next - 1); \
varname##_length_ib_; \
(varname = _RINGBUF_PREV(rb, varname)), \
varname##_length_ib_--)
/// Define a ring buffer structure
///
@ -69,12 +72,12 @@
/// `{TypeName}RingBuffer`.
/// @param RBType Type of the single ring buffer element.
#define RINGBUF_TYPEDEF(TypeName, RBType) \
typedef struct { \
RBType *buf; \
RBType *next; \
RBType *first; \
RBType *buf_end; \
} TypeName##RingBuffer;
/// Dummy item free macros, for use in RINGBUF_INIT
///
@ -94,13 +97,13 @@ typedef struct { \
/// @param varname Variable name.
/// @param rbsize Ring buffer size.
#define RINGBUF_STATIC(scope, TypeName, RBType, varname, rbsize) \
static RBType _##varname##_buf[rbsize]; \
scope TypeName##RingBuffer varname = { \
.buf = _##varname##_buf, \
.next = _##varname##_buf, \
.first = NULL, \
.buf_end = _##varname##_buf + rbsize - 1, \
};
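// Illustrative usage sketch (editor's addition, not part of the original
// header). Declaring a file-local ring buffer of 128 ints; the names are
// hypothetical:
//
//   RINGBUF_TYPEDEF(Int, int)
//   RINGBUF_STATIC(static, Int, int, int_ring, 128)
//
// This expands to a static `int _int_ring_buf[128]` plus an IntRingBuffer
// named `int_ring` whose buf and next point at the array, whose first is NULL
// (empty), and whose buf_end points at the last slot.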
/// Initialize a new ring buffer
///
@ -114,195 +117,191 @@ scope TypeName##RingBuffer varname = { \
///
/// Intended function signature: `void *rbfree(RBType *)`;
#define RINGBUF_INIT(TypeName, funcprefix, RBType, rbfree) \
  static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
  REAL_FATTR_WARN_UNUSED_RESULT; \
  static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
  { \
    assert(size != 0); \
    RBType *buf = xmalloc(size * sizeof(RBType)); \
    return (TypeName##RingBuffer) { \
      .buf = buf, \
      .next = buf, \
      .first = NULL, \
      .buf_end = buf + size - 1, \
    }; \
  } \
  \
  static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
  REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
  { \
    if (rb == NULL) { \
      return; \
    } \
    RINGBUF_FORALL(rb, RBType, rbitem) { \
      rbfree(rbitem); \
    } \
    XFREE_CLEAR(rb->buf); \
  } \
  \
  static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
  REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
  { \
    XFREE_CLEAR(rb->buf); \
  } \
  \
  static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
                                          RBType item) \
  REAL_FATTR_NONNULL_ARG(1); \
  static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
                                          RBType item) \
  { \
    if (rb->next == rb->first) { \
      rbfree(rb->first); \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } else if (rb->first == NULL) { \
      rb->first = rb->next; \
    } \
    *rb->next = item; \
    rb->next = _RINGBUF_NEXT(rb, rb->next); \
  } \
  \
  static inline ptrdiff_t funcprefix##_rb_find_idx(const TypeName##RingBuffer *const rb, \
                                                   const RBType *const item_p) \
  REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
  static inline ptrdiff_t funcprefix##_rb_find_idx(const TypeName##RingBuffer *const rb, \
                                                   const RBType *const item_p) \
  { \
    assert(rb->buf <= item_p); \
    assert(rb->buf_end >= item_p); \
    if (rb->first == NULL) { \
      return -1; \
    } else if (item_p >= rb->first) { \
      return item_p - rb->first; \
    } else { \
      return item_p - rb->buf + rb->buf_end - rb->first + 1; \
    } \
  } \
  \
  static inline size_t funcprefix##_rb_size(const TypeName##RingBuffer *const rb) \
  REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
  static inline size_t funcprefix##_rb_size(const TypeName##RingBuffer *const rb) \
  { \
    return (size_t)(rb->buf_end - rb->buf) + 1; \
  } \
  \
  static inline size_t funcprefix##_rb_length(const TypeName##RingBuffer *const rb) \
  REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
  static inline size_t funcprefix##_rb_length(const TypeName##RingBuffer *const rb) \
  { \
    return _RINGBUF_LENGTH(rb); \
  } \
  \
  static inline RBType *funcprefix##_rb_idx_p(const TypeName##RingBuffer *const rb, \
                                              const size_t idx) \
  REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
  static inline RBType *funcprefix##_rb_idx_p(const TypeName##RingBuffer *const rb, \
                                              const size_t idx) \
  { \
    assert(idx <= funcprefix##_rb_size(rb)); \
    assert(idx <= funcprefix##_rb_length(rb)); \
    if (rb->first + idx > rb->buf_end) { \
      return rb->buf + ((rb->first + idx) - (rb->buf_end + 1)); \
    } else { \
      return rb->first + idx; \
    } \
  } \
  \
  static inline RBType funcprefix##_rb_idx(const TypeName##RingBuffer *const rb, \
                                           const size_t idx) \
  REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
  static inline RBType funcprefix##_rb_idx(const TypeName##RingBuffer *const rb, \
                                           const size_t idx) \
  { \
    return *funcprefix##_rb_idx_p(rb, idx); \
  } \
  \
  static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
                                            const size_t idx, \
                                            RBType item) \
  REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
                                            const size_t idx, \
                                            RBType item) \
  { \
    assert(idx <= funcprefix##_rb_size(rb)); \
    assert(idx <= funcprefix##_rb_length(rb)); \
    const size_t length = funcprefix##_rb_length(rb); \
    if (idx == length) { \
      funcprefix##_rb_push(rb, item); \
      return; \
    } \
    RBType *const insertpos = funcprefix##_rb_idx_p(rb, idx); \
    if (insertpos == rb->next) { \
      funcprefix##_rb_push(rb, item); \
      return; \
    } \
    if (length == funcprefix##_rb_size(rb)) { \
      rbfree(rb->first); \
    } \
    if (insertpos < rb->next) { \
      memmove(insertpos + 1, insertpos, \
              (size_t)((uintptr_t)rb->next - (uintptr_t)insertpos)); \
    } else { \
      assert(insertpos > rb->first); \
      assert(rb->next <= rb->first); \
      memmove(rb->buf + 1, rb->buf, \
              (size_t)((uintptr_t)rb->next - (uintptr_t)rb->buf)); \
      *rb->buf = *rb->buf_end; \
      memmove(insertpos + 1, insertpos, \
              (size_t)((uintptr_t)(rb->buf_end + 1) - (uintptr_t)insertpos)); \
    } \
    *insertpos = item; \
    if (length == funcprefix##_rb_size(rb)) { \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } \
    rb->next = _RINGBUF_NEXT(rb, rb->next); \
  } \
  \
  static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
                                            const size_t idx) \
  REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
  static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
                                            const size_t idx) \
  { \
    assert(idx < funcprefix##_rb_size(rb)); \
    assert(idx < funcprefix##_rb_length(rb)); \
    RBType *const rmpos = funcprefix##_rb_idx_p(rb, idx); \
    rbfree(rmpos); \
    if (rmpos == rb->next - 1) { \
      rb->next--; \
      if (rb->first == rb->next) { \
        rb->first = NULL; \
        rb->next = rb->buf; \
      } \
    } else if (rmpos == rb->first) { \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
      if (rb->first == rb->next) { \
        rb->first = NULL; \
        rb->next = rb->buf; \
      } \
    } else if (rb->first < rb->next || rb->next == rb->buf) { \
      assert(rmpos > rb->first); \
      assert(rmpos <= _RINGBUF_PREV(rb, rb->next)); \
      memmove(rb->first + 1, rb->first, \
              (size_t)((uintptr_t)rmpos - (uintptr_t)rb->first)); \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } else if (rmpos < rb->next) { \
      memmove(rmpos, rmpos + 1, \
              (size_t)((uintptr_t)rb->next - (uintptr_t)rmpos)); \
      rb->next = _RINGBUF_PREV(rb, rb->next); \
    } else { \
      assert(rb->first < rb->buf_end); \
      memmove(rb->first + 1, rb->first, \
              (size_t)((uintptr_t)rmpos - (uintptr_t)rb->first)); \
      rb->first = _RINGBUF_NEXT(rb, rb->first); \
    } \
}
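// Illustrative usage sketch (editor's addition, not part of the original
// header). Generating accessors for a heap-allocated ring buffer; the element
// type, the no-op free function and the "hist" prefix are hypothetical, and
// use() stands in for real code:
//
//   typedef struct { int value; } HistEntry;
//   static void hist_entry_free(HistEntry *e) { (void)e; }
//
//   RINGBUF_TYPEDEF(Hist, HistEntry)
//   RINGBUF_INIT(Hist, hist, HistEntry, hist_entry_free)
//
//   HistRingBuffer rb = hist_rb_new(8);            // room for 8 entries
//   hist_rb_push(&rb, (HistEntry){ .value = 1 });  // overwrites oldest when full
//   RINGBUF_FORALL(&rb, HistEntry, it) {           // oldest to newest
//     use(it->value);
//   }
//   hist_rb_free(&rb);                             // rbfree()s items, frees buf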
#endif // NVIM_LIB_RINGBUF_H