From 28a549d597a286330ba87ff4fffe1e2d09e0f611 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Linse?=
Date: Sun, 28 Aug 2016 13:26:01 +0200
Subject: [PATCH] kbtree: make warning free and delete deprecated macros

---
 src/clint.py          |  2 +
 src/nvim/buffer.c     | 23 ++++++-----
 src/nvim/bufhl_defs.h |  3 +-
 src/nvim/lib/kbtree.h | 95 ++++++++++++++++---------------------------
 4 files changed, 53 insertions(+), 70 deletions(-)

diff --git a/src/clint.py b/src/clint.py
index e6bedd37dc..69a061d2ab 100755
--- a/src/clint.py
+++ b/src/clint.py
@@ -2529,6 +2529,8 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
r'(?line = line;
-  // p->items zero initialized
+  BufhlLine *p = xmalloc(sizeof(*p));
+  *p = (BufhlLine)BUFHLLINE_INIT(line);
   kb_put(bufhl, b, p);
   return p;
 }
@@ -5231,7 +5234,7 @@ void bufhl_clear_line_range(buf_T *buf,
   linenr_T first_changed = MAXLNUM, last_changed = -1;

   kbitr_t(bufhl) itr;
-  BufhlLine *l, t = {line_start};
+  BufhlLine *l, t = BUFHLLINE_INIT(line_start);
   if (!kb_itr_get(bufhl, &buf->b_bufhl_info, &t, &itr)) {
     kb_itr_next(bufhl, &buf->b_bufhl_info, &itr);
   }
@@ -5313,7 +5316,7 @@ void bufhl_mark_adjust(buf_T* buf,
   // we need to detect this case and

   kbitr_t(bufhl) itr;
-  BufhlLine *l, t = {line1};
+  BufhlLine *l, t = BUFHLLINE_INIT(line1);
   if (!kb_itr_get(bufhl, &buf->b_bufhl_info, &t, &itr)) {
     kb_itr_next(bufhl, &buf->b_bufhl_info, &itr);
   }
diff --git a/src/nvim/bufhl_defs.h b/src/nvim/bufhl_defs.h
index 0d2f9c90d0..ddf048dd82 100644
--- a/src/nvim/bufhl_defs.h
+++ b/src/nvim/bufhl_defs.h
@@ -21,6 +21,7 @@ typedef struct {
   linenr_T line;
   bufhl_vec_T items;
 } BufhlLine;
+#define BUFHLLINE_INIT(l) { l, KV_INITIAL_VALUE }

 typedef struct {
   bufhl_vec_T entries;
@@ -28,7 +29,7 @@ typedef struct {
   colnr_T valid_to;
 } bufhl_lineinfo_T;

-#define BUFHL_CMP(a, b) (((a)->line - (b)->line))
+#define BUFHL_CMP(a, b) ((int)(((a)->line - (b)->line)))
 KBTREE_INIT(bufhl, BufhlLine *, BUFHL_CMP, 10)
 typedef kbtree_t(bufhl) bufhl_info_T;
 #endif  // NVIM_BUFHL_DEFS_H
diff --git a/src/nvim/lib/kbtree.h b/src/nvim/lib/kbtree.h
index d72c1d5690..f836dec48f 100644
--- a/src/nvim/lib/kbtree.h
+++ b/src/nvim/lib/kbtree.h
@@ -45,7 +45,7 @@
     int32_t n; \
     bool is_internal; \
     key_t key[2*T-1]; \
-    kbnode_##name##_t *ptr[0]; \
+    kbnode_##name##_t *ptr[]; \
   } ; \
   \
   typedef struct { \
@@ -88,7 +88,7 @@
   } while (0)

 #define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
-  static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, const key_t * __restrict k, int *r) \
+  static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, int *r) \
   { \
     int tr, *rr, begin = 0, end = x->n; \
     if (x->n == 0) return -1; \
@@ -104,7 +104,7 @@
   }

 #define __KB_GET(name, key_t, kbnode_t) \
-  static key_t *kb_getp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
+  static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
   { \
     if (!b->root) { \
       return 0; \
@@ -119,13 +119,13 @@
     } \
     return 0; \
   } \
-  static inline key_t *kb_get_##name(kbtree_##name##_t *b, const key_t k) \
+  static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
   { \
     return kb_getp_##name(b, &k); \
   }

 #define __KB_INTERVAL(name, key_t, kbnode_t) \
-  static void kb_intervalp_##name(kbtree_##name##_t *b, const key_t * __restrict k, key_t **lower, key_t **upper) \
+  static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, key_t **upper) \
   { \
     if (!b->root) { \
       return; \
@@ -145,14 +145,14 @@
       x = __KB_PTR(b, x)[i + 1]; \
     } \
   } \
-  static inline void kb_interval_##name(kbtree_##name##_t *b, const key_t k, key_t **lower, key_t **upper) \
+  static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
   { \
     kb_intervalp_##name(b, &k, lower, upper); \
   }

 #define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
   /* x must be an internal node */ \
-  static void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
+  static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
   { \
     kbnode_t *z; \
     z = (kbnode_t*)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
@@ -168,7 +168,7 @@
     __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; \
     ++x->n; \
   } \
-  static key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k) \
+  static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
   { \
     int i = x->n - 1; \
     key_t *ret; \
@@ -189,7 +189,7 @@
     } \
     return ret; \
   } \
-  static key_t *kb_putp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
+  static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
   { \
     if (!b->root) { \
       b->root = (kbnode_t*)xcalloc(1, ILEN); \
@@ -208,14 +208,14 @@
     } \
     return __kb_putp_aux_##name(b, r, k); \
   } \
-  static inline void kb_put_##name(kbtree_##name##_t *b, const key_t k) \
+  static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
   { \
     kb_putp_##name(b, &k); \
   }


 #define __KB_DEL(name, key_t, kbnode_t, T) \
-  static key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k, int s) \
+  static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, int s) \
   { \
     int yn, zn, i, r = 0; \
     kbnode_t *xp, *y, *z; \
@@ -295,7 +295,7 @@
     } \
     return __kb_delp_aux_##name(b, xp, k, s); \
   } \
-  static key_t kb_delp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
+  static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
   { \
     kbnode_t *x; \
     key_t ret; \
@@ -309,7 +309,7 @@
     } \
     return ret; \
   } \
-  static inline key_t kb_del_##name(kbtree_##name##_t *b, const key_t k) \
+  static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
   { \
     return kb_delp_##name(b, &k); \
   }
@@ -357,7 +357,7 @@
       if (itr->p->x && itr->p->i >= 0) return 1; \
     } \
   } \
-  static int kb_itr_getp_##name(kbtree_##name##_t *b, const key_t * __restrict k, kbitr_##name##_t *itr) \
+  static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, kbitr_##name##_t *itr) \
   { \
     if (b->n_keys == 0) return 0; \
     int i, r = 0; \
@@ -373,13 +373,13 @@
     } \
     return 0; \
   } \
-  static int kb_itr_get_##name(kbtree_##name##_t *b, const key_t k, kbitr_##name##_t *itr) \
+  static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
   { \
     return kb_itr_getp_##name(b,&k,itr); \
   } \
   static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
   { \
-    const key_t k = kb_itr_key(itr); \
+    key_t k = kb_itr_key(itr); \
     kb_delp_##name(b, &k); \
     kb_itr_getp_##name(b, &k, itr); \
   }
@@ -387,15 +387,34 @@
 #define KBTREE_INIT(name, key_t, __cmp, T) \
   KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, (sizeof(kbnode_##name##_t)+(2*T)*sizeof(void *)))

+#if (!defined(__clang__) && !defined(__INTEL_COMPILER)) && (__GNUC__ > 4)
+
+// The index trickery shouldn't be UB anymore,
+// still it is too much for gcc's -Werror=array-bounds
+# define __KB_PRAGMA_START \
+    _Pragma("GCC diagnostic push") \
+    _Pragma("GCC diagnostic ignored \"-Warray-bounds\"")
+
+# define __KB_PRAGMA_END \
+    _Pragma("GCC diagnostic pop") \
+
+#else
+
+# define __KB_PRAGMA_START
+# define __KB_PRAGMA_END
+
+#endif

 #define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
+  __KB_PRAGMA_START \
   __KB_TREE_T(name, key_t, T) \
   __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
   __KB_GET(name, key_t, kbnode_t) \
   __KB_INTERVAL(name, key_t, kbnode_t) \
   __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
   __KB_DEL(name, key_t, kbnode_t, T) \
-  __KB_ITR(name, key_t, kbnode_t)
+  __KB_ITR(name, key_t, kbnode_t) \
+  __KB_PRAGMA_END


 #define KB_DEFAULT_SIZE 512
@@ -425,46 +444,4 @@
 #define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
 #define kb_str_cmp(a, b) strcmp(a, b)

-/* The following is *DEPRECATED*!!! Use the iterator interface instead! */
-
-#if 0
-
-typedef struct {
-  kbnode_t *x;
-  int i;
-} __kbstack_t;
-
-#define __kb_traverse(key_t, b, __func) do { \
-    int __kmax = 8; \
-    __kbstack_t *__kstack, *__kp; \
-    __kp = __kstack = (__kbstack_t*)calloc(__kmax, sizeof(__kbstack_t)); \
-    __kp->x = (b)->root; __kp->i = 0; \
-    for (;;) { \
-      while (__kp->x && __kp->i <= __kp->x->n) { \
-        if (__kp - __kstack == __kmax - 1) { \
-          __kmax <<= 1; \
-          __kstack = (__kbstack_t*)realloc(__kstack, __kmax * sizeof(__kbstack_t)); \
-          __kp = __kstack + (__kmax>>1) - 1; \
-        } \
-        (__kp+1)->i = 0; (__kp+1)->x = __kp->x->is_internal? __KB_PTR(b, __kp->x)[__kp->i] : 0; \
-        ++__kp; \
-      } \
-      --__kp; \
-      if (__kp >= __kstack) { \
-        if (__kp->x && __kp->i < __kp->x->n) __func(&__KB_KEY(key_t, __kp->x)[__kp->i]); \
-        ++__kp->i; \
-      } else break; \
-    } \
-    free(__kstack); \
-  } while (0)
-
-#define __kb_get_first(key_t, b, ret) do { \
-    kbnode_t *__x = (b)->root; \
-    while (__KB_PTR(b, __x)[0] != 0) \
-      __x = __KB_PTR(b, __x)[0]; \
-    (ret) = __KB_KEY(key_t, __x)[0]; \
-  } while (0)
-
-#endif
-
 #endif  // NVIM_LIB_KBTREE_H
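
Note (not part of the patch): code that relied on the deleted __kb_traverse macro can
be rewritten against the iterator interface that the deprecation comment points to.
The sketch below is illustrative only: the "demo" tree, its int keys and walk_all()
are made-up names, and it assumes the kb_itr_first/kb_itr_valid/kb_itr_next/kb_itr_key
helpers that kbtree.h provides alongside the kb_itr_get/kb_itr_next calls used in
buffer.c above.

  // Illustrative sketch, not part of the patch: in-order walk via iterators.
  // "demo", its int keys and walk_all() are hypothetical; kb_itr_first,
  // kb_itr_valid, kb_itr_next and kb_itr_key are assumed to be the iterator
  // helpers declared in nvim/lib/kbtree.h.
  #include "nvim/lib/kbtree.h"

  // Instantiate a B-tree keyed by int, using the generic comparator and the
  // same T (10) as the bufhl tree above.
  KBTREE_INIT(demo, int, kb_generic_cmp, 10)

  // Visit every key in ascending order; this replaces
  //   __kb_traverse(int, tree, visit)
  static void walk_all(kbtree_t(demo) *tree)
  {
    kbitr_t(demo) itr;
    for (kb_itr_first(demo, tree, &itr); kb_itr_valid(&itr);
         kb_itr_next(demo, tree, &itr)) {
      int key = kb_itr_key(&itr);
      (void)key;  // visit(&key) would go here
    }
  }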