aa1039e73c
inetpeer currently uses an AVL tree protected by an rwlock. It's possible to make most lookups use RCU: 1) Add a struct rcu_head to struct inet_peer. 2) Add a lookup_rcu_bh() helper to perform a lockless and opportunistic lookup. This is a normal function, not a macro like lookup(). 3) Add a limit to the number of links followed by lookup_rcu_bh(). This is needed in case we fall into a loop. 4) Add an smp_wmb() in link_to_pool() right before node insert. 5) Make unlink_from_pool() use atomic_cmpxchg() to make sure it can take the last reference to an inet_peer, since lockless readers could increase the refcount, even while we hold peers.lock. 6) Delay struct inet_peer freeing until after an RCU grace period so that lookup_rcu_bh() cannot crash. 7) inet_getpeer() first attempts a lockless lookup. Note this lookup can fail even if the target is in the AVL tree, because a concurrent writer can leave the tree in a transiently inconsistent form. If this attempt fails, the lock is taken and a regular lookup is performed again. 8) Convert peers.lock from an rwlock to a spinlock. 9) Remove SLAB_HWCACHE_ALIGN when peer_cachep is created, because rcu_head adds 16 bytes on 64bit arches, doubling the effective size (64 -> 128 bytes). In a future patch it is probably possible to revert this part, if the rcu field is put in a union to share space with rid, ip_id_count, tcp_ts & tcp_ts_stamp — these fields being manipulated only with refcnt > 0. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
48 lines
1.2 KiB
C
48 lines
1.2 KiB
C
/*
|
|
* INETPEER - A storage for permanent information about peers
|
|
*
|
|
* Authors: Andrey V. Savochkin <saw@msu.ru>
|
|
*/
|
|
|
|
#ifndef _NET_INETPEER_H
|
|
#define _NET_INETPEER_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/init.h>
|
|
#include <linux/jiffies.h>
|
|
#include <linux/spinlock.h>
|
|
#include <asm/atomic.h>
|
|
|
|
/*
 * Per-destination cache entry holding long-lived state about a remote
 * peer (IP ID counter, fragment reassembly counter, TCP timestamps).
 * Entries live in an AVL tree keyed by v4daddr and may be looked up
 * locklessly under RCU; freeing is therefore deferred via @rcu.
 */
struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer	*avl_left, *avl_right;	/* AVL tree children */
	__be32			v4daddr;	/* peer's address */
	__u32			avl_height;	/* AVL balance information */
	struct list_head	unused;		/* link in the unused-entries list */
	__u32			dtime;		/* the time of last use of not
						 * referenced entries */
	atomic_t		refcnt;		/* atomic: lockless RCU readers may
						 * take references concurrently */
	atomic_t		rid;		/* Frag reception counter */
	atomic_t		ip_id_count;	/* IP ID for the next packet */
	__u32			tcp_ts;		/* NOTE(review): presumably cached peer
						 * TCP timestamp — used only with
						 * refcnt > 0 per the commit message */
	__u32			tcp_ts_stamp;
	struct rcu_head		rcu;		/* defers kfree past an RCU grace
						 * period so lockless lookups never
						 * touch freed memory */
};
|
|
|
|
/* One-time boot-time initialisation of the peer cache (__init section). */
void inet_initpeers(void) __init;

/* can be called with or without local BH being disabled */
/* Look up (and, if @create, allocate) the entry for @daddr; returns a
 * referenced inet_peer or NULL. */
struct inet_peer *inet_getpeer(__be32 daddr, int create);

/* can be called from BH context or outside */
/* Drop a reference previously obtained from inet_getpeer(). */
extern void inet_putpeer(struct inet_peer *p);
|
/* can be called with or without local BH being disabled */
|
|
static inline __u16 inet_getid(struct inet_peer *p, int more)
|
|
{
|
|
more++;
|
|
return atomic_add_return(more, &p->ip_id_count) - more;
|
|
}
|
|
|
|
#endif /* _NET_INETPEER_H */
|