// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package discover

import (
	"sort"
	stdsync "sync"
	"time"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/util"
	"github.com/thejerf/suture"
)

// The CachingMux aggregates results from multiple Finders. Each Finder has
// an associated cache time and negative cache time. The cache time sets how
// long we cache and return successful lookup results; the negative cache
// time sets how long we refrain from asking about the same device ID after
// receiving a negative answer. A value of zero disables caching (positive
// or negative).
type CachingMux interface {
	FinderService
	Add(finder Finder, cacheTime, negCacheTime time.Duration, priority int)
	ChildErrors() map[string]error
}
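
// A minimal usage sketch (the finder values, durations, and priorities are
// hypothetical, for illustration only; the real wiring is up to the caller):
//
//	mux := NewCachingMux()
//	mux.Add(globalFinder, 5*time.Minute, time.Minute, 10) // cache hits 5 min, failures 1 min
//	mux.Add(localFinder, 0, 0, 20)                        // zero disables caching
//	addrs, _ := mux.Lookup(deviceID)                      // lowest priority value sorts first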

type cachingMux struct {
	*suture.Supervisor
	finders []cachedFinder
	caches  []*cache
	mut     sync.RWMutex
}

// A cachedFinder is a Finder with associated cache timeouts.
type cachedFinder struct {
	Finder
	cacheTime    time.Duration
	negCacheTime time.Duration
	priority     int
}

// A prioritizedAddress is what we use to sort addresses returned from
// different sources with different priorities.
type prioritizedAddress struct {
	priority int
	addr     string
}

// An error may implement cachedError, in which case it will be interrogated
// to see how long we should cache the error. This overrides the default
// negative cache time.
type cachedError interface {
	CacheFor() time.Duration
}
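
// A minimal sketch of an error type satisfying cachedError (hypothetical,
// for illustration; real implementations live alongside the finders):
//
//	type lookupError struct {
//		error
//		cacheFor time.Duration
//	}
//
//	func (e lookupError) CacheFor() time.Duration { return e.cacheFor }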

func NewCachingMux() CachingMux {
	return &cachingMux{
		Supervisor: suture.NewSimple("discover.cachingMux"),
		mut:        sync.NewRWMutex(),
	}
}

// Add registers a new Finder, with associated cache timeouts.
func (m *cachingMux) Add(finder Finder, cacheTime, negCacheTime time.Duration, priority int) {
	m.mut.Lock()
	m.finders = append(m.finders, cachedFinder{finder, cacheTime, negCacheTime, priority})
	m.caches = append(m.caches, newCache())
	m.mut.Unlock()

	if service, ok := finder.(suture.Service); ok {
		m.Supervisor.Add(service)
	}
}

// Lookup attempts to resolve the device ID using any of the added Finders,
// while obeying the cache settings.
func (m *cachingMux) Lookup(deviceID protocol.DeviceID) (addresses []string, err error) {
	var paddresses []prioritizedAddress

	m.mut.RLock()
	for i, finder := range m.finders {
		if cacheEntry, ok := m.caches[i].Get(deviceID); ok {
			// We have a cache entry. Let's see what it says.

			if cacheEntry.found && time.Since(cacheEntry.when) < finder.cacheTime {
				// It's a positive, valid entry. Use it.
				l.Debugln("cached discovery entry for", deviceID, "at", finder)
				l.Debugln("  cache:", cacheEntry)
				for _, addr := range cacheEntry.Addresses {
					paddresses = append(paddresses, prioritizedAddress{finder.priority, addr})
				}
				continue
			}

			valid := time.Now().Before(cacheEntry.validUntil) || time.Since(cacheEntry.when) < finder.negCacheTime
			if !cacheEntry.found && valid {
				// It's a negative, valid entry. We should not make another
				// attempt right now.
				l.Debugln("negative cache entry for", deviceID, "at", finder, "valid until", cacheEntry.when.Add(finder.negCacheTime), "or", cacheEntry.validUntil)
				continue
			}

			// It's expired. Ignore and continue.
		}

		// Perform the actual lookup and cache the result.
		if addrs, err := finder.Lookup(deviceID); err == nil {
			l.Debugln("lookup for", deviceID, "at", finder)
			l.Debugln("  addresses:", addrs)
			for _, addr := range addrs {
				paddresses = append(paddresses, prioritizedAddress{finder.priority, addr})
			}
			m.caches[i].Set(deviceID, CacheEntry{
				Addresses: addrs,
				when:      time.Now(),
				found:     len(addrs) > 0,
			})
		} else {
			// Lookup returned an error; add a negative cache entry.
			entry := CacheEntry{
				when:  time.Now(),
				found: false,
			}
			if err, ok := err.(cachedError); ok {
				entry.validUntil = time.Now().Add(err.CacheFor())
			}
			m.caches[i].Set(deviceID, entry)
		}
	}
	m.mut.RUnlock()

	addresses = uniqueSortedAddrs(paddresses)
	l.Debugln("lookup results for", deviceID)
	l.Debugln("  addresses: ", addresses)

	return addresses, nil
}

func (m *cachingMux) String() string {
	return "discovery cache"
}

func (m *cachingMux) Error() error {
	return nil
}

func (m *cachingMux) ChildErrors() map[string]error {
	children := make(map[string]error, len(m.finders))
	m.mut.RLock()
	for _, f := range m.finders {
		children[f.String()] = f.Error()
	}
	m.mut.RUnlock()
	return children
}

func (m *cachingMux) Cache() map[protocol.DeviceID]CacheEntry {
	// Res will be the "total" cache, i.e. the union of our cache and all our
	// children's caches.
	res := make(map[protocol.DeviceID]CacheEntry)

	m.mut.RLock()
	for i := range m.finders {
		// Each finder[i] has a corresponding cache at caches[i]. Go through
		// it and populate the total, appending any addresses and keeping
		// the newest "when" time. We skip any negative cache entries.
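		// (Hypothetical illustration: merging {when: t1, Addresses: [a]} with
		// {when: t2, Addresses: [b]}, t2 after t1, yields
		// {when: t2, Addresses: [a, b]}; duplicates are pruned after the loop.)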
		for k, v := range m.caches[i].Cache() {
			if v.found {
				cur := res[k]
				if v.when.After(cur.when) {
					cur.when = v.when
				}
				cur.Addresses = append(cur.Addresses, v.Addresses...)
				res[k] = cur
			}
		}

		// Then ask the finder itself for its cache and do the same. If this
		// finder is a global discovery client, it will have no cache. If it's
		// a local discovery client, this will be its current state.
		for k, v := range m.finders[i].Cache() {
			if v.found {
				cur := res[k]
				if v.when.After(cur.when) {
					cur.when = v.when
				}
				cur.Addresses = append(cur.Addresses, v.Addresses...)
				res[k] = cur
			}
		}
	}
	m.mut.RUnlock()

	for k, v := range res {
		v.Addresses = util.UniqueStrings(v.Addresses)
		res[k] = v
	}

	return res
}

// A cache can be embedded wherever useful

type cache struct {
	entries map[protocol.DeviceID]CacheEntry
	mut     stdsync.Mutex
}

func newCache() *cache {
	return &cache{
		entries: make(map[protocol.DeviceID]CacheEntry),
	}
}

func (c *cache) Set(id protocol.DeviceID, ce CacheEntry) {
	c.mut.Lock()
	c.entries[id] = ce
	c.mut.Unlock()
}

func (c *cache) Get(id protocol.DeviceID) (CacheEntry, bool) {
	c.mut.Lock()
	ce, ok := c.entries[id]
	c.mut.Unlock()
	return ce, ok
}

func (c *cache) Cache() map[protocol.DeviceID]CacheEntry {
	c.mut.Lock()
	m := make(map[protocol.DeviceID]CacheEntry, len(c.entries))
	for k, v := range c.entries {
		m[k] = v
	}
	c.mut.Unlock()
	return m
}

func uniqueSortedAddrs(ss []prioritizedAddress) []string {
	// We sort the addresses by priority, then filter them on first-seen
	// (the first time an address is seen it is kept, so we retain priority).
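	//
	// Hypothetical example: given {1, "tcp://b"}, {0, "tcp://a"}, {1, "tcp://b"},
	// the result is ["tcp://a", "tcp://b"]: lower priority values sort first
	// and duplicate addresses are dropped.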
	sort.Sort(prioritizedAddressList(ss))
	filtered := make([]string, 0, len(ss))
	seen := make(map[string]struct{}, len(ss))
	for _, s := range ss {
		if _, ok := seen[s.addr]; !ok {
			filtered = append(filtered, s.addr)
			seen[s.addr] = struct{}{}
		}
	}
	return filtered
}

type prioritizedAddressList []prioritizedAddress

func (l prioritizedAddressList) Len() int {
	return len(l)
}

func (l prioritizedAddressList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l prioritizedAddressList) Less(a, b int) bool {
	if l[a].priority != l[b].priority {
		return l[a].priority < l[b].priority
	}
	return l[a].addr < l[b].addr
}