lib/db, lib/model: Create temp sorting database in config dir (fixes #3449)
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3454
parent aafc96f58f
commit 72026db599
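In short: the on-disk index sorter used when sending large indexes previously created its temporary LevelDB under the system temporary directory. With this change the database Instance remembers the path it was opened with and exposes it via Location(), Model.ClusterConfig derives the parent directory with filepath.Dir(m.db.Location()), and that directory is threaded through sendIndexes and sendIndexTo into NewIndexSorter, so the temporary sorter database is created next to the index database in the configuration directory.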
lib/db (database instance):

@@ -29,6 +29,7 @@ type deletionHandler func(t readWriteTransaction, folder, device, name []byte, d
 type Instance struct {
 	committed int64 // this must be the first attribute in the struct to ensure 64 bit alignment on 32 bit plaforms
 	*leveldb.DB
+	location  string
 	folderIdx *smallIndex
 	deviceIdx *smallIndex
 }
@@ -64,17 +65,18 @@ func Open(file string) (*Instance, error) {
 		return nil, err
 	}
 
-	return newDBInstance(db), nil
+	return newDBInstance(db, file), nil
 }
 
 func OpenMemory() *Instance {
 	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
-	return newDBInstance(db)
+	return newDBInstance(db, "<memory>")
 }
 
-func newDBInstance(db *leveldb.DB) *Instance {
+func newDBInstance(db *leveldb.DB, location string) *Instance {
 	i := &Instance{
-		DB: db,
+		DB:       db,
+		location: location,
 	}
 	i.folderIdx = newSmallIndex(i, []byte{KeyTypeFolderIdx})
 	i.deviceIdx = newSmallIndex(i, []byte{KeyTypeDeviceIdx})
@@ -86,6 +88,11 @@ func (db *Instance) Committed() int64 {
 	return atomic.LoadInt64(&db.committed)
 }
 
+// Location returns the filesystem path where the database is stored
+func (db *Instance) Location() string {
+	return db.location
+}
+
 func (db *Instance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) {
 	sort.Sort(fileList(fs)) // sort list on name, same as in the database
 
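For illustration, a minimal sketch of the new accessor as seen from outside the package. The paths and the temporary stand-in config directory are hypothetical; only db.Open, the embedded LevelDB Close, and the new Location method are taken from the diff above.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/syncthing/syncthing/lib/db"
)

func main() {
	// Hypothetical stand-in for the Syncthing configuration directory.
	cfgDir, err := ioutil.TempDir("", "example-config.")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(cfgDir)

	// Open an index database inside it; "index.db" is an arbitrary name.
	ldb, err := db.Open(filepath.Join(cfgDir, "index.db"))
	if err != nil {
		log.Fatal(err)
	}
	defer ldb.Close()

	// Location() reports the path the database was opened with;
	// OpenMemory() would report "<memory>" instead.
	fmt.Println(ldb.Location())
}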
lib/model (cluster config and index sending):

@@ -655,6 +655,8 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
 		panic("bug: ClusterConfig called on closed or nonexistent connection")
 	}
 
+	dbLocation := filepath.Dir(m.db.Location())
+
 	m.fmut.Lock()
 	for _, folder := range cm.Folders {
 		if !m.folderSharedWithUnlocked(folder.ID, deviceID) {
@@ -741,7 +743,7 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
 			}
 		}
 
-		go sendIndexes(conn, folder.ID, fs, m.folderIgnores[folder.ID], startLocalVersion)
+		go sendIndexes(conn, folder.ID, fs, m.folderIgnores[folder.ID], startLocalVersion, dbLocation)
 	}
 	m.fmut.Unlock()
 
@@ -1214,7 +1216,7 @@ func (m *Model) receivedFile(folder string, file protocol.FileInfo) {
 	m.folderStatRef(folder).ReceivedFile(file.Name, file.IsDeleted())
 }
 
-func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher, startLocalVersion int64) {
+func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher, startLocalVersion int64, dbLocation string) {
 	deviceID := conn.ID()
 	name := conn.Name()
 	var err error
@@ -1222,7 +1224,7 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
 	l.Debugf("sendIndexes for %s-%s/%q starting (slv=%d)", deviceID, name, folder, startLocalVersion)
 	defer l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
 
-	minLocalVer, err := sendIndexTo(startLocalVersion, conn, folder, fs, ignores)
+	minLocalVer, err := sendIndexTo(startLocalVersion, conn, folder, fs, ignores, dbLocation)
 
 	// Subscribe to LocalIndexUpdated (we have new information to send) and
 	// DeviceDisconnected (it might be us who disconnected, so we should
@@ -1245,7 +1247,7 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
 			continue
 		}
 
-		minLocalVer, err = sendIndexTo(minLocalVer, conn, folder, fs, ignores)
+		minLocalVer, err = sendIndexTo(minLocalVer, conn, folder, fs, ignores, dbLocation)
 
 		// Wait a short amount of time before entering the next loop. If there
 		// are continuous changes happening to the local index, this gives us
@@ -1254,7 +1256,7 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
 	}
 }
 
-func sendIndexTo(minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (int64, error) {
+func sendIndexTo(minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher, dbLocation string) (int64, error) {
 	deviceID := conn.ID()
 	name := conn.Name()
 	batch := make([]protocol.FileInfo, 0, indexBatchSize)
@@ -1263,7 +1265,7 @@ func sendIndexTo(minLocalVer int64, conn protocol.Connection, folder string, fs
 	maxLocalVer := minLocalVer
 	var err error
 
-	sorter := NewIndexSorter()
+	sorter := NewIndexSorter(dbLocation)
 	defer sorter.Close()
 
 	fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
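For context, m.db.Location() is the path the index database was opened with (a LevelDB database is a directory on disk), so filepath.Dir yields its parent: the configuration directory. That string is what gets passed down through sendIndexes and sendIndexTo to the sorter. A standalone sketch with a hypothetical path:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical return value of m.db.Location().
	dbLocation := "/home/user/.config/syncthing/index.db"

	// filepath.Dir strips the final path element, leaving the directory
	// that the temporary sorter database should be created in.
	fmt.Println(filepath.Dir(dbLocation)) // /home/user/.config/syncthing
}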
lib/model (index sorter):

@@ -39,9 +39,10 @@ type internalIndexSorter interface {
 // NewIndexSorter returns a new IndexSorter that will start out in memory
 // for efficiency but switch to on disk storage once the amount of data
 // becomes large.
-func NewIndexSorter() IndexSorter {
+func NewIndexSorter(location string) IndexSorter {
 	return &autoSwitchingIndexSorter{
 		internalIndexSorter: newInMemoryIndexSorter(),
+		location:            location,
 	}
 }
 
@@ -49,6 +50,7 @@ func NewIndexSorter() IndexSorter {
 // onDiskSorter when the in memory sorter is full().
 type autoSwitchingIndexSorter struct {
 	internalIndexSorter
+	location string
 }
 
 func (s *autoSwitchingIndexSorter) Append(f protocol.FileInfo) {
@@ -58,7 +60,7 @@ func (s *autoSwitchingIndexSorter) Append(f protocol.FileInfo) {
 		// which case we *don't* need to spill. An example of this would be
 		// an index containing just a single large file.
 		l.Debugf("sorter %p spills to disk", s)
-		next := newOnDiskIndexSorter()
+		next := newOnDiskIndexSorter(s.location)
 		s.internalIndexSorter.copyTo(next)
 		s.internalIndexSorter = next
 	}
@@ -129,7 +131,7 @@ type onDiskIndexSorter struct {
 	dir string
 }
 
-func newOnDiskIndexSorter() *onDiskIndexSorter {
+func newOnDiskIndexSorter(location string) *onDiskIndexSorter {
 	// Set options to minimize resource usage.
 	opts := &opt.Options{
 		OpenFilesCacheCapacity: 10,
@@ -137,7 +139,7 @@ func newOnDiskIndexSorter() *onDiskIndexSorter {
 	}
 
 	// Use a temporary database directory.
-	tmp, err := ioutil.TempDir("", "syncthing-db.")
+	tmp, err := ioutil.TempDir(location, "tmp-index-sorter.")
 	if err != nil {
 		panic("creating temporary directory: " + err.Error())
 	}
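The heart of the fix is the changed ioutil.TempDir call: with an empty first argument the temporary directory is created under the system-wide temp dir (os.TempDir()), while a non-empty first argument places it inside that directory. A small sketch of the difference, using a throwaway directory as a stand-in for the database location (names are illustrative only):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	// Old behaviour: an empty dir argument means os.TempDir(), e.g. /tmp.
	oldStyle, err := ioutil.TempDir("", "syncthing-db.")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(oldStyle)

	// Stand-in for the configuration/database directory.
	location, err := ioutil.TempDir("", "example-config.")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(location)

	// New behaviour: the temporary sorter database lives inside location.
	newStyle, err := ioutil.TempDir(location, "tmp-index-sorter.")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(newStyle)

	fmt.Println(oldStyle) // under the system temp directory
	fmt.Println(newStyle) // under location
}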
lib/model (index sorter tests):

@@ -30,7 +30,7 @@ func TestOnDiskIndexSorter(t *testing.T) {
 	// An onDiskSorter should be able to absorb a few files in unsorted
 	// order, and return them sorted.
 
-	s := newOnDiskIndexSorter()
+	s := newOnDiskIndexSorter("testdata")
 	addFiles(50, s)
 	verifySorted(t, s, 50)
 	verifyBreak(t, s, 50)
@@ -58,7 +58,7 @@ func TestIndexSorter(t *testing.T) {
 	// An default IndexSorter should be able to absorb files, have them in
 	// memory, and at some point switch to an on disk database.
 
-	s := NewIndexSorter()
+	s := NewIndexSorter("testdata")
 	defer s.Close()
 
 	// We should start out as an in memory store.