// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package db

import (
	"bytes"
	"encoding/binary"
	"os"
	"sort"
	"strings"
	"sync/atomic"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

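// A deletionHandler processes a single item during a deletion scan. It
// receives the current read/write transaction, the folder, device and name
// identifying the item, and the iterator positioned at it.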
type deletionHandler func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator)

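// Instance is a database instance, wrapping the underlying leveldb database
// with the folder and device indexes and commit bookkeeping used by the rest
// of the package.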
type Instance struct {
	committed int64 // this must be the first attribute in the struct to ensure 64 bit alignment on 32 bit platforms
	*leveldb.DB
	location  string
	folderIdx *smallIndex
	deviceIdx *smallIndex
}

const (
	keyPrefixLen = 1
	keyFolderLen = 4 // indexed
	keyDeviceLen = 4 // indexed
	keyHashLen   = 32
)

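// Open opens the database at the given location. A corrupted database is
// first recovered if possible, and otherwise dropped and reinitialized, as
// its contents can be rebuilt by reindexing.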
func Open(file string) (*Instance, error) {
	opts := &opt.Options{
		OpenFilesCacheCapacity: 100,
		WriteBuffer:            4 << 20,
	}

	db, err := leveldb.OpenFile(file, opts)
	if leveldbIsCorrupted(err) {
		db, err = leveldb.RecoverFile(file, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(file); err != nil {
			return nil, err
		}
		db, err = leveldb.OpenFile(file, opts)
	}
	if err != nil {
		return nil, err
	}

	return newDBInstance(db, file), nil
}

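// OpenMemory returns an Instance backed by in-memory leveldb storage, with
// nothing persisted to disk.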
func OpenMemory() *Instance {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	return newDBInstance(db, "<memory>")
}

func newDBInstance(db *leveldb.DB, location string) *Instance {
	i := &Instance{
		DB:       db,
		location: location,
	}
	i.folderIdx = newSmallIndex(i, []byte{KeyTypeFolderIdx})
	i.deviceIdx = newSmallIndex(i, []byte{KeyTypeDeviceIdx})
	return i
}

// Committed returns the number of items committed to the database since startup
func (db *Instance) Committed() int64 {
	return atomic.LoadInt64(&db.committed)
}

// Location returns the filesystem path where the database is stored
func (db *Instance) Location() string {
	return db.location
}

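// updateFiles stores the given file infos for the given folder and device.
// Entries that are new or differ from what is already stored also update the
// global version list and the metadata tracker.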
func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) {
	t := db.newReadWriteTransaction()
	defer t.close()

	var fk []byte
	for _, f := range fs {
		name := []byte(f.Name)
		fk = db.deviceKeyInto(fk[:cap(fk)], folder, device, name)

		// Get and unmarshal the file entry. If it doesn't exist or can't be
		// unmarshalled we'll add it as a new entry.
		bs, err := t.Get(fk, nil)
		var ef FileInfoTruncated
		if err == nil {
			err = ef.Unmarshal(bs)
		}

		// The Invalid flag might change without the version being bumped.
		if err == nil && ef.Version.Equal(f.Version) && ef.Invalid == f.Invalid {
			continue
		}

		devID := protocol.DeviceIDFromBytes(device)
		if err == nil {
			meta.removeFile(devID, ef)
		}
		meta.addFile(devID, f)

		t.insertFile(folder, device, f)
		t.updateGlobal(folder, device, f, meta)

		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		t.checkFlush()
	}
}

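// withHave iterates over the file entries that the given device has in the
// folder, limited to names equal to or below the given prefix, calling fn
// for each until fn returns false.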
func (db *Instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, prefix)[:keyPrefixLen+keyFolderLen+keyDeviceLen+len(prefix)]), nil)
	defer dbi.Release()

	slashedPrefix := prefix
	if !bytes.HasSuffix(prefix, []byte{'/'}) {
		slashedPrefix = append(slashedPrefix, '/')
	}

	for dbi.Next() {
		name := db.deviceKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.Equal(name, prefix) && !bytes.HasPrefix(name, slashedPrefix) {
			return
		}

		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		f, err := unmarshalTrunc(append([]byte{}, dbi.Value()...), truncate)
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}
		if cont := fn(f); !cont {
			return
		}
	}
}

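// withAllFolderTruncated iterates over the truncated file entries of all
// devices in the given folder, calling fn for each until fn returns false.
// Entries with obviously invalid names are dropped from the database as they
// are encountered.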
func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	for dbi.Next() {
		device := db.deviceKeyDevice(dbi.Key())
		var f FileInfoTruncated
		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		err := f.Unmarshal(append([]byte{}, dbi.Value()...))
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}

		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from database", f.Name)
			t.removeFromGlobal(folder, device, nil, nil)
			t.Delete(dbi.Key())
			t.checkFlush()
			continue
		}

		if cont := fn(device, f); !cont {
			return
		}
	}
}

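// getFile returns the file entry for the given folder, device and file name,
// if one exists.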
func (db *Instance) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
	return getFile(db, db.deviceKey(folder, device, file))
}

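// getGlobal returns the file entry for the globally newest version of the
// given file, truncated or full depending on the truncate flag.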
func (db *Instance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
	k := db.globalKey(folder, file)

	t := db.newReadOnlyTransaction()
	defer t.close()

	bs, err := t.Get(k, nil)
	if err != nil {
		return nil, false
	}

	var vl VersionList
	err = vl.Unmarshal(bs)
	if err != nil {
		l.Debugln("unmarshal error:", k, err)
		return nil, false
	}
	if len(vl.Versions) == 0 {
		l.Debugln("no versions:", k)
		return nil, false
	}

	k = db.deviceKey(folder, vl.Versions[0].Device, file)
	bs, err = t.Get(k, nil)
	if err != nil {
		l.Debugln("surprise error:", k, err)
		return nil, false
	}

	fi, err := unmarshalTrunc(bs, truncate)
	if err != nil {
		l.Debugln("unmarshal error:", k, err)
		return nil, false
	}
	return fi, true
}

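// withGlobal iterates over the globally newest version of each file in the
// folder, limited to names equal to or below the given prefix, calling fn
// for each until fn returns false.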
func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, prefix)), nil)
	defer dbi.Release()

	slashedPrefix := prefix
	if !bytes.HasSuffix(prefix, []byte{'/'}) {
		slashedPrefix = append(slashedPrefix, '/')
	}

	var fk []byte
	for dbi.Next() {
		var vl VersionList
		err := vl.Unmarshal(dbi.Value())
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}
		if len(vl.Versions) == 0 {
			l.Debugln("no versions:", dbi.Key())
			continue
		}

		name := db.globalKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.Equal(name, prefix) && !bytes.HasPrefix(name, slashedPrefix) {
			return
		}

		fk = db.deviceKeyInto(fk[:cap(fk)], folder, vl.Versions[0].Device, name)
		bs, err := t.Get(fk, nil)
		if err != nil {
			l.Debugln("surprise error:", err)
			continue
		}

		f, err := unmarshalTrunc(bs, truncate)
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}

		if cont := fn(f); !cont {
			return
		}
	}
}

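// availability returns the list of devices that have the newest (global)
// version of the given file. Entries marked invalid are skipped.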
func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
	k := db.globalKey(folder, file)
	bs, err := db.Get(k, nil)
	if err == leveldb.ErrNotFound {
		return nil
	}
	if err != nil {
		l.Debugln("surprise error:", err)
		return nil
	}

	var vl VersionList
	err = vl.Unmarshal(bs)
	if err != nil {
		l.Debugln("unmarshal error:", err)
		return nil
	}

	var devices []protocol.DeviceID
	for _, v := range vl.Versions {
		if !v.Version.Equal(vl.Versions[0].Version) {
			break
		}
		if v.Invalid {
			continue
		}
		n := protocol.DeviceIDFromBytes(v.Device)
		devices = append(devices, n)
	}

	return devices
}

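// withNeed iterates over the files in the folder that the given device
// needs: files for which the device has no entry, or only an older version
// than the global one. fn is called for each such file until it returns
// false.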
func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	var fk []byte
	for dbi.Next() {
		var vl VersionList
		err := vl.Unmarshal(dbi.Value())
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}
		if len(vl.Versions) == 0 {
			l.Debugln("no versions:", dbi.Key())
			continue
		}

		have := false // If we have the file, any version
		need := false // If we have a lower version of the file
		var haveFileVersion FileVersion
		for _, v := range vl.Versions {
			if bytes.Equal(v.Device, device) {
				have = true
				haveFileVersion = v
				// XXX: This marks concurrent (i.e. conflicting) changes as
				// needs. Maybe we should do that, but it needs special
				// handling in the puller.
				need = !v.Version.GreaterEqual(vl.Versions[0].Version)
				break
			}
		}

		if have && !need {
			continue
		}

		name := db.globalKeyName(dbi.Key())
		needVersion := vl.Versions[0].Version
		needDevice := protocol.DeviceIDFromBytes(vl.Versions[0].Device)

		for i := range vl.Versions {
			if !vl.Versions[i].Version.Equal(needVersion) {
				// We haven't found a valid copy of the file with the needed version.
				break
			}

			if vl.Versions[i].Invalid {
				// The file is marked invalid, don't use it.
				continue
			}

			fk = db.deviceKeyInto(fk[:cap(fk)], folder, vl.Versions[i].Device, name)
			bs, err := t.Get(fk, nil)
			if err != nil {
				l.Debugln("surprise error:", err)
				continue
			}

			gf, err := unmarshalTrunc(bs, truncate)
			if err != nil {
				l.Debugln("unmarshal error:", err)
				continue
			}

			if gf.IsDeleted() && !have {
				// We don't need deleted files that we don't have
				break
			}

			l.Debugf("need folder=%q device=%v name=%q need=%v have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveFileVersion.Invalid, haveFileVersion.Version, needVersion, needDevice)

			if cont := fn(gf); !cont {
				return
			}

			// This file is handled, no need to look further in the version list
			break
		}
	}
}

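// ListFolders returns the sorted list of folders that have entries in the
// database.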
func (db *Instance) ListFolders() []string {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
	defer dbi.Release()

	folderExists := make(map[string]bool)
	for dbi.Next() {
		folder, ok := db.globalKeyFolder(dbi.Key())
		if ok && !folderExists[string(folder)] {
			folderExists[string(folder)] = true
		}
	}

	folders := make([]string, 0, len(folderExists))
	for k := range folderExists {
		folders = append(folders, k)
	}

	sort.Strings(folders)
	return folders
}

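// dropFolder removes all device and global entries for the given folder.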
func (db *Instance) dropFolder(folder []byte) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	// Remove all items related to the given folder from the device->file bucket
	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeDevice}), nil)
	for dbi.Next() {
		itemFolder := db.deviceKeyFolder(dbi.Key())
		if bytes.Equal(folder, itemFolder) {
			db.Delete(dbi.Key(), nil)
		}
	}
	dbi.Release()

	// Remove all items related to the given folder from the global bucket
	dbi = t.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
	for dbi.Next() {
		itemFolder, ok := db.globalKeyFolder(dbi.Key())
		if ok && bytes.Equal(folder, itemFolder) {
			db.Delete(dbi.Key(), nil)
		}
	}
	dbi.Release()
}

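// dropDeviceFolder removes all file entries for the given device and folder,
// updating the global list and metadata tracker accordingly.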
func (db *Instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)), nil)
	defer dbi.Release()

	for dbi.Next() {
		key := dbi.Key()
		name := db.deviceKeyName(key)
		t.removeFromGlobal(folder, device, name, meta)
		t.Delete(key)
		t.checkFlush()
	}
}

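// checkGlobals verifies that the global version lists in the folder refer
// only to files that actually exist, rewriting any lists that do not and
// accounting the surviving global files in the metadata tracker.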
func (db *Instance) checkGlobals(folder []byte, meta *metadataTracker) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	var fk []byte
	for dbi.Next() {
		gk := dbi.Key()
		var vl VersionList
		err := vl.Unmarshal(dbi.Value())
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}

		// Check the global version list for consistency. An issue in previous
		// versions of goleveldb could result in reordered writes so that
		// there are global entries pointing to no longer existing files. Here
		// we find those and clear them out.

		name := db.globalKeyName(gk)
		var newVL VersionList
		for i, version := range vl.Versions {
			fk = db.deviceKeyInto(fk[:cap(fk)], folder, version.Device, name)

			_, err := t.Get(fk, nil)
			if err == leveldb.ErrNotFound {
				continue
			}
			if err != nil {
				l.Debugln("surprise error:", err)
				return
			}
			newVL.Versions = append(newVL.Versions, version)

			if i == 0 {
				if fi, ok := t.getFile(folder, version.Device, name); ok {
					meta.addFile(globalDeviceID, fi)
				}
			}
		}

		if len(newVL.Versions) != len(vl.Versions) {
			t.Put(dbi.Key(), mustMarshal(&newVL))
			t.checkFlush()
		}
	}
	l.Debugf("db check completed for %q", folder)
}

// ConvertSymlinkTypes should be run once only on an old database. It
// changes SYMLINK_FILE and SYMLINK_DIRECTORY types to the current SYMLINK
// type (previously SYMLINK_UNKNOWN). It does this for all devices, both
// local and remote, and does not reset delta indexes. It shouldn't really
// matter what the symlink type is, but this cleans it up for a possible
// future when SYMLINK_FILE and SYMLINK_DIRECTORY are no longer understood.
func (db *Instance) ConvertSymlinkTypes() {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeDevice}), nil)
	defer dbi.Release()

	conv := 0
	for dbi.Next() {
		var f protocol.FileInfo
		if err := f.Unmarshal(dbi.Value()); err != nil {
			// probably can't happen
			continue
		}
		if f.Type == protocol.FileInfoTypeDeprecatedSymlinkDirectory || f.Type == protocol.FileInfoTypeDeprecatedSymlinkFile {
			f.Type = protocol.FileInfoTypeSymlink
			bs, err := f.Marshal()
			if err != nil {
				panic("can't happen: " + err.Error())
			}
			t.Put(dbi.Key(), bs)
			t.checkFlush()
			conv++
		}
	}

	l.Infof("Updated symlink type for %d index entries", conv)
}

// AddInvalidToGlobal searches for invalid files and adds them to the global list.
// Invalid files exist in the db if they once were not ignored and subsequently
// ignored. In the new system this is still valid, but invalid files must also be
// in the global list such that they cannot be mistaken for missing files.
func (db *Instance) AddInvalidToGlobal(folder, device []byte) int {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]), nil)
	defer dbi.Release()

	changed := 0
	for dbi.Next() {
		var file protocol.FileInfo
		if err := file.Unmarshal(dbi.Value()); err != nil {
			// probably can't happen
			continue
		}
		if file.Invalid {
			changed++

			l.Debugf("add invalid to global; folder=%q device=%v file=%q version=%v", folder, protocol.DeviceIDFromBytes(device), file.Name, file.Version)

			// this is an adapted version of readWriteTransaction.updateGlobal
			name := []byte(file.Name)
			gk := t.db.globalKey(folder, name)

			var fl VersionList
			if svl, err := t.Get(gk, nil); err == nil {
				fl.Unmarshal(svl) // skip error, range handles success case
			}

			nv := FileVersion{
				Device:  device,
				Version: file.Version,
				Invalid: file.Invalid,
			}

			inserted := false
			// Find a position in the list to insert this file. The file at the
			// front of the list is the newest, the "global" one.
		insert:
			for i := range fl.Versions {
				switch fl.Versions[i].Version.Compare(file.Version) {
				case protocol.Equal:
					// Invalid files should go after a valid file of equal version
					if nv.Invalid {
						continue insert
					}
					fallthrough

				case protocol.Lesser:
					// The version at this point in the list is equal to or lesser
					// ("older") than us. We insert ourselves in front of it.
					fl.Versions = insertVersion(fl.Versions, i, nv)
					inserted = true
					break insert

				case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
					// The version at this point is in conflict with us. We must pull
					// the actual file metadata to determine who wins. If we win, we
					// insert ourselves in front of the loser here. (The "Lesser" and
					// "Greater" in the condition above is just based on the device
					// IDs in the version vector, which is not the only thing we use
					// to determine the winner.)
					//
					// A surprise missing file entry here is counted as a win for us.
					of, ok := t.getFile(folder, fl.Versions[i].Device, name)
					if !ok || file.WinsConflict(of) {
						fl.Versions = insertVersion(fl.Versions, i, nv)
						inserted = true
						break insert
					}
				}
			}

			if !inserted {
				// We didn't find a position for an insert above, so append to the end.
				fl.Versions = append(fl.Versions, nv)
			}

			t.Put(gk, mustMarshal(&fl))
		}
	}

	return changed
}

// deviceKey returns a byte slice encoding the following information:
// keyTypeDevice (1 byte)
// folder (4 bytes)
// device (4 bytes)
// name (variable size)
func (db *Instance) deviceKey(folder, device, file []byte) []byte {
	return db.deviceKeyInto(nil, folder, device, file)
}

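// deviceKeyInto encodes the device key for the given folder, device and file
// into k, reallocating k if it is too small, and returns the encoded slice.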
func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
	reqLen := keyPrefixLen + keyFolderLen + keyDeviceLen + len(file)
	if len(k) < reqLen {
		k = make([]byte, reqLen)
	}
	k[0] = KeyTypeDevice
	binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
	binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
	copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], file)
	return k[:reqLen]
}

// deviceKeyName returns the file name from the key
func (db *Instance) deviceKeyName(key []byte) []byte {
	return key[keyPrefixLen+keyFolderLen+keyDeviceLen:]
}

// deviceKeyFolder returns the folder name from the key
func (db *Instance) deviceKeyFolder(key []byte) []byte {
	folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
	if !ok {
		panic("bug: lookup of nonexistent folder ID")
	}
	return folder
}

// deviceKeyDevice returns the device ID from the key
func (db *Instance) deviceKeyDevice(key []byte) []byte {
	device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen+keyFolderLen:]))
	if !ok {
		panic("bug: lookup of nonexistent device ID")
	}
	return device
}

// globalKey returns a byte slice encoding the following information:
// keyTypeGlobal (1 byte)
// folder (4 bytes)
// name (variable size)
func (db *Instance) globalKey(folder, file []byte) []byte {
	k := make([]byte, keyPrefixLen+keyFolderLen+len(file))
	k[0] = KeyTypeGlobal
	binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
	copy(k[keyPrefixLen+keyFolderLen:], file)
	return k
}

// globalKeyName returns the filename from the key
func (db *Instance) globalKeyName(key []byte) []byte {
	return key[keyPrefixLen+keyFolderLen:]
}

// globalKeyFolder returns the folder name from the key, and whether the
// folder index was known
func (db *Instance) globalKeyFolder(key []byte) ([]byte, bool) {
	return db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
}

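// getIndexID returns the index ID stored for the given device and folder, or
// zero if there is none or it cannot be unmarshalled.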
func (db *Instance) getIndexID(device, folder []byte) protocol.IndexID {
	key := db.indexIDKey(device, folder)
	cur, err := db.Get(key, nil)
	if err != nil {
		return 0
	}

	var id protocol.IndexID
	if err := id.Unmarshal(cur); err != nil {
		return 0
	}

	return id
}

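// setIndexID stores the given index ID for the device and folder, panicking
// if the database write fails.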
func (db *Instance) setIndexID(device, folder []byte, id protocol.IndexID) {
	key := db.indexIDKey(device, folder)
	bs, _ := id.Marshal() // marshalling can't fail
	if err := db.Put(key, bs, nil); err != nil {
		panic("storing index ID: " + err.Error())
	}
}

func (db *Instance) indexIDKey(device, folder []byte) []byte {
	k := make([]byte, keyPrefixLen+keyDeviceLen+keyFolderLen)
	k[0] = KeyTypeIndexID
	binary.BigEndian.PutUint32(k[keyPrefixLen:], db.deviceIdx.ID(device))
	binary.BigEndian.PutUint32(k[keyPrefixLen+keyDeviceLen:], db.folderIdx.ID(folder))
	return k
}

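// indexIDDevice returns the device ID component of an index ID key.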
func (db *Instance) indexIDDevice(key []byte) []byte {
	device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
	if !ok {
		// Unknown device index; shouldn't happen, but returning nil beats
		// panicking.
		return nil
	}
	return device
}

func (db *Instance) mtimesKey(folder []byte) []byte {
	prefix := make([]byte, 5) // key type + 4 bytes folder idx number
	prefix[0] = KeyTypeVirtualMtime
	binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
	return prefix
}

func (db *Instance) folderMetaKey(folder []byte) []byte {
	prefix := make([]byte, 5) // key type + 4 bytes folder idx number
	prefix[0] = KeyTypeFolderMeta
	binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
	return prefix
}

// DropLocalDeltaIndexIDs removes all index IDs for the local device ID from
// the database. This will cause a full index transmission on the next
// connection.
func (db *Instance) DropLocalDeltaIndexIDs() {
	db.dropDeltaIndexIDs(true)
}

// DropRemoteDeltaIndexIDs removes all index IDs for the other devices than
// the local one from the database. This will cause them to send us a full
// index on the next connection.
func (db *Instance) DropRemoteDeltaIndexIDs() {
	db.dropDeltaIndexIDs(false)
}

func (db *Instance) dropDeltaIndexIDs(local bool) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
	defer dbi.Release()

	for dbi.Next() {
		device := db.indexIDDevice(dbi.Key())
		if bytes.Equal(device, protocol.LocalDeviceID[:]) == local {
			t.Delete(dbi.Key())
		}
	}
}

func (db *Instance) dropMtimes(folder []byte) {
	db.dropPrefix(db.mtimesKey(folder))
}

func (db *Instance) dropFolderMeta(folder []byte) {
	db.dropPrefix(db.folderMetaKey(folder))
}

func (db *Instance) dropPrefix(prefix []byte) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(prefix), nil)
	defer dbi.Release()

	for dbi.Next() {
		t.Delete(dbi.Key())
	}
}

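// unmarshalTrunc unmarshals bs as a FileInfoTruncated when truncate is set,
// and as a full protocol.FileInfo otherwise.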
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
	if truncate {
		var tf FileInfoTruncated
		err := tf.Unmarshal(bs)
		return tf, err
	}

	var tf protocol.FileInfo
	err := tf.Unmarshal(bs)
	return tf, err
}

// A "better" version of leveldb's errors.IsCorrupted.
func leveldbIsCorrupted(err error) bool {
	switch {
	case err == nil:
		return false

	case errors.IsCorrupted(err):
		return true

	case strings.Contains(err.Error(), "corrupted"):
		return true
	}

	return false
}

// A smallIndex is an in memory bidirectional []byte to uint32 map. It gives
// fast lookups in both directions and persists to the database. Don't use for
// storing more items than fit comfortably in RAM.
type smallIndex struct {
	db     *Instance
	prefix []byte
	id2val map[uint32]string
	val2id map[string]uint32
	nextID uint32
	mut    sync.Mutex
}

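// newSmallIndex returns a smallIndex for the given database and key prefix,
// preloaded with the entries already persisted under that prefix.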
func newSmallIndex(db *Instance, prefix []byte) *smallIndex {
	idx := &smallIndex{
		db:     db,
		prefix: prefix,
		id2val: make(map[uint32]string),
		val2id: make(map[string]uint32),
		mut:    sync.NewMutex(),
	}
	idx.load()
	return idx
}

// load iterates over the prefix space in the database and populates the in
// memory maps.
func (i *smallIndex) load() {
	tr := i.db.newReadOnlyTransaction()
	it := tr.NewIterator(util.BytesPrefix(i.prefix), nil)
	for it.Next() {
		val := string(it.Value())
		id := binary.BigEndian.Uint32(it.Key()[len(i.prefix):])
		i.id2val[id] = val
		i.val2id[val] = id
		if id >= i.nextID {
			i.nextID = id + 1
		}
	}
	it.Release()
	tr.close()
}

// ID returns the index number for the given byte slice, allocating a new one
// and persisting this to the database if necessary.
func (i *smallIndex) ID(val []byte) uint32 {
	i.mut.Lock()
	// intentionally avoiding defer here as we want this call to be as fast as
	// possible in the general case (folder ID already exists). The map lookup
	// with the conversion of []byte to string is compiler optimized to not
	// copy the []byte, which is why we don't assign it to a temp variable
	// here.
	if id, ok := i.val2id[string(val)]; ok {
		i.mut.Unlock()
		return id
	}

	id := i.nextID
	i.nextID++

	valStr := string(val)
	i.val2id[valStr] = id
	i.id2val[id] = valStr

	key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
	copy(key, i.prefix)
	binary.BigEndian.PutUint32(key[len(i.prefix):], id)
	i.db.Put(key, val, nil)

	i.mut.Unlock()
	return id
}

// Val returns the value for the given index number, or (nil, false) if there
// is no such index number.
func (i *smallIndex) Val(id uint32) ([]byte, bool) {
	i.mut.Lock()
	val, ok := i.id2val[id]
	i.mut.Unlock()
	if !ok {
		return nil, false
	}

	return []byte(val), true
}