// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package db

import (
	"bytes"
	"encoding/binary"
	"os"
	"sort"
	"strings"
	"sync/atomic"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

type deletionHandler func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator)

type Instance struct {
	committed int64 // this must be the first attribute in the struct to ensure 64 bit alignment on 32 bit platforms
	*leveldb.DB
	location  string
	folderIdx *smallIndex
	deviceIdx *smallIndex
}
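
// Database keys consist of a one byte key type prefix followed by type
// specific fields. Folder and device IDs are not stored verbatim; they are
// mapped to four byte indexes by the smallIndex type further down. The
// field lengths are as follows.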
const (
	keyPrefixLen   = 1
	keyFolderLen   = 4 // indexed
	keyDeviceLen   = 4 // indexed
	keySequenceLen = 8
	keyHashLen     = 32

	maxInt64 int64 = 1<<63 - 1
)
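
// Open opens the database at the given location, first attempting recovery
// and then, as a last resort, reinitialization if the database turns out
// to be corrupted.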
func Open(file string) (*Instance, error) {
	opts := &opt.Options{
		OpenFilesCacheCapacity: 100,
		WriteBuffer:            4 << 20,
	}

	db, err := leveldb.OpenFile(file, opts)
	if leveldbIsCorrupted(err) {
		db, err = leveldb.RecoverFile(file, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(file); err != nil {
			return nil, err
		}
		db, err = leveldb.OpenFile(file, opts)
	}
	if err != nil {
		return nil, err
	}

	return newDBInstance(db, file), nil
}
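
// OpenMemory returns an Instance backed by an in-memory database, which is
// mainly useful for testing.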
func OpenMemory() *Instance {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	return newDBInstance(db, "<memory>")
}

func newDBInstance(db *leveldb.DB, location string) *Instance {
	i := &Instance{
		DB:       db,
		location: location,
	}
	i.folderIdx = newSmallIndex(i, []byte{KeyTypeFolderIdx})
	i.deviceIdx = newSmallIndex(i, []byte{KeyTypeDeviceIdx})
	i.updateSchema()
	return i
}

// Committed returns the number of items committed to the database since startup
func (db *Instance) Committed() int64 {
	return atomic.LoadInt64(&db.committed)
}

// Location returns the filesystem path where the database is stored
func (db *Instance) Location() string {
	return db.location
}
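
// updateFiles stores the given file entries for the given folder and
// device, updating the global version list and the metadata tracker as it
// goes. Entries whose version and invalid flag are both unchanged are
// skipped.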
func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) {
	t := db.newReadWriteTransaction()
	defer t.close()

	var fk []byte
	var gk []byte
	for _, f := range fs {
		name := []byte(f.Name)
		fk = db.deviceKeyInto(fk, folder, device, name)

		// Get and unmarshal the file entry. If it doesn't exist or can't be
		// unmarshalled we'll add it as a new entry.
		bs, err := t.Get(fk, nil)
		var ef FileInfoTruncated
		if err == nil {
			err = ef.Unmarshal(bs)
		}

		// The Invalid flag might change without the version being bumped.
		if err == nil && ef.Version.Equal(f.Version) && ef.Invalid == f.Invalid {
			continue
		}

		devID := protocol.DeviceIDFromBytes(device)
		if err == nil {
			meta.removeFile(devID, ef)
		}
		meta.addFile(devID, f)

		t.insertFile(fk, folder, device, f)

		gk = db.globalKeyInto(gk, folder, name)
		t.updateGlobal(gk, folder, device, f, meta)

		// Write out and reuse the batch every few records, to avoid the batch
		// growing too large and thus allocating unnecessarily much memory.
		t.checkFlush()
	}
}
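
// addSequences adds sequence number to device key mappings for the given
// local files, allowing them to be iterated in sequence order later.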
func (db *Instance) addSequences(folder []byte, fs []protocol.FileInfo) {
	t := db.newReadWriteTransaction()
	defer t.close()

	var sk []byte
	var dk []byte
	for _, f := range fs {
		sk = db.sequenceKeyInto(sk, folder, f.Sequence)
		dk = db.deviceKeyInto(dk, folder, protocol.LocalDeviceID[:], []byte(f.Name))
		t.Put(sk, dk)
		l.Debugf("adding sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
		t.checkFlush()
	}
}

func (db *Instance) removeSequences(folder []byte, fs []protocol.FileInfo) {
	t := db.newReadWriteTransaction()
	defer t.close()

	var sk []byte
	for _, f := range fs {
		sk = db.sequenceKeyInto(sk, folder, f.Sequence)
		t.Delete(sk)
		l.Debugf("removing sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
		t.checkFlush()
	}
}
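
// withHave iterates over the file entries for the given folder and device,
// optionally restricted to the given path prefix, calling fn for each
// entry until it returns false.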
func (db *Instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) {
	if len(prefix) > 0 {
		unslashedPrefix := prefix
		if bytes.HasSuffix(prefix, []byte{'/'}) {
			unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
		} else {
			prefix = append(prefix, '/')
		}

		if f, ok := db.getFileTrunc(db.deviceKey(folder, device, unslashedPrefix), true); ok && !fn(f) {
			return
		}
	}

	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, prefix)[:keyPrefixLen+keyFolderLen+keyDeviceLen+len(prefix)]), nil)
	defer dbi.Release()

	for dbi.Next() {
		name := db.deviceKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
			return
		}

		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		f, err := unmarshalTrunc(append([]byte{}, dbi.Value()...), truncate)
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}
		if !fn(f) {
			return
		}
	}
}
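
// withHaveSequence iterates over the local file entries with sequence
// numbers equal to or higher than startSeq, in sequence order, calling fn
// for each entry until it returns false.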
func (db *Instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(&util.Range{Start: db.sequenceKey(folder, startSeq), Limit: db.sequenceKey(folder, maxInt64)}, nil)
	defer dbi.Release()

	for dbi.Next() {
		f, ok := db.getFile(dbi.Value())
		if !ok {
			l.Debugln("missing file for sequence number", db.sequenceKeySequence(dbi.Key()))
			continue
		}
		if !fn(f) {
			return
		}
	}
}
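
// withAllFolderTruncated iterates over the file entries of all devices in
// the given folder, dropping entries with obviously invalid names from the
// database as it goes, and calls fn for each entry until it returns false.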
func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	var gk []byte

	for dbi.Next() {
		device := db.deviceKeyDevice(dbi.Key())
		var f FileInfoTruncated
		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		err := f.Unmarshal(append([]byte{}, dbi.Value()...))
		if err != nil {
			l.Debugln("unmarshal error:", err)
			continue
		}

		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from database", f.Name)
			name := []byte(f.Name)
			gk = db.globalKeyInto(gk, folder, name)
			t.removeFromGlobal(gk, folder, device, name, nil)
			t.Delete(dbi.Key())
			t.checkFlush()
			continue
		}

		if !fn(device, f) {
			return
		}
	}
}

func (db *Instance) getFile(key []byte) (protocol.FileInfo, bool) {
	if f, ok := db.getFileTrunc(key, false); ok {
		return f.(protocol.FileInfo), true
	}
	return protocol.FileInfo{}, false
}

func (db *Instance) getFileTrunc(key []byte, trunc bool) (FileIntf, bool) {
	bs, err := db.Get(key, nil)
	if err == leveldb.ErrNotFound {
		return nil, false
	}
	if err != nil {
		l.Debugln("surprise error:", err)
		return nil, false
	}

	f, err := unmarshalTrunc(bs, trunc)
	if err != nil {
		l.Debugln("unmarshal error:", err)
		return nil, false
	}
	return f, true
}
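
// getGlobal returns the file entry for the globally newest version of the
// given file, or false if there is no such entry.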
func (db *Instance) getGlobal(folder, file []byte, truncate bool) (FileIntf, bool) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	_, _, f, ok := db.getGlobalInto(t, nil, nil, folder, file, truncate)
	return f, ok
}

func (db *Instance) getGlobalInto(t readOnlyTransaction, gk, dk, folder, file []byte, truncate bool) ([]byte, []byte, FileIntf, bool) {
	gk = db.globalKeyInto(gk, folder, file)

	bs, err := t.Get(gk, nil)
	if err != nil {
		return gk, dk, nil, false
	}

	vl, ok := unmarshalVersionList(bs)
	if !ok {
		return gk, dk, nil, false
	}

	dk = db.deviceKeyInto(dk, folder, vl.Versions[0].Device, file)
	if fi, ok := db.getFileTrunc(dk, truncate); ok {
		return gk, dk, fi, true
	}

	return gk, dk, nil, false
}
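
// withGlobal iterates over the globally newest file entries in the given
// folder, optionally restricted to the given path prefix, calling fn for
// each entry until it returns false.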
func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
	if len(prefix) > 0 {
		unslashedPrefix := prefix
		if bytes.HasSuffix(prefix, []byte{'/'}) {
			unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
		} else {
			prefix = append(prefix, '/')
		}

		if f, ok := db.getGlobal(folder, unslashedPrefix, truncate); ok && !fn(f) {
			return
		}
	}

	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, prefix)), nil)
	defer dbi.Release()

	var fk []byte
	for dbi.Next() {
		name := db.globalKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
			return
		}

		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			continue
		}

		fk = db.deviceKeyInto(fk, folder, vl.Versions[0].Device, name)

		f, ok := db.getFileTrunc(fk, truncate)
		if !ok {
			continue
		}

		if !fn(f) {
			return
		}
	}
}
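
// availability returns the list of devices that have the globally newest,
// valid version of the given file.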
func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
	k := db.globalKey(folder, file)
	bs, err := db.Get(k, nil)
	if err == leveldb.ErrNotFound {
		return nil
	}
	if err != nil {
		l.Debugln("surprise error:", err)
		return nil
	}

	vl, ok := unmarshalVersionList(bs)
	if !ok {
		return nil
	}

	var devices []protocol.DeviceID
	for _, v := range vl.Versions {
		if !v.Version.Equal(vl.Versions[0].Version) {
			break
		}
		if v.Invalid {
			continue
		}
		n := protocol.DeviceIDFromBytes(v.Device)
		devices = append(devices, n)
	}

	return devices
}
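
// withNeed iterates over the file entries in the given folder that the
// given device needs, i.e. where the newest global version is not the one
// the device already has, calling fn for each entry until it returns
// false. As a hypothetical usage sketch, a caller might collect the needed
// file names like so:
//
//	db.withNeed(folder, device, true, func(f FileIntf) bool {
//		needed = append(needed, f.FileName())
//		return true
//	})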
func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
	if bytes.Equal(device, protocol.LocalDeviceID[:]) {
		db.withNeedLocal(folder, truncate, fn)
		return
	}

	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	var fk []byte
	for dbi.Next() {
		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			continue
		}

		haveFV, have := vl.Get(device)
		// XXX: This marks concurrent (i.e. conflicting) changes as
		// needs. Maybe we should do that, but it needs special
		// handling in the puller.
		if have && haveFV.Version.GreaterEqual(vl.Versions[0].Version) {
			continue
		}

		name := db.globalKeyName(dbi.Key())
		needVersion := vl.Versions[0].Version
		needDevice := protocol.DeviceIDFromBytes(vl.Versions[0].Device)

		for i := range vl.Versions {
			if !vl.Versions[i].Version.Equal(needVersion) {
				// We haven't found a valid copy of the file with the needed version.
				break
			}

			if vl.Versions[i].Invalid {
				// The file is marked invalid, don't use it.
				continue
			}

			fk = db.deviceKeyInto(fk, folder, vl.Versions[i].Device, name)
			bs, err := t.Get(fk, nil)
			if err != nil {
				l.Debugln("surprise error:", err)
				continue
			}

			gf, err := unmarshalTrunc(bs, truncate)
			if err != nil {
				l.Debugln("unmarshal error:", err)
				continue
			}

			if gf.IsDeleted() && !have {
				// We don't need deleted files that we don't have
				break
			}

			l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, protocol.DeviceIDFromBytes(device), name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice)

			if !fn(gf) {
				return
			}

			// This file is handled, no need to look further in the version list
			break
		}
	}
}

func (db *Instance) withNeedLocal(folder []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.needKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	var dk []byte
	var gk []byte
	var f FileIntf
	var ok bool
	for dbi.Next() {
		gk, dk, f, ok = db.getGlobalInto(t, gk, dk, folder, db.globalKeyName(dbi.Key()), truncate)
		if !ok {
			continue
		}
		if !fn(f) {
			return
		}
	}
}
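
// ListFolders returns the sorted list of folders for which the database
// holds global entries.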
func (db *Instance) ListFolders() []string {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
	defer dbi.Release()

	folderExists := make(map[string]bool)
	for dbi.Next() {
		folder, ok := db.globalKeyFolder(dbi.Key())
		if ok && !folderExists[string(folder)] {
			folderExists[string(folder)] = true
		}
	}

	folders := make([]string, 0, len(folderExists))
	for k := range folderExists {
		folders = append(folders, k)
	}

	sort.Strings(folders)
	return folders
}
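
// dropFolder removes all database entries related to the given folder.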
func (db *Instance) dropFolder(folder []byte) {
	t := db.newReadWriteTransaction()
	defer t.close()

	for _, key := range [][]byte{
		// Remove all items related to the given folder from the device->file bucket
		db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen],
		// Remove all sequences related to the folder
		db.sequenceKey(folder, 0)[:keyPrefixLen+keyFolderLen],
		// Remove all items related to the given folder from the global bucket
		db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen],
		// Remove all needs related to the folder
		db.needKey(folder, nil)[:keyPrefixLen+keyFolderLen],
	} {
		t.deleteKeyPrefix(key)
	}
}
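
// dropDeviceFolder removes all file entries for the given device and
// folder, updating the global version lists and metadata accordingly.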
func (db *Instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)), nil)
	defer dbi.Release()

	var gk []byte

	for dbi.Next() {
		key := dbi.Key()
		name := db.deviceKeyName(key)
		gk = db.globalKeyInto(gk, folder, name)
		t.removeFromGlobal(gk, folder, device, name, meta)
		t.Delete(key)
		t.checkFlush()
	}
}
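
// checkGlobals verifies the consistency of the global version lists in the
// given folder, removing versions that refer to files no longer present in
// the database.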
func (db *Instance) checkGlobals(folder []byte, meta *metadataTracker) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
	defer dbi.Release()

	var fk []byte
	for dbi.Next() {
		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			continue
		}

		// Check the global version list for consistency. An issue in previous
		// versions of goleveldb could result in reordered writes so that
		// there are global entries pointing to no longer existing files. Here
		// we find those and clear them out.

		name := db.globalKeyName(dbi.Key())
		var newVL VersionList
		for i, version := range vl.Versions {
			fk = db.deviceKeyInto(fk, folder, version.Device, name)
			_, err := t.Get(fk, nil)
			if err == leveldb.ErrNotFound {
				continue
			}
			if err != nil {
				l.Debugln("surprise error:", err)
				return
			}
			newVL.Versions = append(newVL.Versions, version)

			if i == 0 {
				if fi, ok := db.getFile(fk); ok {
					meta.addFile(globalDeviceID, fi)
				}
			}
		}

		if len(newVL.Versions) != len(vl.Versions) {
			t.Put(dbi.Key(), mustMarshal(&newVL))
			t.checkFlush()
		}
	}
	l.Debugf("db check completed for %q", folder)
}

// deviceKey returns a byte slice encoding the following information:
//	keyTypeDevice (1 byte)
//	folder (4 bytes)
//	device (4 bytes)
//	name (variable size)
func (db *Instance) deviceKey(folder, device, file []byte) []byte {
	return db.deviceKeyInto(nil, folder, device, file)
}

func (db *Instance) deviceKeyInto(k, folder, device, file []byte) []byte {
	reqLen := keyPrefixLen + keyFolderLen + keyDeviceLen + len(file)
	k = resize(k, reqLen)
	k[0] = KeyTypeDevice
	binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
	binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
	copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], file)
	return k
}

// deviceKeyName returns the file name from the key
func (db *Instance) deviceKeyName(key []byte) []byte {
	return key[keyPrefixLen+keyFolderLen+keyDeviceLen:]
}

// deviceKeyFolder returns the folder name from the key
func (db *Instance) deviceKeyFolder(key []byte) []byte {
	folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
	if !ok {
		panic("bug: lookup of nonexistent folder ID")
	}
	return folder
}

// deviceKeyDevice returns the device ID from the key
func (db *Instance) deviceKeyDevice(key []byte) []byte {
	device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen+keyFolderLen:]))
	if !ok {
		panic("bug: lookup of nonexistent device ID")
	}
	return device
}

// globalKey returns a byte slice encoding the following information:
//	keyTypeGlobal (1 byte)
//	folder (4 bytes)
//	name (variable size)
func (db *Instance) globalKey(folder, file []byte) []byte {
	return db.globalKeyInto(nil, folder, file)
}

func (db *Instance) globalKeyInto(gk, folder, file []byte) []byte {
	reqLen := keyPrefixLen + keyFolderLen + len(file)
	gk = resize(gk, reqLen)
	gk[0] = KeyTypeGlobal
	binary.BigEndian.PutUint32(gk[keyPrefixLen:], db.folderIdx.ID(folder))
	copy(gk[keyPrefixLen+keyFolderLen:], file)
	return gk
}

// globalKeyName returns the filename from the key
func (db *Instance) globalKeyName(key []byte) []byte {
	return key[keyPrefixLen+keyFolderLen:]
}

// globalKeyFolder returns the folder name from the key
func (db *Instance) globalKeyFolder(key []byte) ([]byte, bool) {
	return db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
}

// needKey is a globalKey with a different prefix
func (db *Instance) needKey(folder, file []byte) []byte {
	return db.needKeyInto(nil, folder, file)
}

func (db *Instance) needKeyInto(k, folder, file []byte) []byte {
	k = db.globalKeyInto(k, folder, file)
	k[0] = KeyTypeNeed
	return k
}

// sequenceKey returns a byte slice encoding the following information:
//	KeyTypeSequence (1 byte)
//	folder (4 bytes)
//	sequence number (8 bytes)
func (db *Instance) sequenceKey(folder []byte, seq int64) []byte {
	return db.sequenceKeyInto(nil, folder, seq)
}

func (db *Instance) sequenceKeyInto(k []byte, folder []byte, seq int64) []byte {
	reqLen := keyPrefixLen + keyFolderLen + keySequenceLen
	k = resize(k, reqLen)
	k[0] = KeyTypeSequence
	binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
	binary.BigEndian.PutUint64(k[keyPrefixLen+keyFolderLen:], uint64(seq))
	return k
}

// sequenceKeySequence returns the sequence number from the key
func (db *Instance) sequenceKeySequence(key []byte) int64 {
	return int64(binary.BigEndian.Uint64(key[keyPrefixLen+keyFolderLen:]))
}
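
// getIndexID returns the index ID we have on record for the given device
// and folder, or zero if there is none.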
func (db *Instance) getIndexID(device, folder []byte) protocol.IndexID {
	key := db.indexIDKey(device, folder)
	cur, err := db.Get(key, nil)
	if err != nil {
		return 0
	}

	var id protocol.IndexID
	if err := id.Unmarshal(cur); err != nil {
		return 0
	}

	return id
}

func (db *Instance) setIndexID(device, folder []byte, id protocol.IndexID) {
	key := db.indexIDKey(device, folder)
	bs, _ := id.Marshal() // marshalling can't fail
	if err := db.Put(key, bs, nil); err != nil {
		panic("storing index ID: " + err.Error())
	}
}

func (db *Instance) indexIDKey(device, folder []byte) []byte {
	k := make([]byte, keyPrefixLen+keyDeviceLen+keyFolderLen)
	k[0] = KeyTypeIndexID
	binary.BigEndian.PutUint32(k[keyPrefixLen:], db.deviceIdx.ID(device))
	binary.BigEndian.PutUint32(k[keyPrefixLen+keyDeviceLen:], db.folderIdx.ID(folder))
	return k
}

func (db *Instance) indexIDDevice(key []byte) []byte {
	device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
	if !ok {
		// The key refers to a device index we don't know about; treat the
		// device as missing.
		return nil
	}
	return device
}

func (db *Instance) mtimesKey(folder []byte) []byte {
	prefix := make([]byte, 5) // key type + 4 bytes folder idx number
	prefix[0] = KeyTypeVirtualMtime
	binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
	return prefix
}

func (db *Instance) folderMetaKey(folder []byte) []byte {
	prefix := make([]byte, 5) // key type + 4 bytes folder idx number
	prefix[0] = KeyTypeFolderMeta
	binary.BigEndian.PutUint32(prefix[1:], db.folderIdx.ID(folder))
	return prefix
}

// DropLocalDeltaIndexIDs removes all index IDs for the local device ID from
// the database. This will cause a full index transmission on the next
// connection.
func (db *Instance) DropLocalDeltaIndexIDs() {
	db.dropDeltaIndexIDs(true)
}

// DropRemoteDeltaIndexIDs removes all index IDs for devices other than the
// local one from the database. This will cause them to send us a full
// index on the next connection.
func (db *Instance) DropRemoteDeltaIndexIDs() {
	db.dropDeltaIndexIDs(false)
}

func (db *Instance) dropDeltaIndexIDs(local bool) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
	defer dbi.Release()

	for dbi.Next() {
		device := db.indexIDDevice(dbi.Key())
		if bytes.Equal(device, protocol.LocalDeviceID[:]) == local {
			t.Delete(dbi.Key())
		}
	}
}

func (db *Instance) dropMtimes(folder []byte) {
	db.dropPrefix(db.mtimesKey(folder))
}

func (db *Instance) dropFolderMeta(folder []byte) {
	db.dropPrefix(db.folderMetaKey(folder))
}

func (db *Instance) dropPrefix(prefix []byte) {
	t := db.newReadWriteTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(prefix), nil)
	defer dbi.Release()

	for dbi.Next() {
		t.Delete(dbi.Key())
	}
}
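
// unmarshalTrunc unmarshals a FileInfoTruncated or a full protocol.FileInfo
// from bs, depending on the truncate flag.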
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
	if truncate {
		var tf FileInfoTruncated
		err := tf.Unmarshal(bs)
		return tf, err
	}

	var tf protocol.FileInfo
	err := tf.Unmarshal(bs)
	return tf, err
}

func unmarshalVersionList(data []byte) (VersionList, bool) {
	var vl VersionList
	if err := vl.Unmarshal(data); err != nil {
		l.Debugln("unmarshal error:", err)
		return VersionList{}, false
	}
	if len(vl.Versions) == 0 {
		l.Debugln("empty version list")
		return VersionList{}, false
	}
	return vl, true
}

// A "better" version of leveldb's errors.IsCorrupted.
func leveldbIsCorrupted(err error) bool {
	switch {
	case err == nil:
		return false

	case errors.IsCorrupted(err):
		return true

	case strings.Contains(err.Error(), "corrupted"):
		return true
	}

	return false
}

// A smallIndex is an in memory bidirectional []byte to uint32 map. It gives
// fast lookups in both directions and persists to the database. Don't use for
// storing more items than fit comfortably in RAM.
type smallIndex struct {
	db     *Instance
	prefix []byte
	id2val map[uint32]string
	val2id map[string]uint32
	nextID uint32
	mut    sync.Mutex
}

func newSmallIndex(db *Instance, prefix []byte) *smallIndex {
	idx := &smallIndex{
		db:     db,
		prefix: prefix,
		id2val: make(map[uint32]string),
		val2id: make(map[string]uint32),
		mut:    sync.NewMutex(),
	}
	idx.load()
	return idx
}

// load iterates over the prefix space in the database and populates the in
// memory maps.
func (i *smallIndex) load() {
	tr := i.db.newReadOnlyTransaction()
	it := tr.NewIterator(util.BytesPrefix(i.prefix), nil)
	for it.Next() {
		val := string(it.Value())
		id := binary.BigEndian.Uint32(it.Key()[len(i.prefix):])
		i.id2val[id] = val
		i.val2id[val] = id
		if id >= i.nextID {
			i.nextID = id + 1
		}
	}
	it.Release()
	tr.close()
}

// ID returns the index number for the given byte slice, allocating a new one
// and persisting this to the database if necessary.
func (i *smallIndex) ID(val []byte) uint32 {
	i.mut.Lock()
	// intentionally avoiding defer here as we want this call to be as fast as
	// possible in the general case (folder ID already exists). The map lookup
	// with the conversion of []byte to string is compiler optimized to not
	// copy the []byte, which is why we don't assign it to a temp variable
	// here.
	if id, ok := i.val2id[string(val)]; ok {
		i.mut.Unlock()
		return id
	}

	id := i.nextID
	i.nextID++

	valStr := string(val)
	i.val2id[valStr] = id
	i.id2val[id] = valStr

	key := make([]byte, len(i.prefix)+8) // prefix plus 8 bytes, of which the first 4 hold the uint32 id
	copy(key, i.prefix)
	binary.BigEndian.PutUint32(key[len(i.prefix):], id)
	i.db.Put(key, val, nil)

	i.mut.Unlock()
	return id
}

// Val returns the value for the given index number, or (nil, false) if there
// is no such index number.
func (i *smallIndex) Val(id uint32) ([]byte, bool) {
	i.mut.Lock()
	val, ok := i.id2val[id]
	i.mut.Unlock()
	if !ok {
		return nil, false
	}

	return []byte(val), true
}

// resize returns a byte array of length reqLen, reusing k if possible
func resize(k []byte, reqLen int) []byte {
	if cap(k) < reqLen {
		return make([]byte, reqLen)
	}
	return k[:reqLen]
}