// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
|
2015-01-12 06:50:30 -07:00
|
|
|
package db
|
2014-03-28 06:36:57 -07:00
|
|
|
|
|
|
|
import (
	"time"

	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
)
|
|
|
|
|
2015-01-12 06:52:24 -07:00
|
|
|
// FileSet tracks the files of a single folder, for the local device as
// well as remote devices, backed by the key-value database in db.
// Construct instances with NewFileSet.
type FileSet struct {
	folder string           // folder ID, used as part of database keys
	fs     fs.Filesystem    // the folder's filesystem, used for MtimeFS
	db     *Lowlevel        // underlying database
	meta   *metadataTracker // per-device counts and sequence numbers

	updateMutex sync.Mutex // protects database updates and the corresponding metadata changes
}
|
|
|
|
|
2015-01-09 00:18:42 -07:00
|
|
|
// FileIntf is the set of methods implemented by both protocol.FileInfo and
|
2016-07-04 03:40:29 -07:00
|
|
|
// FileInfoTruncated.
|
2015-01-09 00:18:42 -07:00
|
|
|
type FileIntf interface {
	// Basic file attributes.
	FileSize() int64
	FileName() string
	FileLocalFlags() uint32
	// State predicates.
	IsDeleted() bool
	IsInvalid() bool
	IsIgnored() bool
	IsUnsupported() bool
	MustRescan() bool
	IsReceiveOnlyChanged() bool
	IsDirectory() bool
	IsSymlink() bool
	ShouldConflict() bool
	HasPermissionBits() bool
	// Versioning and metadata accessors.
	SequenceNo() int64
	BlockSize() int
	FileVersion() protocol.Vector
	FileType() protocol.FileInfoType
	FilePermissions() uint32
	FileModifiedBy() protocol.ShortID
	ModTime() time.Time
}
|
|
|
|
|
|
|
|
// The Iterator is called with either a protocol.FileInfo or a
// FileInfoTruncated (depending on the method) and returns true to
// continue iteration, false to stop.
type Iterator func(f FileIntf) bool
|
|
|
|
|
2019-12-02 00:18:04 -07:00
|
|
|
func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
|
2020-02-22 01:36:59 -07:00
|
|
|
return &FileSet{
|
2017-12-14 02:51:17 -07:00
|
|
|
folder: folder,
|
|
|
|
fs: fs,
|
|
|
|
db: db,
|
2020-03-19 07:58:32 -07:00
|
|
|
meta: db.loadMetadataTracker(folder),
|
2017-12-14 02:51:17 -07:00
|
|
|
updateMutex: sync.NewMutex(),
|
2015-10-20 06:58:18 -07:00
|
|
|
}
|
2020-02-22 01:36:59 -07:00
|
|
|
}
|
|
|
|
|
2017-11-12 13:20:34 -07:00
|
|
|
// Drop removes all file entries for the given device from the set and
// resets the corresponding metadata counters, then persists the updated
// metadata. Database errors cause a panic, except when the database has
// been closed, in which case the method returns silently.
func (s *FileSet) Drop(device protocol.DeviceID) {
	l.Debugf("%s Drop(%v)", s.folder, device)

	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()

	if err := s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta); backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}

	if device == protocol.LocalDeviceID {
		s.meta.resetCounts(device)
		// We deliberately do not reset the sequence number here. Dropping
		// all files for the local device ID only happens in testing - which
		// expects the sequence to be retained, like an old Replace() of all
		// files would do. However, if we ever did it "in production" we
		// would anyway want to retain the sequence for delta indexes to be
		// happy.
	} else {
		// Here, on the other hand, we want to make sure that any file
		// announced from the remote is newer than our current sequence
		// number.
		s.meta.resetAll(device)
	}

	// Persist the updated metadata in its own read-write transaction.
	t, err := s.db.newReadWriteTransaction()
	if backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
	defer t.close()

	if err := s.meta.toDB(t, []byte(s.folder)); backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
	if err := t.Commit(); backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
}
|
|
|
|
|
2015-01-12 06:52:24 -07:00
|
|
|
// Update inserts or updates the given files for the given device. The
// input slice is copied and filenames are normalized to wire format
// before hitting the database. Database errors cause a panic unless the
// database has been closed.
func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
	l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))

	// do not modify fs in place, it is still used in outer scope
	fs = append([]protocol.FileInfo(nil), fs...)

	normalizeFilenames(fs)

	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()

	if device == protocol.LocalDeviceID {
		// For the local device we have a bunch of metadata to track.
		if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) {
			panic(err)
		}
		return
	}
	// Easy case, just update the files and we're done.
	if err := s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
// Snapshot is a point-in-time view of a FileSet, holding a read-only
// database transaction and a copy of the metadata counters. Obtain one
// via FileSet.Snapshot and release it with Release when done.
type Snapshot struct {
	folder string              // folder ID, copied from the FileSet
	t      readOnlyTransaction // open read-only transaction; closed by Release
	meta   *countsMap          // metadata counters as of snapshot time
}
|
|
|
|
|
|
|
|
func (s *FileSet) Snapshot() *Snapshot {
|
|
|
|
t, err := s.db.newReadOnlyTransaction()
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return &Snapshot{
|
|
|
|
folder: s.folder,
|
|
|
|
t: t,
|
|
|
|
meta: s.meta.Snapshot(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Release closes the snapshot's underlying read-only transaction,
// releasing the resources held by the snapshot.
func (s *Snapshot) Release() {
	s.t.close()
}
|
|
|
|
|
|
|
|
func (s *Snapshot) WithNeed(device protocol.DeviceID, fn Iterator) {
|
2015-10-03 08:25:21 -07:00
|
|
|
l.Debugf("%s WithNeed(%v)", s.folder, device)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2014-08-12 04:53:31 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
|
2015-10-03 08:25:21 -07:00
|
|
|
l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2014-03-28 06:36:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithHave(device protocol.DeviceID, fn Iterator) {
|
2015-10-03 08:25:21 -07:00
|
|
|
l.Debugf("%s WithHave(%v)", s.folder, device)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2014-08-12 04:53:31 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
|
2015-10-03 08:25:21 -07:00
|
|
|
l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2014-03-28 06:36:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithHaveSequence(startSeq int64, fn Iterator) {
|
2018-05-01 14:39:15 -07:00
|
|
|
l.Debugf("%s WithHaveSequence(%v)", s.folder, startSeq)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2018-05-01 14:39:15 -07:00
|
|
|
}
|
|
|
|
|
2018-05-17 00:26:40 -07:00
|
|
|
// Except for an item with a path equal to prefix, only children of prefix are iterated.
|
|
|
|
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) {
|
2018-05-17 00:26:40 -07:00
|
|
|
l.Debugf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2016-03-18 05:16:33 -07:00
|
|
|
}
|
2019-11-29 01:11:52 -07:00
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithGlobal(fn Iterator) {
|
2015-10-03 08:25:21 -07:00
|
|
|
l.Debugf("%s WithGlobal()", s.folder)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2014-08-12 07:17:28 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithGlobalTruncated(fn Iterator) {
|
2015-10-03 08:25:21 -07:00
|
|
|
l.Debugf("%s WithGlobalTruncated()", s.folder)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2015-02-07 03:52:42 -07:00
|
|
|
}
|
|
|
|
|
2018-05-17 00:26:40 -07:00
|
|
|
// Except for an item with a path equal to prefix, only children of prefix are iterated.
|
|
|
|
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
|
2018-05-17 00:26:40 -07:00
|
|
|
l.Debugf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix)
|
2020-01-21 10:23:08 -07:00
|
|
|
if err := s.t.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
|
2019-11-29 01:11:52 -07:00
|
|
|
panic(err)
|
|
|
|
}
|
2014-03-28 06:36:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
|
|
|
|
f, ok, err := s.t.getFile([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
|
2019-11-29 01:11:52 -07:00
|
|
|
if backend.IsClosed(err) {
|
|
|
|
return protocol.FileInfo{}, false
|
|
|
|
} else if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2014-11-05 16:41:51 -07:00
|
|
|
f.Name = osutil.NativeFilename(f.Name)
|
2015-01-06 14:12:45 -07:00
|
|
|
return f, ok
|
2014-03-28 06:36:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) GetGlobal(file string) (protocol.FileInfo, bool) {
|
|
|
|
_, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
|
2019-11-29 01:11:52 -07:00
|
|
|
if backend.IsClosed(err) {
|
|
|
|
return protocol.FileInfo{}, false
|
|
|
|
} else if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2015-01-09 00:41:02 -07:00
|
|
|
if !ok {
|
|
|
|
return protocol.FileInfo{}, false
|
|
|
|
}
|
|
|
|
f := fi.(protocol.FileInfo)
|
2014-11-05 16:41:51 -07:00
|
|
|
f.Name = osutil.NativeFilename(f.Name)
|
2015-01-09 00:41:02 -07:00
|
|
|
return f, true
|
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
|
|
|
|
_, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
|
2019-11-29 01:11:52 -07:00
|
|
|
if backend.IsClosed(err) {
|
|
|
|
return FileInfoTruncated{}, false
|
|
|
|
} else if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2015-01-09 00:41:02 -07:00
|
|
|
if !ok {
|
|
|
|
return FileInfoTruncated{}, false
|
|
|
|
}
|
|
|
|
f := fi.(FileInfoTruncated)
|
|
|
|
f.Name = osutil.NativeFilename(f.Name)
|
|
|
|
return f, true
|
2014-03-28 06:36:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) Availability(file string) []protocol.DeviceID {
|
|
|
|
av, err := s.t.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file)))
|
2019-11-29 01:11:52 -07:00
|
|
|
if backend.IsClosed(err) {
|
|
|
|
return nil
|
|
|
|
} else if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return av
|
2014-03-28 06:36:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
// Sequence returns the sequence number recorded for the given device in
// the snapshot's metadata counters.
func (s *Snapshot) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Counts(device, 0).Sequence
}
|
2014-08-15 03:52:16 -07:00
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
// RemoteSequence returns the change version for the given folder, as
|
|
|
|
// sent by remote peers. This is guaranteed to increment if the contents of
|
|
|
|
// the remote or global folder has changed.
|
|
|
|
func (s *Snapshot) RemoteSequence() int64 {
|
|
|
|
var ver int64
|
|
|
|
|
|
|
|
for _, device := range s.meta.devices() {
|
|
|
|
ver += s.Sequence(device)
|
|
|
|
}
|
|
|
|
|
|
|
|
return ver
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Snapshot) LocalSize() Counts {
|
2018-07-12 01:15:57 -07:00
|
|
|
local := s.meta.Counts(protocol.LocalDeviceID, 0)
|
2020-01-21 10:23:08 -07:00
|
|
|
return local.Add(s.ReceiveOnlyChangedSize())
|
2018-07-12 01:15:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
// ReceiveOnlyChangedSize returns the counts for local files carrying the
// receive-only-changed local flag.
func (s *Snapshot) ReceiveOnlyChangedSize() Counts {
	return s.meta.Counts(protocol.LocalDeviceID, protocol.FlagLocalReceiveOnly)
}
|
|
|
|
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) GlobalSize() Counts {
|
2018-07-12 01:15:57 -07:00
|
|
|
global := s.meta.Counts(protocol.GlobalDeviceID, 0)
|
|
|
|
recvOnlyChanged := s.meta.Counts(protocol.GlobalDeviceID, protocol.FlagLocalReceiveOnly)
|
|
|
|
return global.Add(recvOnlyChanged)
|
2015-10-21 00:10:26 -07:00
|
|
|
}
|
|
|
|
|
2020-05-11 06:07:06 -07:00
|
|
|
// NeedSize returns the counts of files needed by the given device, as
// recorded in the metadata counters under the need flag.
func (s *Snapshot) NeedSize(device protocol.DeviceID) Counts {
	return s.meta.Counts(device, needFlag)
}
|
|
|
|
|
2020-04-02 07:14:25 -07:00
|
|
|
// LocalChangedFiles returns a paginated list of files that were changed locally.
|
2020-01-21 10:23:08 -07:00
|
|
|
func (s *Snapshot) LocalChangedFiles(page, perpage int) []FileInfoTruncated {
|
|
|
|
if s.ReceiveOnlyChangedSize().TotalItems() == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
files := make([]FileInfoTruncated, 0, perpage)
|
|
|
|
|
|
|
|
skip := (page - 1) * perpage
|
|
|
|
get := perpage
|
|
|
|
|
|
|
|
s.WithHaveTruncated(protocol.LocalDeviceID, func(f FileIntf) bool {
|
|
|
|
if !f.IsReceiveOnlyChanged() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if skip > 0 {
|
|
|
|
skip--
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
ft := f.(FileInfoTruncated)
|
|
|
|
files = append(files, ft)
|
|
|
|
get--
|
|
|
|
return get > 0
|
|
|
|
})
|
|
|
|
|
|
|
|
return files
|
|
|
|
}
|
|
|
|
|
|
|
|
// RemoteNeedFolderFiles returns paginated list of currently needed files in
|
|
|
|
// progress, queued, and to be queued on next puller iteration, as well as the
|
|
|
|
// total number of files currently needed.
|
|
|
|
func (s *Snapshot) RemoteNeedFolderFiles(device protocol.DeviceID, page, perpage int) []FileInfoTruncated {
|
|
|
|
files := make([]FileInfoTruncated, 0, perpage)
|
|
|
|
skip := (page - 1) * perpage
|
|
|
|
get := perpage
|
|
|
|
s.WithNeedTruncated(device, func(f FileIntf) bool {
|
|
|
|
if skip > 0 {
|
|
|
|
skip--
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
files = append(files, f.(FileInfoTruncated))
|
|
|
|
get--
|
|
|
|
return get > 0
|
|
|
|
})
|
|
|
|
return files
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sequence returns the current sequence number for the given device, as
// tracked by the set's metadata.
func (s *FileSet) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Sequence(device)
}
|
|
|
|
|
2016-07-23 05:46:31 -07:00
|
|
|
// IndexID returns the index ID for the given device. For the local
// device, a fresh index ID is generated and stored on first call. Returns
// zero when the database has been closed; other database errors cause a
// panic.
func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
	id, err := s.db.getIndexID(device[:], []byte(s.folder))
	if backend.IsClosed(err) {
		return 0
	} else if err != nil {
		panic(err)
	}
	if id == 0 && device == protocol.LocalDeviceID {
		// No index ID set yet. We create one now.
		id = protocol.NewIndexID()
		err := s.db.setIndexID(device[:], []byte(s.folder), id)
		if backend.IsClosed(err) {
			return 0
		} else if err != nil {
			panic(err)
		}
	}
	return id
}
|
|
|
|
|
|
|
|
func (s *FileSet) SetIndexID(device protocol.DeviceID, id protocol.IndexID) {
|
|
|
|
if device == protocol.LocalDeviceID {
|
|
|
|
panic("do not explicitly set index ID for local device")
|
|
|
|
}
|
2019-11-29 01:11:52 -07:00
|
|
|
if err := s.db.setIndexID(device[:], []byte(s.folder), id); err != nil && !backend.IsClosed(err) {
|
|
|
|
panic(err)
|
|
|
|
}
|
2016-07-23 05:46:31 -07:00
|
|
|
}
|
|
|
|
|
2016-08-05 10:45:45 -07:00
|
|
|
func (s *FileSet) MtimeFS() *fs.MtimeFS {
|
2019-11-29 01:11:52 -07:00
|
|
|
prefix, err := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder))
|
|
|
|
if backend.IsClosed(err) {
|
|
|
|
return nil
|
|
|
|
} else if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2019-12-02 00:18:04 -07:00
|
|
|
kv := NewNamespacedKV(s.db, string(prefix))
|
2017-08-19 07:36:56 -07:00
|
|
|
return fs.NewMtimeFS(s.fs, kv)
|
2016-08-05 10:45:45 -07:00
|
|
|
}
|
|
|
|
|
2016-08-07 09:21:59 -07:00
|
|
|
// ListDevices returns the devices known in the set's metadata.
func (s *FileSet) ListDevices() []protocol.DeviceID {
	return s.meta.devices()
}
|
|
|
|
|
2020-03-18 09:34:46 -07:00
|
|
|
// RepairSequence runs a sequence repair for the folder while holding both
// the update mutex and the database GC read lock. It returns the result
// of repairSequenceGCLocked (presumably a count of repaired entries —
// see that method) and any error.
func (s *FileSet) RepairSequence() (int, error) {
	s.updateAndGCMutexLock() // Ensures consistent locking order
	defer s.updateMutex.Unlock()
	defer s.db.gcMut.RUnlock()
	// Deferred calls run LIFO, so gcMut is released before updateMutex —
	// the reverse of the acquisition order in updateAndGCMutexLock.
	return s.db.repairSequenceGCLocked(s.folder, s.meta)
}
|
|
|
|
|
|
|
|
// updateAndGCMutexLock takes the update mutex first and the database GC
// read lock second. Callers must always acquire these two locks in this
// order (see RepairSequence) to keep the locking order consistent.
func (s *FileSet) updateAndGCMutexLock() {
	s.updateMutex.Lock()
	s.db.gcMut.RLock()
}
|
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
// DropFolder clears out all information related to the given folder from the
|
2014-08-31 04:34:17 -07:00
|
|
|
// database.
|
2019-12-02 00:18:04 -07:00
|
|
|
func DropFolder(db *Lowlevel, folder string) {
|
2019-11-29 01:11:52 -07:00
|
|
|
droppers := []func([]byte) error{
|
|
|
|
db.dropFolder,
|
|
|
|
db.dropMtimes,
|
|
|
|
db.dropFolderMeta,
|
|
|
|
db.folderIdx.Delete,
|
|
|
|
}
|
|
|
|
for _, drop := range droppers {
|
|
|
|
if err := drop([]byte(folder)); backend.IsClosed(err) {
|
|
|
|
return
|
|
|
|
} else if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
2018-10-10 02:34:24 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// DropDeltaIndexIDs removes all delta index IDs from the database.
// This will cause a full index transmission on the next connection.
// Stops silently when the database has been closed; other database errors
// cause a panic.
func DropDeltaIndexIDs(db *Lowlevel) {
	dbi, err := db.NewPrefixIterator([]byte{KeyTypeIndexID})
	if backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
	defer dbi.Release()
	for dbi.Next() {
		if err := db.Delete(dbi.Key()); err != nil && !backend.IsClosed(err) {
			panic(err)
		}
	}
	// Check whether the iteration itself failed.
	if err := dbi.Error(); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}
|
|
|
|
|
2014-08-15 03:52:16 -07:00
|
|
|
func normalizeFilenames(fs []protocol.FileInfo) {
|
|
|
|
for i := range fs {
|
2014-11-05 16:41:51 -07:00
|
|
|
fs[i].Name = osutil.NormalizedFilename(fs[i].Name)
|
2014-08-15 03:52:16 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-09 00:18:42 -07:00
|
|
|
func nativeFileIterator(fn Iterator) Iterator {
|
|
|
|
return func(fi FileIntf) bool {
|
2014-08-15 03:52:16 -07:00
|
|
|
switch f := fi.(type) {
|
|
|
|
case protocol.FileInfo:
|
2014-11-05 16:41:51 -07:00
|
|
|
f.Name = osutil.NativeFilename(f.Name)
|
2014-08-15 03:52:16 -07:00
|
|
|
return fn(f)
|
2015-01-09 00:19:32 -07:00
|
|
|
case FileInfoTruncated:
|
2014-11-05 16:41:51 -07:00
|
|
|
f.Name = osutil.NativeFilename(f.Name)
|
2014-08-15 03:52:16 -07:00
|
|
|
return fn(f)
|
|
|
|
default:
|
|
|
|
panic("unknown interface type")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|