2014-11-16 13:13:20 -07:00
// Copyright (C) 2014 The Syncthing Authors.
2014-09-29 12:43:32 -07:00
//
2015-03-07 13:36:35 -07:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
2017-02-08 23:52:18 -07:00
// You can obtain one at https://mozilla.org/MPL/2.0/.
2014-06-01 13:50:14 -07:00
2014-05-14 20:26:55 -07:00
package model
2013-12-15 03:43:31 -07:00
import (
2018-05-05 01:24:44 -07:00
"bytes"
2019-11-19 01:56:53 -07:00
"context"
2015-03-10 15:45:43 -07:00
"encoding/json"
2013-12-23 10:12:44 -07:00
"fmt"
2014-01-05 15:54:57 -07:00
"net"
2014-03-28 06:36:57 -07:00
"path/filepath"
2015-06-03 00:47:39 -07:00
"reflect"
2015-04-29 11:46:32 -07:00
"runtime"
2014-08-11 11:20:01 -07:00
"strings"
2018-10-05 01:26:25 -07:00
stdsync "sync"
2013-12-15 03:43:31 -07:00
"time"
2020-04-14 11:26:26 -07:00
"unicode"
2014-06-21 00:43:12 -07:00
2019-11-23 08:20:54 -07:00
"github.com/pkg/errors"
"github.com/thejerf/suture"
2015-08-06 02:29:25 -07:00
"github.com/syncthing/syncthing/lib/config"
2016-05-04 12:38:12 -07:00
"github.com/syncthing/syncthing/lib/connections"
2015-08-06 02:29:25 -07:00
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
2016-08-05 10:45:45 -07:00
"github.com/syncthing/syncthing/lib/fs"
2015-08-06 02:29:25 -07:00
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
2015-09-22 10:38:46 -07:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 02:29:25 -07:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/sync"
2020-06-23 01:47:15 -07:00
"github.com/syncthing/syncthing/lib/ur/contract"
2019-07-09 02:40:30 -07:00
"github.com/syncthing/syncthing/lib/util"
2015-08-06 02:29:25 -07:00
"github.com/syncthing/syncthing/lib/versioner"
2013-12-15 03:43:31 -07:00
)
2014-07-15 04:04:37 -07:00
// How many files to send in each Index/IndexUpdate message.
// Both limits apply to each batch; whichever is hit first ends the batch.
const (
	maxBatchSizeBytes = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	maxBatchSizeFiles = 1000       // Either way, don't include more files than this
)
2014-07-15 04:04:37 -07:00
2014-09-30 08:52:05 -07:00
// service is the interface implemented by folder runners (pullers/scanners;
// see the folderRunners map). It combines folder control operations with the
// suture-style Serve/Stop lifecycle.
type service interface {
	// BringToFront reprioritizes the named file in the queue.
	BringToFront(string)
	Override()
	Revert()
	DelayScan(d time.Duration)
	SchedulePull()                                    // something relevant changed, we should try a pull
	Jobs(page, perpage int) ([]string, []string, int) // In progress, Queued, skipped
	Scan(subs []string) error
	Serve()
	Stop()
	Errors() []FileError
	WatchError() error
	ScheduleForceRescan(path string)
	GetStatistics() (stats.FolderStatistics, error)

	getState() (folderState, time.Time, error)
}
2016-04-15 03:59:41 -07:00
// Availability identifies a source for file data: the device that has it
// and whether it would be served from a temporary file on that device.
type Availability struct {
	ID            protocol.DeviceID `json:"id"`
	FromTemporary bool              `json:"fromTemporary"`
}
2019-02-26 01:09:25 -07:00
// Model is the public interface of the model service, covering folder
// control, scanning, ignore handling, per-file queries, and statistics.
// It embeds the suture.Service lifecycle and the connections.Model
// callbacks used by the connection layer.
type Model interface {
	suture.Service

	connections.Model

	ResetFolder(folder string)
	DelayScan(folder string, next time.Duration)
	ScanFolder(folder string) error
	ScanFolders() map[string]error
	ScanFolderSubdirs(folder string, subs []string) error
	State(folder string) (string, time.Time, error)
	FolderErrors(folder string) ([]FileError, error)
	WatchError(folder string) error
	Override(folder string)
	Revert(folder string)
	BringToFront(folder, file string)
	GetIgnores(folder string) ([]string, []string, error)
	SetIgnores(folder string, content []string) error

	GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error)
	RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error)

	DBSnapshot(folder string) (*db.Snapshot, error)
	NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated)
	FolderProgressBytesCompleted(folder string) int64

	CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
	CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool)
	Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability

	Completion(device protocol.DeviceID, folder string) FolderCompletion
	ConnectionStats() map[string]interface{}
	DeviceStatistics() (map[string]stats.DeviceStatistics, error)
	FolderStatistics() (map[string]stats.FolderStatistics, error)
	UsageReportingStats(report *contract.Report, version int, preview bool)

	StartDeadlockDetector(timeout time.Duration)
	GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{}
}
// model is the concrete Model implementation. Field access is split into
// three groups: immutable/concurrency-safe fields, folder state guarded by
// fmut, and connection state guarded by pmut.
type model struct {
	*suture.Supervisor

	// constructor parameters
	cfg            config.Wrapper
	id             protocol.DeviceID
	clientName     string
	clientVersion  string
	db             *db.Lowlevel
	protectedFiles []string
	evLogger       events.Logger

	// constant or concurrency safe fields
	finder            *db.BlockFinder
	progressEmitter   *ProgressEmitter
	shortID           protocol.ShortID
	cacheIgnoredFiles bool
	// globalRequestLimiter limits the amount of data in concurrent incoming
	// requests
	globalRequestLimiter *byteSemaphore
	// folderIOLimiter limits the number of concurrent I/O heavy operations,
	// such as scans and pulls.
	folderIOLimiter *byteSemaphore

	// fields protected by fmut
	fmut               sync.RWMutex
	folderCfgs         map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles        map[string]*db.FileSet                                 // folder -> files
	deviceStatRefs     map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores      map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners      map[string]service                                     // folder -> puller or scanner
	folderRunnerTokens map[string][]suture.ServiceToken                       // folder -> tokens for puller or scanner
	folderRestartMuts  syncMutexMap                                           // folder -> restart mutex
	folderVersioners   map[string]versioner.Versioner                         // folder -> versioner (may be nil)

	// fields protected by pmut
	pmut                sync.RWMutex
	conn                map[protocol.DeviceID]connections.Connection
	connRequestLimiters map[protocol.DeviceID]*byteSemaphore
	closed              map[protocol.DeviceID]chan struct{}
	helloMessages       map[protocol.DeviceID]protocol.HelloResult
	deviceDownloads     map[protocol.DeviceID]*deviceDownloadState
	remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders

	foldersRunning int32 // for testing only
}
2020-02-01 00:02:18 -07:00
// folderFactory constructs a folder runner (service) of a particular folder
// type from its constituent parts; see addAndStartFolderLockedWithIgnores.
type folderFactory func(*model, *db.FileSet, *ignore.Matcher, config.FolderConfiguration, versioner.Versioner, fs.Filesystem, events.Logger, *byteSemaphore) service
2016-05-04 03:47:33 -07:00
2014-01-07 14:44:21 -07:00
var (
2019-02-02 04:09:07 -07:00
folderFactories = make ( map [ config . FolderType ] folderFactory )
2014-01-07 14:44:21 -07:00
)
2014-01-06 13:31:36 -07:00
2016-06-26 03:07:27 -07:00
// Sentinel errors returned by model operations. Only ErrFolderPaused is
// exported for callers outside the package to compare against.
var (
	errDeviceUnknown     = errors.New("unknown device")
	errDevicePaused      = errors.New("device is paused")
	errDeviceIgnored     = errors.New("device is ignored")
	errDeviceRemoved     = errors.New("device has been removed")
	ErrFolderPaused      = errors.New("folder is paused")
	errFolderNotRunning  = errors.New("folder is not running")
	errFolderMissing     = errors.New("no such folder")
	errNetworkNotAllowed = errors.New("network not allowed")
	errNoVersioner       = errors.New("folder has no versioner")
	// errors about why a connection is closed
	errIgnoredFolderRemoved = errors.New("folder no longer ignored")
	errReplacingConnection  = errors.New("replacing connection")
	errStopped              = errors.New("Syncthing is being stopped")
)
2014-01-06 03:11:18 -07:00
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, ldb *db.Lowlevel, protectedFiles []string, evLogger events.Logger) Model {
	m := &model{
		Supervisor: suture.New("model", suture.Spec{
			Log: func(line string) {
				l.Debugln(line)
			},
			PassThroughPanics: true,
		}),

		// constructor parameters
		cfg:            cfg,
		id:             id,
		clientName:     clientName,
		clientVersion:  clientVersion,
		db:             ldb,
		protectedFiles: protectedFiles,
		evLogger:       evLogger,

		// constant or concurrency safe fields
		finder:               db.NewBlockFinder(ldb),
		progressEmitter:      NewProgressEmitter(cfg, evLogger),
		shortID:              id.Short(),
		cacheIgnoredFiles:    cfg.Options().CacheIgnoredFiles,
		globalRequestLimiter: newByteSemaphore(1024 * cfg.Options().MaxConcurrentIncomingRequestKiB()),
		folderIOLimiter:      newByteSemaphore(cfg.Options().MaxFolderConcurrency()),

		// fields protected by fmut
		fmut:               sync.NewRWMutex(),
		folderCfgs:         make(map[string]config.FolderConfiguration),
		folderFiles:        make(map[string]*db.FileSet),
		deviceStatRefs:     make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
		folderIgnores:      make(map[string]*ignore.Matcher),
		folderRunners:      make(map[string]service),
		folderRunnerTokens: make(map[string][]suture.ServiceToken),
		folderVersioners:   make(map[string]versioner.Versioner),

		// fields protected by pmut
		pmut:                sync.NewRWMutex(),
		conn:                make(map[protocol.DeviceID]connections.Connection),
		connRequestLimiters: make(map[protocol.DeviceID]*byteSemaphore),
		closed:              make(map[protocol.DeviceID]chan struct{}),
		helloMessages:       make(map[protocol.DeviceID]protocol.HelloResult),
		deviceDownloads:     make(map[protocol.DeviceID]*deviceDownloadState),
		remotePausedFolders: make(map[protocol.DeviceID][]string),
	}

	// Pre-create a statistics reference for every configured device.
	for devID := range cfg.Devices() {
		m.deviceStatRefs[devID] = stats.NewDeviceStatisticsReference(m.db, devID.String())
	}

	// The progress emitter runs under the model's supervisor.
	m.Add(m.progressEmitter)

	return m
}
2019-11-08 02:56:16 -07:00
// Serve performs common startup (onServe) and then runs the supervisor in
// the calling goroutine.
func (m *model) Serve() {
	m.onServe()
	m.Supervisor.Serve()
}
// ServeBackground performs common startup (onServe) and then runs the
// supervisor in a background goroutine.
func (m *model) ServeBackground() {
	m.onServe()
	m.Supervisor.ServeBackground()
}
func ( m * model ) onServe ( ) {
2019-11-08 02:56:16 -07:00
// Add and start folders
for _ , folderCfg := range m . cfg . Folders ( ) {
if folderCfg . Paused {
folderCfg . CreateRoot ( )
continue
}
2019-11-18 13:15:26 -07:00
m . newFolder ( folderCfg )
2019-11-08 02:56:16 -07:00
}
2019-12-03 23:15:00 -07:00
m . cfg . Subscribe ( m )
2019-11-08 02:56:16 -07:00
}
2019-05-25 07:00:32 -07:00
func ( m * model ) Stop ( ) {
2019-12-03 23:15:00 -07:00
m . cfg . Unsubscribe ( m )
2019-05-25 07:00:32 -07:00
m . Supervisor . Stop ( )
devs := m . cfg . Devices ( )
ids := make ( [ ] protocol . DeviceID , 0 , len ( devs ) )
for id := range devs {
ids = append ( ids , id )
}
2019-07-14 02:03:55 -07:00
w := m . closeConns ( ids , errStopped )
w . Wait ( )
2019-05-25 07:00:32 -07:00
}
2015-04-28 13:32:10 -07:00
// StartDeadlockDetector starts a deadlock detector on the models locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
2019-02-26 01:09:25 -07:00
func ( m * model ) StartDeadlockDetector ( timeout time . Duration ) {
2015-04-08 05:35:03 -07:00
l . Infof ( "Starting deadlock detector with %v timeout" , timeout )
2016-10-29 16:14:38 -07:00
detector := newDeadlockDetector ( timeout )
detector . Watch ( "fmut" , m . fmut )
detector . Watch ( "pmut" , m . pmut )
2015-04-08 05:35:03 -07:00
}
2020-05-05 23:34:54 -07:00
// Need to hold lock on m.fmut when calling this.
func ( m * model ) addAndStartFolderLocked ( cfg config . FolderConfiguration , fset * db . FileSet ) {
ignores := ignore . New ( cfg . Filesystem ( ) , ignore . WithCache ( m . cacheIgnoredFiles ) )
if err := ignores . Load ( ".stignore" ) ; err != nil && ! fs . IsNotExist ( err ) {
l . Warnln ( "Loading ignores:" , err )
}
2019-11-18 13:15:26 -07:00
2020-05-05 23:34:54 -07:00
m . addAndStartFolderLockedWithIgnores ( cfg , fset , ignores )
2016-08-07 09:21:59 -07:00
}
2020-05-05 23:34:54 -07:00
// Only needed for testing, use addAndStartFolderLocked instead.
func ( m * model ) addAndStartFolderLockedWithIgnores ( cfg config . FolderConfiguration , fset * db . FileSet , ignores * ignore . Matcher ) {
m . folderCfgs [ cfg . ID ] = cfg
m . folderFiles [ cfg . ID ] = fset
m . folderIgnores [ cfg . ID ] = ignores
2019-07-23 01:51:16 -07:00
_ , ok := m . folderRunners [ cfg . ID ]
if ok {
2019-07-10 01:57:49 -07:00
l . Warnln ( "Cannot start already running folder" , cfg . Description ( ) )
panic ( "cannot start already running folder" )
2014-09-27 05:44:15 -07:00
}
2016-05-04 03:47:33 -07:00
folderFactory , ok := folderFactories [ cfg . Type ]
if ! ok {
2016-05-04 04:26:36 -07:00
panic ( fmt . Sprintf ( "unknown folder type 0x%x" , cfg . Type ) )
2016-05-04 03:47:33 -07:00
}
2019-04-21 05:21:36 -07:00
folder := cfg . ID
2016-08-07 09:21:59 -07:00
// Find any devices for which we hold the index in the db, but the folder
// is not shared, and drop it.
expected := mapDevices ( cfg . DeviceIDs ( ) )
2019-03-10 23:28:54 -07:00
for _ , available := range fset . ListDevices ( ) {
2016-08-07 09:21:59 -07:00
if _ , ok := expected [ available ] ; ! ok {
l . Debugln ( "dropping" , folder , "state for" , available )
2019-03-10 23:28:54 -07:00
fset . Drop ( available )
2016-08-07 09:21:59 -07:00
}
}
2019-03-10 23:28:54 -07:00
v , ok := fset . Sequence ( protocol . LocalDeviceID ) , true
2016-06-26 03:07:27 -07:00
indexHasFiles := ok && v > 0
if ! indexHasFiles {
// It's a blank folder, so this may the first time we're looking at
// it. Attempt to create and tag with our marker as appropriate. We
// don't really do anything with errors at this point except warn -
// if these things don't work, we still want to start the folder and
// it'll show up as errored later.
2018-06-10 06:41:20 -07:00
if err := cfg . CreateRoot ( ) ; err != nil {
l . Warnln ( "Failed to create folder root directory" , err )
} else if err = cfg . CreateMarker ( ) ; err != nil {
l . Warnln ( "Failed to create folder marker:" , err )
2016-06-26 03:07:27 -07:00
}
}
2019-03-10 23:28:54 -07:00
ffs := fset . MtimeFS ( )
2017-09-19 23:49:04 -07:00
// These are our metadata files, and they should always be hidden.
2019-11-26 00:39:31 -07:00
_ = ffs . Hide ( config . DefaultMarkerName )
_ = ffs . Hide ( ".stversions" )
_ = ffs . Hide ( ".stignore" )
var ver versioner . Versioner
if cfg . Versioning . Type != "" {
var err error
2020-06-17 23:15:47 -07:00
ver , err = versioner . New ( cfg )
2019-11-26 00:39:31 -07:00
if err != nil {
2020-03-03 14:40:00 -07:00
panic ( fmt . Errorf ( "creating versioner: %w" , err ) )
2019-11-26 00:39:31 -07:00
}
if service , ok := ver . ( suture . Service ) ; ok {
// The versioner implements the suture.Service interface, so
// expects to be run in the background in addition to being called
// when files are going to be archived.
token := m . Add ( service )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
}
}
m . folderVersioners [ folder ] = ver
2017-09-19 23:49:04 -07:00
2020-02-01 00:02:18 -07:00
p := folderFactory ( m , fset , ignores , cfg , ver , ffs , m . evLogger , m . folderIOLimiter )
2017-10-20 07:52:55 -07:00
2016-05-04 03:47:33 -07:00
m . folderRunners [ folder ] = p
2019-11-18 13:15:26 -07:00
m . warnAboutOverwritingProtectedFiles ( cfg , ignores )
2015-10-18 17:13:58 -07:00
2015-11-13 05:30:52 -07:00
token := m . Add ( p )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
2019-11-18 13:15:26 -07:00
l . Infof ( "Ready to synchronize %s (%s)" , cfg . Description ( ) , cfg . Type )
2014-03-28 06:36:57 -07:00
}
2014-01-06 03:11:18 -07:00
2019-11-18 13:15:26 -07:00
func ( m * model ) warnAboutOverwritingProtectedFiles ( cfg config . FolderConfiguration , ignores * ignore . Matcher ) {
if cfg . Type == config . FolderTypeSendOnly {
2015-10-18 17:13:58 -07:00
return
}
2017-08-19 07:36:56 -07:00
// This is a bit of a hack.
2019-11-18 13:15:26 -07:00
ffs := cfg . Filesystem ( )
2017-08-19 07:36:56 -07:00
if ffs . Type ( ) != fs . FilesystemTypeBasic {
return
}
folderLocation := ffs . URI ( )
2015-10-18 17:13:58 -07:00
var filesAtRisk [ ] string
for _ , protectedFilePath := range m . protectedFiles {
// check if file is synced in this folder
2018-11-22 03:16:45 -07:00
if protectedFilePath != folderLocation && ! fs . IsParent ( protectedFilePath , folderLocation ) {
2015-10-18 17:13:58 -07:00
continue
}
// check if file is ignored
2017-03-04 00:49:48 -07:00
relPath , _ := filepath . Rel ( folderLocation , protectedFilePath )
if ignores . Match ( relPath ) . IsIgnored ( ) {
2015-10-18 17:13:58 -07:00
continue
}
filesAtRisk = append ( filesAtRisk , protectedFilePath )
}
if len ( filesAtRisk ) > 0 {
2016-10-27 10:02:19 -07:00
l . Warnln ( "Some protected files may be overwritten and cause issues. See https://docs.syncthing.net/users/config.html#syncing-configuration-files for more information. The at risk files are:" , strings . Join ( filesAtRisk , ", " ) )
2015-10-18 17:13:58 -07:00
}
}
2019-11-08 02:56:16 -07:00
func ( m * model ) removeFolder ( cfg config . FolderConfiguration ) {
2019-11-18 13:15:26 -07:00
m . stopFolder ( cfg , fmt . Errorf ( "removing folder %v" , cfg . Description ( ) ) )
2015-11-13 05:30:52 -07:00
m . fmut . Lock ( )
2019-04-21 05:21:36 -07:00
2019-10-30 08:11:07 -07:00
isPathUnique := true
for folderID , folderCfg := range m . folderCfgs {
if folderID != cfg . ID && folderCfg . Path == cfg . Path {
isPathUnique = false
break
}
}
if isPathUnique {
// Delete syncthing specific files
cfg . Filesystem ( ) . RemoveAll ( config . DefaultMarkerName )
}
2017-01-07 10:05:30 -07:00
2020-05-05 23:34:54 -07:00
m . cleanupFolderLocked ( cfg )
2019-11-18 13:15:26 -07:00
m . fmut . Unlock ( )
2016-08-07 09:21:59 -07:00
// Remove it from the database
2017-10-03 15:53:02 -07:00
db . DropFolder ( m . db , cfg . ID )
2016-08-07 09:21:59 -07:00
}
2019-11-18 13:15:26 -07:00
// stopFolder stops all services running for the folder, waits for them to
// finish, and closes connections to the folder's devices with the given
// reason. fmut is only held briefly so the runners can use it while
// shutting down.
func (m *model) stopFolder(cfg config.FolderConfiguration, err error) {
	// Stop the services running for this folder and wait for them to finish
	// stopping to prevent races on restart.
	m.fmut.RLock()
	tokens := m.folderRunnerTokens[cfg.ID]
	m.fmut.RUnlock()

	for _, id := range tokens {
		m.RemoveAndWait(id, 0)
	}

	// Wait for connections to stop to ensure that no more calls to methods
	// expecting this folder to exist happen (e.g. .IndexUpdate).
	m.closeConns(cfg.DeviceIDs(), err).Wait()
}
2019-07-14 02:03:55 -07:00
2019-11-18 13:15:26 -07:00
// Need to hold lock on m.fmut when calling this.
2020-05-05 23:34:54 -07:00
func ( m * model ) cleanupFolderLocked ( cfg config . FolderConfiguration ) {
2020-06-23 01:47:15 -07:00
// clear up our config maps
2018-06-06 14:34:11 -07:00
delete ( m . folderCfgs , cfg . ID )
delete ( m . folderFiles , cfg . ID )
delete ( m . folderIgnores , cfg . ID )
delete ( m . folderRunners , cfg . ID )
delete ( m . folderRunnerTokens , cfg . ID )
2019-11-26 00:39:31 -07:00
delete ( m . folderVersioners , cfg . ID )
2016-08-07 09:21:59 -07:00
}
2015-11-13 05:30:52 -07:00
2019-11-08 02:56:16 -07:00
// restartFolder transitions a folder between the from and to configurations:
// it stops the old instance, closes affected connections, and starts the new
// instance unless it is paused. Serialized per folder via folderRestartMuts.
func (m *model) restartFolder(from, to config.FolderConfiguration) {
	if len(to.ID) == 0 {
		panic("bug: cannot restart empty folder ID")
	}
	if to.ID != from.ID {
		l.Warnf("bug: folder restart cannot change ID %q -> %q", from.ID, to.ID)
		panic("bug: folder restart cannot change ID")
	}

	// This mutex protects the entirety of the restart operation, preventing
	// there from being more than one folder restart operation in progress
	// at any given time. The usual fmut/pmut stuff doesn't cover this,
	// because those locks are released while we are waiting for the folder
	// to shut down (and must be so because the folder might need them as
	// part of its operations before shutting down).
	restartMut := m.folderRestartMuts.Get(to.ID)
	restartMut.Lock()
	defer restartMut.Unlock()

	// Decide the user-visible log message and the connection-close reason
	// based on the kind of transition.
	var infoMsg string
	var errMsg string
	switch {
	case to.Paused:
		infoMsg = "Paused"
		errMsg = "pausing"
	case from.Paused:
		infoMsg = "Unpaused"
		errMsg = "unpausing"
	default:
		infoMsg = "Restarted"
		errMsg = "restarting"
	}

	var fset *db.FileSet
	if !to.Paused {
		// Creating the fileset can take a long time (metadata calculation)
		// so we do it outside of the lock.
		fset = db.NewFileSet(to.ID, to.Filesystem(), m.db)
	}

	err := fmt.Errorf("%v folder %v", errMsg, to.Description())
	m.stopFolder(from, err)
	// Need to send CC change to both from and to devices.
	m.closeConns(to.DeviceIDs(), err)

	m.fmut.Lock()
	defer m.fmut.Unlock()

	m.cleanupFolderLocked(from)
	if !to.Paused {
		m.addAndStartFolderLocked(to, fset)
	}
	l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type)
}
2019-11-18 13:15:26 -07:00
// newFolder creates the folder's file set, closes connections to the
// folder's devices (so they reconnect and exchange cluster config), and
// starts the folder under fmut.
func (m *model) newFolder(cfg config.FolderConfiguration) {
	// Creating the fileset can take a long time (metadata calculation) so
	// we do it outside of the lock.
	fset := db.NewFileSet(cfg.ID, cfg.Filesystem(), m.db)

	// Close connections to affected devices
	m.closeConns(cfg.DeviceIDs(), fmt.Errorf("started folder %v", cfg.Description()))

	m.fmut.Lock()
	defer m.fmut.Unlock()
	m.addAndStartFolderLocked(cfg, fset)
}
2020-06-23 01:47:15 -07:00
// UsageReportingStats fills in the block, transport and ignore-pattern
// sections of the usage report. These are only collected for report version
// 3 and later. When preview is false the incremental block counters are
// reset after being read.
func (m *model) UsageReportingStats(report *contract.Report, version int, preview bool) {
	if version >= 3 {
		// Block stats
		blockStatsMut.Lock()
		for k, v := range blockStats {
			switch k {
			case "total":
				report.BlockStats.Total = v
			case "renamed":
				report.BlockStats.Renamed = v
			case "reused":
				report.BlockStats.Reused = v
			case "pulled":
				report.BlockStats.Pulled = v
			case "copyOrigin":
				report.BlockStats.CopyOrigin = v
			case "copyOriginShifted":
				report.BlockStats.CopyOriginShifted = v
			case "copyElsewhere":
				report.BlockStats.CopyElsewhere = v
			}
			// Reset counts, as these are incremental
			if !preview {
				blockStats[k] = 0
			}
		}
		blockStatsMut.Unlock()

		// Transport stats: count connections per transport name.
		m.pmut.RLock()
		for _, conn := range m.conn {
			report.TransportStats[conn.Transport()]++
		}
		m.pmut.RUnlock()

		// Ignore stats: classify each ignore pattern line across all folders.
		var seenPrefix [3]bool
		for folder := range m.cfg.Folders() {
			lines, _, err := m.GetIgnores(folder)
			if err != nil {
				// Folder may be paused or missing; skip it.
				continue
			}
			report.IgnoreStats.Lines += len(lines)

			for _, line := range lines {
				// Allow prefixes to be specified in any order, but only once.
				for {
					if strings.HasPrefix(line, "!") && !seenPrefix[0] {
						seenPrefix[0] = true
						line = line[1:]
						report.IgnoreStats.Inverts++
					} else if strings.HasPrefix(line, "(?i)") && !seenPrefix[1] {
						seenPrefix[1] = true
						line = line[4:]
						report.IgnoreStats.Folded++
					} else if strings.HasPrefix(line, "(?d)") && !seenPrefix[2] {
						seenPrefix[2] = true
						line = line[4:]
						report.IgnoreStats.Deletable++
					} else {
						seenPrefix[0] = false
						seenPrefix[1] = false
						seenPrefix[2] = false
						break
					}
				}

				// Noops, remove
				line = strings.TrimSuffix(line, "**")
				line = strings.TrimPrefix(line, "**/")

				if strings.HasPrefix(line, "/") {
					report.IgnoreStats.Rooted++
				} else if strings.HasPrefix(line, "#include ") {
					report.IgnoreStats.Includes++
					if strings.Contains(line, "..") {
						report.IgnoreStats.EscapedIncludes++
					}
				}

				if strings.Contains(line, "**") {
					report.IgnoreStats.DoubleStars++
					// Remove not to trip up star checks.
					line = strings.Replace(line, "**", "", -1)
				}

				if strings.Contains(line, "*") {
					report.IgnoreStats.Stars++
				}
			}
		}
	}
}
2014-01-05 15:54:57 -07:00
// ConnectionInfo describes one device connection for the connection
// statistics API: traffic counters plus connection metadata.
type ConnectionInfo struct {
	protocol.Statistics

	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          string
	Crypto        string
}
2015-03-10 15:45:43 -07:00
func ( info ConnectionInfo ) MarshalJSON ( ) ( [ ] byte , error ) {
return json . Marshal ( map [ string ] interface { } {
"at" : info . At ,
"inBytesTotal" : info . InBytesTotal ,
"outBytesTotal" : info . OutBytesTotal ,
2015-08-23 12:56:10 -07:00
"connected" : info . Connected ,
"paused" : info . Paused ,
2015-03-10 15:45:43 -07:00
"address" : info . Address ,
"clientVersion" : info . ClientVersion ,
2016-05-04 12:38:12 -07:00
"type" : info . Type ,
2019-02-26 03:49:02 -07:00
"crypto" : info . Crypto ,
2015-03-10 15:45:43 -07:00
} )
}
2015-11-09 15:48:58 -07:00
// ConnectionStats returns a map with connection statistics for each device.
// The result has a "connections" entry (per-device ConnectionInfo, keyed by
// device ID string) and a "total" entry with aggregate traffic counters.
func (m *model) ConnectionStats() map[string]interface{} {
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	res := make(map[string]interface{})
	devs := m.cfg.Devices()
	conns := make(map[string]ConnectionInfo, len(devs))
	for device, deviceCfg := range devs {
		// Report "name version" for non-syncthing clients, else version only.
		hello := m.helloMessages[device]
		versionString := hello.ClientVersion
		if hello.ClientName != "syncthing" {
			versionString = hello.ClientName + " " + hello.ClientVersion
		}

		ci := ConnectionInfo{
			ClientVersion: strings.TrimSpace(versionString),
			Paused:        deviceCfg.Paused,
		}
		// Connection details are only available while actually connected.
		if conn, ok := m.conn[device]; ok {
			ci.Type = conn.Type()
			ci.Crypto = conn.Crypto()
			ci.Connected = ok
			ci.Statistics = conn.Statistics()
			if addr := conn.RemoteAddr(); addr != nil {
				ci.Address = addr.String()
			}
		}

		conns[device.String()] = ci
	}

	res["connections"] = conns

	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}
2015-04-28 13:32:10 -07:00
// DeviceStatistics returns statistics about each device
2019-11-30 05:03:24 -07:00
func ( m * model ) DeviceStatistics ( ) ( map [ string ] stats . DeviceStatistics , error ) {
2019-09-11 21:55:24 -07:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
res := make ( map [ string ] stats . DeviceStatistics , len ( m . deviceStatRefs ) )
for id , sr := range m . deviceStatRefs {
2019-11-30 05:03:24 -07:00
stats , err := sr . GetStatistics ( )
if err != nil {
return nil , err
}
res [ id . String ( ) ] = stats
2014-08-21 15:45:40 -07:00
}
2019-11-30 05:03:24 -07:00
return res , nil
2014-08-21 15:45:40 -07:00
}
2015-04-28 13:32:10 -07:00
// FolderStatistics returns statistics about each folder
2019-11-30 05:03:24 -07:00
func ( m * model ) FolderStatistics ( ) ( map [ string ] stats . FolderStatistics , error ) {
2016-12-06 01:54:04 -07:00
res := make ( map [ string ] stats . FolderStatistics )
2019-03-11 09:57:21 -07:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
for id , runner := range m . folderRunners {
2019-11-30 05:03:24 -07:00
stats , err := runner . GetStatistics ( )
if err != nil {
return nil , err
}
res [ id ] = stats
2014-12-07 13:21:12 -07:00
}
2019-11-30 05:03:24 -07:00
return res , nil
2014-12-07 13:21:12 -07:00
}
2016-08-11 23:41:43 -07:00
// FolderCompletion summarizes how far synchronization has progressed:
// global totals, what is still needed, and the derived completion
// percentage (see setComplectionPct).
type FolderCompletion struct {
	CompletionPct float64
	GlobalBytes   int64
	NeedBytes     int64
	GlobalItems   int32
	NeedItems     int32
	NeedDeletes   int32
}
2020-07-02 23:48:37 -07:00
// newFolderCompletion builds a FolderCompletion from the folder's global
// counts and the device's needed counts, with the completion percentage
// already computed.
func newFolderCompletion(global, need db.Counts) FolderCompletion {
	globalItems := global.Files + global.Directories + global.Symlinks
	neededItems := need.Files + need.Directories + need.Symlinks

	c := FolderCompletion{
		GlobalBytes: global.Bytes,
		NeedBytes:   need.Bytes,
		GlobalItems: globalItems,
		NeedItems:   neededItems,
		NeedDeletes: need.Deleted,
	}
	c.setComplectionPct()
	return c
}
// add accumulates the counters from other into comp and refreshes the
// completion percentage to match the combined totals.
func (comp *FolderCompletion) add(other FolderCompletion) {
	comp.GlobalBytes += other.GlobalBytes
	comp.GlobalItems += other.GlobalItems
	comp.NeedBytes += other.NeedBytes
	comp.NeedItems += other.NeedItems
	comp.NeedDeletes += other.NeedDeletes
	comp.setComplectionPct()
}
// setComplectionPct recomputes CompletionPct from the byte counters. (The
// method name carries a historical misspelling; it is kept because callers
// in this file use it.)
func (comp *FolderCompletion) setComplectionPct() {
	comp.CompletionPct = 100
	if comp.GlobalBytes != 0 {
		comp.CompletionPct = 100 * (1 - float64(comp.NeedBytes)/float64(comp.GlobalBytes))
	}
	// A folder with nothing left to download byte-wise may still have
	// pending deletes. Report slightly below 100% so consumers that look
	// only at the percentage see there is work left (our own GUI performs
	// the same adjustment independently).
	if comp.NeedBytes == 0 && comp.NeedDeletes > 0 {
		comp.CompletionPct = 95 // chosen by fair dice roll
	}
}
2019-03-26 12:53:58 -07:00
// Map returns the members as a map, e.g. used in api to serialize as Json.
func ( comp FolderCompletion ) Map ( ) map [ string ] interface { } {
return map [ string ] interface { } {
"completion" : comp . CompletionPct ,
2020-07-02 23:48:37 -07:00
"globalBytes" : comp . GlobalBytes ,
2019-03-26 12:53:58 -07:00
"needBytes" : comp . NeedBytes ,
2020-07-02 23:48:37 -07:00
"globalItems" : comp . GlobalItems ,
2019-03-26 12:53:58 -07:00
"needItems" : comp . NeedItems ,
"needDeletes" : comp . NeedDeletes ,
}
}
2020-07-02 23:48:37 -07:00
// Completion returns the completion status, in percent with some counters,
// for the given device and folder. The device can be any known device ID
// (including the local device) or explicitly protocol.LocalDeviceID. An
// empty folder string means the aggregate of all folders shared with the
// given device.
2019-02-26 01:09:25 -07:00
func ( m * model ) Completion ( device protocol . DeviceID , folder string ) FolderCompletion {
2020-07-02 23:48:37 -07:00
// The user specifically asked for our own device ID. Internally that is
// known as protocol.LocalDeviceID so translate.
if device == m . id {
device = protocol . LocalDeviceID
}
if folder != "" {
// We want completion for a specific folder.
return m . folderCompletion ( device , folder )
}
// We want completion for all (shared) folders as an aggregate.
var comp FolderCompletion
for _ , fcfg := range m . cfg . FolderList ( ) {
if device == protocol . LocalDeviceID || fcfg . SharedWith ( device ) {
comp . add ( m . folderCompletion ( device , fcfg . ID ) )
}
}
return comp
}
// folderCompletion computes the completion status of the given folder for
// the given device (which may be protocol.LocalDeviceID). Unknown folders
// report as fully needed; empty folders report as 100% complete.
func (m *model) folderCompletion(device protocol.DeviceID, folder string) FolderCompletion {
	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it
	}

	snap := rf.Snapshot()
	defer snap.Release()
	global := snap.GlobalSize()
	if global.Bytes == 0 {
		// Folder is empty, so we have all of it
		return FolderCompletion{
			CompletionPct: 100,
		}
	}

	// Bytes of needed files already partially downloaded, tracked per
	// connection under pmut.
	m.pmut.RLock()
	downloaded := m.deviceDownloads[device].BytesDownloaded(folder)
	m.pmut.RUnlock()

	need := snap.NeedSize(device)
	need.Bytes -= downloaded
	// This might be more than it really is, because some blocks can be of a smaller size.
	if need.Bytes < 0 {
		need.Bytes = 0
	}

	comp := newFolderCompletion(global, need)

	l.Debugf("%v Completion(%s, %q): %v", m, device, folder, comp.Map())
	return comp
}
2020-01-21 10:23:08 -07:00
// DBSnapshot returns a snapshot of the database content relevant to the given folder.
func ( m * model ) DBSnapshot ( folder string ) ( * db . Snapshot , error ) {
2014-09-28 04:39:39 -07:00
m . fmut . RLock ( )
2020-02-11 23:35:24 -07:00
err := m . checkFolderRunningLocked ( folder )
rf := m . folderFiles [ folder ]
2019-08-13 00:04:43 -07:00
m . fmut . RUnlock ( )
2020-02-11 23:35:24 -07:00
if err != nil {
return nil , err
2014-03-29 10:53:48 -07:00
}
2020-01-21 10:23:08 -07:00
return rf . Snapshot ( ) , nil
2014-01-05 22:38:01 -07:00
}
2020-01-21 10:23:08 -07:00
// FolderProgressBytesCompleted returns the number of bytes already
// transferred for the in-progress downloads of the given folder, as
// tracked by the progress emitter.
func (m *model) FolderProgressBytesCompleted(folder string) int64 {
	return m.progressEmitter.BytesCompleted(folder)
}
2015-04-28 13:32:10 -07:00
// NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration.
//
// Pagination spans all three lists in order: entries already accounted for
// by the runner's job queue (in progress, then queued) come first, and the
// remainder of the page is filled from the database's need list, skipping
// names already seen. Returns three nil slices for an unknown folder.
func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
	m.fmut.RLock()
	rf, rfOk := m.folderFiles[folder]
	runner, runnerOk := m.folderRunners[folder]
	cfg := m.folderCfgs[folder]
	m.fmut.RUnlock()

	if !rfOk {
		return nil, nil, nil
	}

	snap := rf.Snapshot()
	defer snap.Release()
	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	// skip/get track how much of the requested page remains: skip entries
	// belong to earlier pages, get is how many entries are still wanted.
	skip := (page - 1) * perpage
	get := perpage

	if runnerOk {
		// The runner itself paginates its job lists; "skipped" is how many
		// of its entries fell on earlier pages.
		progressNames, queuedNames, skipped := runner.Jobs(page, perpage)

		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))

		// Resolve job names to their global file info; remember each name
		// so the database pass below doesn't list it again.
		for i, name := range progressNames {
			if f, ok := snap.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}

		for i, name := range queuedNames {
			if f, ok := snap.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}

		// Account for what the job lists contributed to this page.
		get -= len(seen)
		if get == 0 {
			return progress, queued, nil
		}
		skip -= skipped
	}

	// Fill the rest of the page from the database's need list.
	rest = make([]db.FileInfoTruncated, 0, perpage)
	snap.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
		if cfg.IgnoreDelete && f.IsDeleted() {
			return true
		}
		if skip > 0 {
			skip--
			return true
		}
		ft := f.(db.FileInfoTruncated)
		if _, ok := seen[ft.Name]; !ok {
			rest = append(rest, ft)
			get--
		}
		// Stop iterating once the page is full.
		return get > 0
	})

	return progress, queued, rest
}
2014-09-28 04:00:38 -07:00
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) error {
	// Full index: handleIndex drops previous knowledge of this device first.
	return m.handleIndex(deviceID, folder, fs, false)
}
2014-09-28 04:00:38 -07:00
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) error {
	// Incremental update: existing knowledge of this device is kept.
	return m.handleIndex(deviceID, folder, fs, true)
}
2019-12-04 02:46:55 -07:00
// handleIndex is the shared implementation behind Index and IndexUpdate.
// It validates that the folder is shared with the device and not paused,
// updates download-progress bookkeeping, stores the received files in the
// folder's file set (dropping prior knowledge for a full index), and emits
// a RemoteIndexUpdated event.
func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo, update bool) error {
	// Operation name, used only in log messages below.
	op := "Index"
	if update {
		op += " update"
	}
	l.Debugf("%v (in): %s / %q: %d files", op, deviceID, folder, len(fs))

	// Reject indexes for folders not shared with this device or paused here.
	if cfg, ok := m.cfg.Folder(folder); !ok || !cfg.SharedWith(deviceID) {
		l.Infof("%v for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", op, folder, deviceID)
		return errors.Wrap(errFolderMissing, folder)
	} else if cfg.Paused {
		l.Debugf("%v for paused folder (ID %q) sent from device %q.", op, folder, deviceID)
		return errors.Wrap(ErrFolderPaused, folder)
	}

	m.fmut.RLock()
	files, existing := m.folderFiles[folder]
	runner, running := m.folderRunners[folder]
	m.fmut.RUnlock()

	if !existing {
		l.Infof("%v for nonexistent folder %q", op, folder)
		return errors.Wrap(errFolderMissing, folder)
	}
	// Kick the puller once the new index data has been committed below.
	if running {
		defer runner.SchedulePull()
	}

	// Forget temporary-download state for files this index supersedes.
	m.pmut.RLock()
	downloads := m.deviceDownloads[deviceID]
	m.pmut.RUnlock()
	downloads.Update(folder, makeForgetUpdate(fs))

	// A full index replaces everything previously known from this device.
	if !update {
		files.Drop(deviceID)
	}
	for i := range fs {
		// The local attributes should never be transmitted over the wire.
		// Make sure they look like they weren't.
		fs[i].LocalFlags = 0
		fs[i].VersionHash = nil
	}
	files.Update(deviceID, fs)

	m.evLogger.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.Sequence(deviceID),
	})

	return nil
}
2019-12-04 02:46:55 -07:00
func ( m * model ) ClusterConfig ( deviceID protocol . DeviceID , cm protocol . ClusterConfig ) error {
2016-01-01 12:11:12 -07:00
// Check the peer device's announced folders against our own. Emits events
// for folders that we don't expect (unknown or not shared).
2016-04-15 03:59:41 -07:00
// Also, collect a list of folders we do share, and if he's interested in
// temporary indexes, subscribe the connection.
tempIndexFolders := make ( [ ] string , 0 , len ( cm . Folders ) )
2016-01-01 12:11:12 -07:00
2016-07-23 05:46:31 -07:00
m . pmut . RLock ( )
2016-07-27 14:36:25 -07:00
conn , ok := m . conn [ deviceID ]
2019-06-10 04:27:22 -07:00
closed := m . closed [ deviceID ]
2016-07-23 05:46:31 -07:00
m . pmut . RUnlock ( )
2016-07-27 14:36:25 -07:00
if ! ok {
panic ( "bug: ClusterConfig called on closed or nonexistent connection" )
}
2016-07-23 05:46:31 -07:00
2017-12-07 00:08:24 -07:00
changed := false
2020-05-20 02:13:55 -07:00
deviceCfg , ok := m . cfg . Devices ( ) [ deviceID ]
if ! ok {
l . Debugln ( "Device disappeared from config while processing cluster-config" )
return errDeviceUnknown
}
2016-07-27 14:38:43 -07:00
2017-12-07 00:08:24 -07:00
// Needs to happen outside of the fmut, as can cause CommitConfiguration
if deviceCfg . AutoAcceptFolders {
2020-05-26 23:05:26 -07:00
changedFolders := make ( [ ] config . FolderConfiguration , 0 , len ( cm . Folders ) )
2017-12-07 00:08:24 -07:00
for _ , folder := range cm . Folders {
2020-05-26 23:05:26 -07:00
if fcfg , fchanged := m . handleAutoAccepts ( deviceCfg , folder ) ; fchanged {
changedFolders = append ( changedFolders , fcfg )
}
}
if len ( changedFolders ) > 0 {
// Need to wait for the waiter, as this calls CommitConfiguration,
// which sets up the folder and as we return from this call,
// ClusterConfig starts poking at m.folderFiles and other things
// that might not exist until the config is committed.
w , _ := m . cfg . SetFolders ( changedFolders )
w . Wait ( )
2017-12-07 00:08:24 -07:00
}
}
2019-08-11 10:30:24 -07:00
m . fmut . RLock ( )
2016-12-21 11:41:25 -07:00
var paused [ ] string
2016-01-01 12:11:12 -07:00
for _ , folder := range cm . Folders {
2018-06-06 14:34:11 -07:00
cfg , ok := m . cfg . Folder ( folder . ID )
if ! ok || ! cfg . SharedWith ( deviceID ) {
2018-08-25 03:36:10 -07:00
if deviceCfg . IgnoredFolder ( folder . ID ) {
2018-06-06 14:34:11 -07:00
l . Infof ( "Ignoring folder %s from device %s since we are configured to" , folder . Description ( ) , deviceID )
continue
}
2018-08-25 03:36:10 -07:00
m . cfg . AddOrUpdatePendingFolder ( folder . ID , folder . Label , deviceID )
2019-07-27 03:05:00 -07:00
changed = true
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( events . FolderRejected , map [ string ] string {
2016-03-11 02:48:46 -07:00
"folder" : folder . ID ,
"folderLabel" : folder . Label ,
"device" : deviceID . String ( ) ,
2016-01-01 12:11:12 -07:00
} )
2016-11-22 00:36:14 -07:00
l . Infof ( "Unexpected folder %s sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration." , folder . Description ( ) , deviceID )
2016-01-01 12:11:12 -07:00
continue
}
2018-06-06 14:34:11 -07:00
if folder . Paused {
paused = append ( paused , folder . ID )
continue
}
if cfg . Paused {
continue
}
2018-08-11 00:10:29 -07:00
fs , ok := m . folderFiles [ folder . ID ]
if ! ok {
// Shouldn't happen because !cfg.Paused, but might happen
// if the folder is about to be unpaused, but not yet.
continue
}
2017-12-07 00:08:24 -07:00
2016-07-04 03:40:29 -07:00
if ! folder . DisableTempIndexes {
2016-04-15 03:59:41 -07:00
tempIndexFolders = append ( tempIndexFolders , folder . ID )
}
2016-07-23 05:46:31 -07:00
myIndexID := fs . IndexID ( protocol . LocalDeviceID )
2016-07-29 12:54:24 -07:00
mySequence := fs . Sequence ( protocol . LocalDeviceID )
var startSequence int64
2016-07-23 05:46:31 -07:00
for _ , dev := range folder . Devices {
2016-10-29 14:56:24 -07:00
if dev . ID == m . id {
2016-07-23 05:46:31 -07:00
// This is the other side's description of what it knows
// about us. Lets check to see if we can start sending index
// updates directly or need to send the index from start...
if dev . IndexID == myIndexID {
// They say they've seen our index ID before, so we can
// send a delta update only.
2016-07-29 12:54:24 -07:00
if dev . MaxSequence > mySequence {
2016-07-23 05:46:31 -07:00
// Safety check. They claim to have more or newer
// index data than we have - either we have lost
// index data, or reset the index without resetting
// the IndexID, or something else weird has
// happened. We send a full index to reset the
// situation.
2016-11-22 00:36:14 -07:00
l . Infof ( "Device %v folder %s is delta index compatible, but seems out of sync with reality" , deviceID , folder . Description ( ) )
2016-07-29 12:54:24 -07:00
startSequence = 0
2016-07-23 05:46:31 -07:00
continue
}
2016-11-22 00:36:14 -07:00
l . Debugf ( "Device %v folder %s is delta index compatible (mlv=%d)" , deviceID , folder . Description ( ) , dev . MaxSequence )
2016-07-29 12:54:24 -07:00
startSequence = dev . MaxSequence
2016-07-23 05:46:31 -07:00
} else if dev . IndexID != 0 {
// They say they've seen an index ID from us, but it's
// not the right one. Either they are confused or we
// must have reset our database since last talking to
// them. We'll start with a full index transfer.
2016-11-22 00:36:14 -07:00
l . Infof ( "Device %v folder %s has mismatching index ID for us (%v != %v)" , deviceID , folder . Description ( ) , dev . IndexID , myIndexID )
2016-07-29 12:54:24 -07:00
startSequence = 0
2016-07-23 05:46:31 -07:00
}
2019-05-12 12:17:55 -07:00
} else if dev . ID == deviceID {
2016-07-23 05:46:31 -07:00
// This is the other side's description of themselves. We
// check to see that it matches the IndexID we have on file,
// otherwise we drop our old index data and expect to get a
// completely new set.
theirIndexID := fs . IndexID ( deviceID )
if dev . IndexID == 0 {
// They're not announcing an index ID. This means they
// do not support delta indexes and we should clear any
// information we have from them before accepting their
// index, which will presumably be a full index.
2017-11-12 13:20:34 -07:00
fs . Drop ( deviceID )
2016-07-23 05:46:31 -07:00
} else if dev . IndexID != theirIndexID {
// The index ID we have on file is not what they're
// announcing. They must have reset their database and
// will probably send us a full index. We drop any
// information we have and remember this new index ID
// instead.
2016-11-22 00:36:14 -07:00
l . Infof ( "Device %v folder %s has a new index ID (%v)" , deviceID , folder . Description ( ) , dev . IndexID )
2017-11-12 13:20:34 -07:00
fs . Drop ( deviceID )
2016-07-23 05:46:31 -07:00
fs . SetIndexID ( deviceID , dev . IndexID )
2016-07-27 14:35:41 -07:00
} else {
// They're sending a recognized index ID and will most
// likely use delta indexes. We might already have files
// that we need to pull so let the folder runner know
// that it should recheck the index data.
if runner := m . folderRunners [ folder . ID ] ; runner != nil {
2017-11-06 23:59:35 -07:00
defer runner . SchedulePull ( )
2016-07-27 14:35:41 -07:00
}
2016-07-23 05:46:31 -07:00
}
}
}
2019-07-09 02:40:30 -07:00
is := & indexSender {
2019-06-10 04:27:22 -07:00
conn : conn ,
connClosed : closed ,
folder : folder . ID ,
fset : fs ,
prevSequence : startSequence ,
2019-08-15 07:29:37 -07:00
evLogger : m . evLogger ,
2019-07-09 02:40:30 -07:00
}
2019-11-21 00:41:15 -07:00
is . Service = util . AsService ( is . serve , is . String ( ) )
2019-07-09 02:40:30 -07:00
// The token isn't tracked as the service stops when the connection
// terminates and is automatically removed from supervisor (by
// implementing suture.IsCompletable).
m . Add ( is )
2016-01-01 12:11:12 -07:00
}
2019-08-11 10:30:24 -07:00
m . fmut . RUnlock ( )
2016-01-01 12:11:12 -07:00
2016-12-21 11:41:25 -07:00
m . pmut . Lock ( )
m . remotePausedFolders [ deviceID ] = paused
m . pmut . Unlock ( )
2016-04-15 03:59:41 -07:00
// This breaks if we send multiple CM messages during the same connection.
if len ( tempIndexFolders ) > 0 {
m . pmut . RLock ( )
conn , ok := m . conn [ deviceID ]
m . pmut . RUnlock ( )
// In case we've got ClusterConfig, and the connection disappeared
// from infront of our nose.
if ok {
m . progressEmitter . temporaryIndexSubscribe ( conn , tempIndexFolders )
}
}
2017-12-07 00:08:24 -07:00
if deviceCfg . Introducer {
2019-11-21 00:41:41 -07:00
folders , devices , foldersDevices , introduced := m . handleIntroductions ( deviceCfg , cm )
folders , devices , deintroduced := m . handleDeintroductions ( deviceCfg , foldersDevices , folders , devices )
if introduced || deintroduced {
2016-11-07 09:40:48 -07:00
changed = true
2019-11-21 00:41:41 -07:00
cfg := m . cfg . RawCopy ( )
cfg . Folders = make ( [ ] config . FolderConfiguration , 0 , len ( folders ) )
for _ , fcfg := range folders {
cfg . Folders = append ( cfg . Folders , fcfg )
}
cfg . Devices = make ( [ ] config . DeviceConfiguration , len ( devices ) )
for _ , dcfg := range devices {
cfg . Devices = append ( cfg . Devices , dcfg )
}
m . cfg . Replace ( cfg )
2016-11-07 09:40:48 -07:00
}
}
2014-09-23 07:04:20 -07:00
2016-11-07 09:40:48 -07:00
if changed {
if err := m . cfg . Save ( ) ; err != nil {
l . Warnln ( "Failed to save config" , err )
}
}
2019-12-04 02:46:55 -07:00
return nil
2016-11-07 09:40:48 -07:00
}
2014-09-23 07:04:20 -07:00
2019-11-21 00:41:41 -07:00
// handleIntroductions handles adding devices/folders that are shared by an introducer device
func ( m * model ) handleIntroductions ( introducerCfg config . DeviceConfiguration , cm protocol . ClusterConfig ) ( map [ string ] config . FolderConfiguration , map [ protocol . DeviceID ] config . DeviceConfiguration , folderDeviceSet , bool ) {
2016-11-07 09:40:48 -07:00
changed := false
2019-11-21 00:41:41 -07:00
folders := m . cfg . Folders ( )
devices := m . cfg . Devices ( )
2014-09-23 07:04:20 -07:00
2016-11-07 09:40:48 -07:00
foldersDevices := make ( folderDeviceSet )
2014-09-23 07:04:20 -07:00
2016-11-07 09:40:48 -07:00
for _ , folder := range cm . Folders {
// Adds devices which we do not have, but the introducer has
// for the folders that we have in common. Also, shares folders
// with devices that we have in common, yet are currently not sharing
// the folder.
2018-01-03 00:42:25 -07:00
2019-11-21 00:41:41 -07:00
fcfg , ok := folders [ folder . ID ]
2018-01-03 00:42:25 -07:00
if ! ok {
// Don't have this folder, carry on.
continue
}
2019-11-21 00:41:41 -07:00
folderChanged := false
2016-11-07 09:40:48 -07:00
for _ , device := range folder . Devices {
2017-12-07 00:08:24 -07:00
// No need to share with self.
if device . ID == m . id {
continue
}
2016-11-07 09:40:48 -07:00
foldersDevices . set ( device . ID , folder . ID )
2016-11-07 09:40:48 -07:00
2016-11-07 09:40:48 -07:00
if _ , ok := m . cfg . Devices ( ) [ device . ID ] ; ! ok {
// The device is currently unknown. Add it to the config.
2019-11-21 00:41:41 -07:00
devices [ device . ID ] = m . introduceDevice ( device , introducerCfg )
2018-06-06 14:34:11 -07:00
} else if fcfg . SharedWith ( device . ID ) {
// We already share the folder with this device, so
// nothing to do.
continue
2016-11-07 09:40:48 -07:00
}
2016-11-07 09:40:48 -07:00
2016-11-07 09:40:48 -07:00
// We don't yet share this folder with this device. Add the device
// to sharing list of the folder.
2017-12-07 00:08:24 -07:00
l . Infof ( "Sharing folder %s with %v (vouched for by introducer %v)" , folder . Description ( ) , device . ID , introducerCfg . DeviceID )
2018-01-03 00:42:25 -07:00
fcfg . Devices = append ( fcfg . Devices , config . FolderDeviceConfiguration {
DeviceID : device . ID ,
IntroducedBy : introducerCfg . DeviceID ,
} )
2019-11-21 00:41:41 -07:00
folderChanged = true
2016-11-07 09:40:48 -07:00
}
2018-01-03 00:42:25 -07:00
2019-11-21 00:41:41 -07:00
if folderChanged {
folders [ fcfg . ID ] = fcfg
changed = true
2018-01-03 00:42:25 -07:00
}
2016-11-07 09:40:48 -07:00
}
2016-11-07 09:40:48 -07:00
2019-11-21 00:41:41 -07:00
return folders , devices , foldersDevices , changed
2016-11-07 09:40:48 -07:00
}
2016-11-07 09:40:48 -07:00
2017-12-07 00:08:24 -07:00
// handleDeintroductions handles removals of devices/shares that are removed by an introducer device.
// foldersDevices is the set of (device, folder) pairs the introducer still
// announces; folders and devices are the working copies of the config maps,
// which are mutated in place and returned together with a changed flag.
// No-op when the introducer is configured with SkipIntroductionRemovals.
func (m *model) handleDeintroductions(introducerCfg config.DeviceConfiguration, foldersDevices folderDeviceSet, folders map[string]config.FolderConfiguration, devices map[protocol.DeviceID]config.DeviceConfiguration) (map[string]config.FolderConfiguration, map[protocol.DeviceID]config.DeviceConfiguration, bool) {
	if introducerCfg.SkipIntroductionRemovals {
		return folders, devices, false
	}

	changed := false
	// Devices that appear on some folder via a different (or no) introducer;
	// these must not be removed even if this introducer dropped them.
	devicesNotIntroduced := make(map[protocol.DeviceID]struct{})

	// Check if we should unshare some folders, if the introducer has unshared them.
	for folderID, folderCfg := range folders {
		// Index loop with in-place deletion; k is decremented after a
		// removal so the element shifted into position k is not skipped.
		for k := 0; k < len(folderCfg.Devices); k++ {
			if folderCfg.Devices[k].IntroducedBy != introducerCfg.DeviceID {
				devicesNotIntroduced[folderCfg.Devices[k].DeviceID] = struct{}{}
				continue
			}
			if !foldersDevices.has(folderCfg.Devices[k].DeviceID, folderCfg.ID) {
				// We could not find that folder shared on the
				// introducer with the device that was introduced to us.
				// We should follow and unshare as well.
				l.Infof("Unsharing folder %s with %v as introducer %v no longer shares the folder with that device", folderCfg.Description(), folderCfg.Devices[k].DeviceID, folderCfg.Devices[k].IntroducedBy)
				folderCfg.Devices = append(folderCfg.Devices[:k], folderCfg.Devices[k+1:]...)
				folders[folderID] = folderCfg
				k--
				changed = true
			}
		}
	}

	// Check if we should remove some devices, if the introducer no longer
	// shares any folder with them. Yet do not remove if we share other
	// folders that haven't been introduced by the introducer.
	for deviceID, device := range devices {
		if device.IntroducedBy == introducerCfg.DeviceID {
			if !foldersDevices.hasDevice(deviceID) {
				if _, ok := devicesNotIntroduced[deviceID]; !ok {
					// The introducer no longer shares any folder with the
					// device, remove the device.
					l.Infof("Removing device %v as introducer %v no longer shares any folders with that device", deviceID, device.IntroducedBy)
					changed = true
					delete(devices, deviceID)
					continue
				}
				l.Infof("Would have removed %v as %v no longer shares any folders, yet there are other folders that are shared with this device that haven't been introduced by this introducer.", deviceID, device.IntroducedBy)
			}
		}
	}

	return folders, devices, changed
}
2017-12-07 00:08:24 -07:00
// handleAutoAccepts handles adding and sharing folders for devices that have
// AutoAcceptFolders set to true.
2020-05-26 23:05:26 -07:00
func ( m * model ) handleAutoAccepts ( deviceCfg config . DeviceConfiguration , folder protocol . Folder ) ( config . FolderConfiguration , bool ) {
2018-01-03 00:42:25 -07:00
if cfg , ok := m . cfg . Folder ( folder . ID ) ; ! ok {
2017-12-07 00:08:24 -07:00
defaultPath := m . cfg . Options ( ) . DefaultFolderPath
defaultPathFs := fs . NewFilesystem ( fs . FilesystemTypeBasic , defaultPath )
2019-01-05 10:10:02 -07:00
pathAlternatives := [ ] string {
sanitizePath ( folder . Label ) ,
sanitizePath ( folder . ID ) ,
}
for _ , path := range pathAlternatives {
2017-12-07 00:08:24 -07:00
if _ , err := defaultPathFs . Lstat ( path ) ; ! fs . IsNotExist ( err ) {
continue
}
fcfg := config . NewFolderConfiguration ( m . id , folder . ID , folder . Label , fs . FilesystemTypeBasic , filepath . Join ( defaultPath , path ) )
2018-01-03 00:42:25 -07:00
fcfg . Devices = append ( fcfg . Devices , config . FolderDeviceConfiguration {
DeviceID : deviceCfg . DeviceID ,
} )
2017-12-07 00:08:24 -07:00
l . Infof ( "Auto-accepted %s folder %s at path %s" , deviceCfg . DeviceID , folder . Description ( ) , fcfg . Path )
2020-05-26 23:05:26 -07:00
return fcfg , true
2017-12-07 00:08:24 -07:00
}
l . Infof ( "Failed to auto-accept folder %s from %s due to path conflict" , folder . Description ( ) , deviceCfg . DeviceID )
2020-05-26 23:05:26 -07:00
return config . FolderConfiguration { } , false
2018-01-03 00:42:25 -07:00
} else {
for _ , device := range cfg . DeviceIDs ( ) {
if device == deviceCfg . DeviceID {
// Already shared nothing todo.
2020-05-26 23:05:26 -07:00
return config . FolderConfiguration { } , false
2018-01-03 00:42:25 -07:00
}
}
cfg . Devices = append ( cfg . Devices , config . FolderDeviceConfiguration {
DeviceID : deviceCfg . DeviceID ,
} )
l . Infof ( "Shared %s with %s due to auto-accept" , folder . ID , deviceCfg . DeviceID )
2020-05-26 23:05:26 -07:00
return cfg , true
2017-12-07 00:08:24 -07:00
}
}
2019-11-21 00:41:41 -07:00
func ( m * model ) introduceDevice ( device protocol . Device , introducerCfg config . DeviceConfiguration ) config . DeviceConfiguration {
2016-11-07 09:40:48 -07:00
addresses := [ ] string { "dynamic" }
for _ , addr := range device . Addresses {
if addr != "dynamic" {
addresses = append ( addresses , addr )
}
}
l . Infof ( "Adding device %v to config (vouched for by introducer %v)" , device . ID , introducerCfg . DeviceID )
newDeviceCfg := config . DeviceConfiguration {
DeviceID : device . ID ,
Name : device . Name ,
Compression : introducerCfg . Compression ,
Addresses : addresses ,
CertName : device . CertName ,
IntroducedBy : introducerCfg . DeviceID ,
}
// The introducers' introducers are also our introducers.
if device . Introducer {
l . Infof ( "Device %v is now also an introducer" , device . ID )
newDeviceCfg . Introducer = true
newDeviceCfg . SkipIntroductionRemovals = device . SkipIntroductionRemovals
2014-09-23 07:04:20 -07:00
}
2016-11-07 09:40:48 -07:00
2019-11-21 00:41:41 -07:00
return newDeviceCfg
2016-11-07 09:40:48 -07:00
}
2016-08-10 02:37:32 -07:00
// Closed is called when a connection has been closed. It removes all
// per-device connection state under pmut and then signals everyone
// waiting on the device's closed channel.
func (m *model) Closed(conn protocol.Connection, err error) {
	device := conn.ID()

	m.pmut.Lock()
	// NOTE: this deliberately reassigns the conn parameter to the
	// connection we have registered for the device; if none is registered
	// there is nothing to clean up.
	conn, ok := m.conn[device]
	if !ok {
		m.pmut.Unlock()
		return
	}
	// Drop all per-device state tracked alongside the connection.
	delete(m.conn, device)
	delete(m.connRequestLimiters, device)
	delete(m.helloMessages, device)
	delete(m.deviceDownloads, device)
	delete(m.remotePausedFolders, device)
	// Keep the closed channel so we can signal it after unlocking.
	closed := m.closed[device]
	delete(m.closed, device)
	m.pmut.Unlock()
	m.progressEmitter.temporaryIndexUnsubscribe(conn)

	l.Infof("Connection to %s at %s closed: %v", device, conn.Name(), err)
	m.evLogger.Log(events.DeviceDisconnected, map[string]string{
		"id":    device.String(),
		"error": err.Error(),
	})
	// Wakes up anyone blocked in closeConns/AddConnection waiting for this
	// connection to finish closing.
	close(closed)
}
2019-07-14 02:03:55 -07:00
// closeConns will close the underlying connection for given devices and return
// a waiter that will return once all the connections are finished closing.
func ( m * model ) closeConns ( devs [ ] protocol . DeviceID , err error ) config . Waiter {
2019-04-28 03:58:51 -07:00
conns := make ( [ ] connections . Connection , 0 , len ( devs ) )
2019-07-14 02:03:55 -07:00
closed := make ( [ ] chan struct { } , 0 , len ( devs ) )
2019-09-11 21:55:24 -07:00
m . pmut . RLock ( )
2019-04-28 03:58:51 -07:00
for _ , dev := range devs {
if conn , ok := m . conn [ dev ] ; ok {
conns = append ( conns , conn )
2019-07-14 02:03:55 -07:00
closed = append ( closed , m . closed [ dev ] )
2019-04-28 03:58:51 -07:00
}
}
2019-09-11 21:55:24 -07:00
m . pmut . RUnlock ( )
2019-04-28 03:58:51 -07:00
for _ , conn := range conns {
conn . Close ( err )
2016-12-21 11:41:25 -07:00
}
2019-07-14 02:03:55 -07:00
return & channelWaiter { chans : closed }
2019-04-28 03:58:51 -07:00
}
2016-12-21 11:41:25 -07:00
2019-07-14 02:03:55 -07:00
// closeConn closes the underlying connection for the given device and returns
// a waiter that will return once the connection is finished closing.
// It is a convenience wrapper around closeConns for a single device.
func (m *model) closeConn(dev protocol.DeviceID, err error) config.Waiter {
	return m.closeConns([]protocol.DeviceID{dev}, err)
}
// channelWaiter waits on a set of signalling channels; Wait returns once
// every channel has been closed.
type channelWaiter struct {
	chans []chan struct{}
}

// Wait blocks until all channels in the waiter have been closed.
func (w *channelWaiter) Wait() {
	for i := range w.chans {
		<-w.chans[i]
	}
}
2018-11-13 00:53:55 -07:00
// Implements protocol.RequestResponse
type requestResponse struct {
	data   []byte        // payload buffer, leased from protocol.BufferPool
	closed chan struct{} // closed exactly once when the response is done
	once   stdsync.Once  // guards buffer release and channel close in Close
}
// newRequestResponse allocates a requestResponse whose data buffer of the
// given size is taken from the protocol buffer pool; Close returns it.
func newRequestResponse(size int) *requestResponse {
	return &requestResponse{
		data:   protocol.BufferPool.Get(size),
		closed: make(chan struct{}),
	}
}
2015-01-17 18:12:06 -07:00
2018-11-13 00:53:55 -07:00
// Data returns the response payload buffer.
func (r *requestResponse) Data() []byte {
	return r.data
}

// Close returns the data buffer to the pool and signals any waiters.
// Safe to call multiple times; only the first call has effect.
func (r *requestResponse) Close() {
	r.once.Do(func() {
		protocol.BufferPool.Put(r.data)
		close(r.closed)
	})
}

// Wait blocks until Close has been called.
func (r *requestResponse) Wait() {
	<-r.closed
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
//
// The returned RequestResponse owns a pooled buffer and must be Closed by
// the caller; on any error the deferred handler closes it here instead.
func (m *model) Request(deviceID protocol.DeviceID, folder, name string, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (out protocol.RequestResponse, err error) {
	// Reject obviously malformed requests before touching any state.
	if size < 0 || offset < 0 {
		return nil, protocol.ErrInvalid
	}

	m.fmut.RLock()
	folderCfg, ok := m.folderCfgs[folder]
	folderIgnores := m.folderIgnores[folder]
	m.fmut.RUnlock()
	if !ok {
		// The folder might be already unpaused in the config, but not yet
		// in the model.
		l.Debugf("Request from %s for file %s in unstarted folder %q", deviceID, name, folder)
		return nil, protocol.ErrGeneric
	}

	if !folderCfg.SharedWith(deviceID) {
		l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder)
		return nil, protocol.ErrGeneric
	}
	if folderCfg.Paused {
		l.Debugf("Request from %s for file %s in paused folder %q", deviceID, name, folder)
		return nil, protocol.ErrGeneric
	}

	// Make sure the path is valid and in canonical form
	if name, err = fs.Canonicalize(name); err != nil {
		l.Debugf("Request from %s in folder %q for invalid filename %s", deviceID, folder, name)
		return nil, protocol.ErrGeneric
	}

	if deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d t=%v", m, deviceID, folder, name, offset, size, fromTemporary)
	}

	// Internal files (markers etc.) are never served to remotes.
	if fs.IsInternal(name) {
		l.Debugf("%v REQ(in) for internal file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
		return nil, protocol.ErrInvalid
	}

	// Ignored files are likewise off-limits.
	if folderIgnores.Match(name).IsIgnored() {
		l.Debugf("%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
		return nil, protocol.ErrInvalid
	}

	folderFs := folderCfg.Filesystem()

	// Refuse to read through symlinked parents out of the folder root.
	if err := osutil.TraversesSymlink(folderFs, filepath.Dir(name)); err != nil {
		l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size)
		return nil, protocol.ErrNoSuchFile
	}

	// Restrict parallel requests by connection/device
	m.pmut.RLock()
	limiter := m.connRequestLimiters[deviceID]
	m.pmut.RUnlock()

	// The requestResponse releases the bytes to the buffer pool and the
	// limiters when its Close method is called.
	res := newLimitedRequestResponse(int(size), limiter, m.globalRequestLimiter)

	defer func() {
		// Close it ourselves if it isn't returned due to an error
		if err != nil {
			res.Close()
		}
	}()

	// Only check temp files if the flag is set, and if we are set to advertise
	// the temp indexes.
	if fromTemporary && !folderCfg.DisableTempIndexes {
		tempFn := fs.TempName(name)

		if info, err := folderFs.Lstat(tempFn); err != nil || !info.IsRegular() {
			// Reject reads for anything that doesn't exist or is something
			// other than a regular file.
			l.Debugf("%v REQ(in) failed stating temp file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size)
			return nil, protocol.ErrNoSuchFile
		}
		err := readOffsetIntoBuf(folderFs, tempFn, offset, res.data)
		if err == nil && scanner.Validate(res.data, hash, weakHash) {
			return res, nil
		}
		// Fall through to reading from a non-temp file, just incase the temp
		// file has finished downloading.
	}

	if info, err := folderFs.Lstat(name); err != nil || !info.IsRegular() {
		// Reject reads for anything that doesn't exist or is something
		// other than a regular file.
		l.Debugf("%v REQ(in) failed stating file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size)
		return nil, protocol.ErrNoSuchFile
	}

	if err := readOffsetIntoBuf(folderFs, name, offset, res.data); fs.IsNotExist(err) {
		l.Debugf("%v REQ(in) file doesn't exist: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
		return nil, protocol.ErrNoSuchFile
	} else if err != nil {
		l.Debugf("%v REQ(in) failed reading file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size)
		return nil, protocol.ErrGeneric
	}

	// The content read from disk does not match the requested hashes:
	// schedule a re-check/rescan of the file before failing the request.
	if !scanner.Validate(res.data, hash, weakHash) {
		m.recheckFile(deviceID, folder, name, offset, hash, weakHash)
		l.Debugf("%v REQ(in) failed validating data: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
		return nil, protocol.ErrNoSuchFile
	}

	return res, nil
}
2020-02-01 00:02:18 -07:00
// newLimitedRequestResponse takes size bytes from the limiters in order,
// skipping nil limiters, then returns a requestResponse of the given size.
// When the requestResponse is closed the limiters are given back the bytes,
// in reverse order.
func newLimitedRequestResponse(size int, limiters ...*byteSemaphore) *requestResponse {
	// Acquire in the given order.
	for _, lim := range limiters {
		if lim != nil {
			lim.take(size)
		}
	}

	res := newRequestResponse(size)

	// Release in reverse order once the response is closed.
	go func() {
		res.Wait()
		for i := len(limiters) - 1; i >= 0; i-- {
			if lim := limiters[i]; lim != nil {
				lim.give(size)
			}
		}
	}()

	return res
}
2020-06-26 07:47:03 -07:00
// recheckFile is called when a served block failed hash validation in
// Request. If the requested hashes match what the database says the block
// should contain, the on-disk data must be corrupt, so the file is
// scheduled for a forced rescan.
func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, weakHash uint32) {
	cf, ok := m.CurrentFolderFile(folder, name)
	if !ok {
		l.Debugf("%v recheckFile: %s: %q / %q: no current file", m, deviceID, folder, name)
		return
	}

	if cf.IsDeleted() || cf.IsInvalid() || cf.IsSymlink() || cf.IsDirectory() {
		l.Debugf("%v recheckFile: %s: %q / %q: not a regular file", m, deviceID, folder, name)
		return
	}

	// Map the byte offset to a block index in the current file.
	blockIndex := int(offset / int64(cf.BlockSize()))
	if blockIndex >= len(cf.Blocks) {
		l.Debugf("%v recheckFile: %s: %q / %q i=%d: block index too far", m, deviceID, folder, name, blockIndex)
		return
	}

	block := cf.Blocks[blockIndex]

	// Seems to want a different version of the file, whatever.
	if !bytes.Equal(block.Hash, hash) {
		l.Debugf("%v recheckFile: %s: %q / %q i=%d: hash mismatch %x != %x", m, deviceID, folder, name, blockIndex, block.Hash, hash)
		return
	}

	// A zero weak hash means "not provided" and is not compared.
	if weakHash != 0 && block.WeakHash != weakHash {
		l.Debugf("%v recheckFile: %s: %q / %q i=%d: weak hash mismatch %v != %v", m, deviceID, folder, name, blockIndex, block.WeakHash, weakHash)
		return
	}

	// The hashes provided part of the request match what we expect to find according
	// to what we have in the database, yet the content we've read off the filesystem doesn't
	// Something is fishy, invalidate the file and rescan it.
	// The file will temporarily become invalid, which is ok as the content is messed up.
	m.fmut.RLock()
	runner, ok := m.folderRunners[folder]
	m.fmut.RUnlock()
	if !ok {
		l.Debugf("%v recheckFile: %s: %q / %q: Folder stopped before rescan could be scheduled", m, deviceID, folder, name)
		return
	}

	runner.ScheduleForceRescan(name)

	l.Debugf("%v recheckFile: %s: %q / %q", m, deviceID, folder, name)
}
2019-02-26 01:09:25 -07:00
func ( m * model ) CurrentFolderFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 04:39:39 -07:00
m . fmut . RLock ( )
2015-04-18 06:41:47 -07:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 04:39:39 -07:00
m . fmut . RUnlock ( )
2015-04-18 06:41:47 -07:00
if ! ok {
return protocol . FileInfo { } , false
}
2020-01-21 10:23:08 -07:00
snap := fs . Snapshot ( )
defer snap . Release ( )
return snap . Get ( protocol . LocalDeviceID , file )
2014-04-01 14:18:32 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) CurrentGlobalFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 04:39:39 -07:00
m . fmut . RLock ( )
2015-04-18 06:41:47 -07:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 04:39:39 -07:00
m . fmut . RUnlock ( )
2015-04-18 06:41:47 -07:00
if ! ok {
return protocol . FileInfo { } , false
}
2020-01-21 10:23:08 -07:00
snap := fs . Snapshot ( )
defer snap . Release ( )
return snap . GetGlobal ( file )
2014-04-01 14:18:32 -07:00
}
2018-02-24 00:51:29 -07:00
// Connection returns the current connection for device, and a boolean whether a connection was found.
2019-02-26 01:09:25 -07:00
func ( m * model ) Connection ( deviceID protocol . DeviceID ) ( connections . Connection , bool ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2017-11-21 00:25:38 -07:00
cn , ok := m . conn [ deviceID ]
2014-09-20 10:14:45 -07:00
m . pmut . RUnlock ( )
2014-09-10 02:29:01 -07:00
if ok {
2014-09-28 04:00:38 -07:00
m . deviceWasSeen ( deviceID )
2014-09-10 02:29:01 -07:00
}
2017-11-21 00:25:38 -07:00
return cn , ok
2014-01-06 03:11:18 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) GetIgnores ( folder string ) ( [ ] string , [ ] string , error ) {
2014-11-03 14:02:55 -07:00
m . fmut . RLock ( )
2019-05-02 10:55:39 -07:00
cfg , cfgOk := m . folderCfgs [ folder ]
ignores , ignoresOk := m . folderIgnores [ folder ]
m . fmut . RUnlock ( )
2017-08-12 10:10:43 -07:00
2019-05-02 10:55:39 -07:00
if ! cfgOk {
cfg , cfgOk = m . cfg . Folders ( ) [ folder ]
if ! cfgOk {
2020-03-03 14:40:00 -07:00
return nil , nil , fmt . Errorf ( "folder %s does not exist" , folder )
2017-04-01 02:58:06 -07:00
}
2017-08-21 23:48:25 -07:00
}
2014-09-14 15:03:53 -07:00
2018-03-17 17:42:31 -07:00
// On creation a new folder with ignore patterns validly has no marker yet.
if err := cfg . CheckPath ( ) ; err != nil && err != config . ErrMarkerMissing {
2017-08-21 23:48:25 -07:00
return nil , nil , err
}
2015-12-30 14:30:47 -07:00
2019-05-02 10:55:39 -07:00
if ! ignoresOk {
2018-05-08 14:37:13 -07:00
ignores = ignore . New ( fs . NewFilesystem ( cfg . FilesystemType , cfg . Path ) )
2014-09-14 15:03:53 -07:00
}
2020-06-18 02:04:00 -07:00
err := ignores . Load ( ".stignore" )
if fs . IsNotExist ( err ) {
// Having no ignores is not an error.
return nil , nil , nil
2014-09-14 15:03:53 -07:00
}
2020-06-18 02:04:00 -07:00
// Return lines and patterns, which may have some meaning even when err
// != nil, depending on the specific error.
return ignores . Lines ( ) , ignores . Patterns ( ) , err
2014-09-14 15:03:53 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) SetIgnores ( folder string , content [ ] string ) error {
2017-04-01 02:58:06 -07:00
cfg , ok := m . cfg . Folders ( ) [ folder ]
2014-09-14 15:03:53 -07:00
if ! ok {
2018-05-05 01:30:39 -07:00
return fmt . Errorf ( "folder %s does not exist" , cfg . Description ( ) )
}
err := cfg . CheckPath ( )
if err == config . ErrPathMissing {
if err = cfg . CreateRoot ( ) ; err != nil {
2019-11-23 08:20:54 -07:00
return errors . Wrap ( err , "failed to create folder root" )
2018-05-05 01:30:39 -07:00
}
err = cfg . CheckPath ( )
}
if err != nil && err != config . ErrMarkerMissing {
return err
2014-09-14 15:03:53 -07:00
}
2017-08-19 07:36:56 -07:00
if err := ignore . WriteIgnores ( cfg . Filesystem ( ) , ".stignore" , content ) ; err != nil {
2014-09-14 15:03:53 -07:00
l . Warnln ( "Saving .stignore:" , err )
return err
}
2017-04-01 02:58:06 -07:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ok {
return runner . Scan ( nil )
2014-09-14 15:03:53 -07:00
}
2017-04-01 02:58:06 -07:00
return nil
2014-09-14 15:03:53 -07:00
}
2016-03-25 13:29:07 -07:00
// OnHello is called when an device connects to us.
// This allows us to extract some information from the Hello message
// and add it to a list of known devices ahead of any checks.
2019-02-26 01:09:25 -07:00
func ( m * model ) OnHello ( remoteID protocol . DeviceID , addr net . Addr , hello protocol . HelloResult ) error {
2016-08-05 02:29:49 -07:00
if m . cfg . IgnoredDevice ( remoteID ) {
return errDeviceIgnored
}
2017-04-01 02:52:31 -07:00
cfg , ok := m . cfg . Device ( remoteID )
if ! ok {
2018-08-25 03:36:10 -07:00
m . cfg . AddOrUpdatePendingDevice ( remoteID , hello . DeviceName , addr . String ( ) )
2019-07-27 03:05:00 -07:00
_ = m . cfg . Save ( ) // best effort
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( events . DeviceRejected , map [ string ] string {
2017-04-01 02:52:31 -07:00
"name" : hello . DeviceName ,
"device" : remoteID . String ( ) ,
"address" : addr . String ( ) ,
} )
return errDeviceUnknown
2016-03-25 13:29:07 -07:00
}
2016-08-05 02:29:49 -07:00
2017-04-01 02:52:31 -07:00
if cfg . Paused {
return errDevicePaused
}
2016-08-05 02:29:49 -07:00
2017-04-01 02:52:31 -07:00
if len ( cfg . AllowedNetworks ) > 0 {
if ! connections . IsAllowedNetwork ( addr . String ( ) , cfg . AllowedNetworks ) {
return errNetworkNotAllowed
}
}
return nil
2016-03-25 13:29:07 -07:00
}
// GetHello is called when we are about to connect to some remote device.
2019-02-26 01:09:25 -07:00
func ( m * model ) GetHello ( id protocol . DeviceID ) protocol . HelloIntf {
2017-05-22 12:58:33 -07:00
name := ""
if _ , ok := m . cfg . Device ( id ) ; ok {
name = m . cfg . MyName ( )
}
2016-07-04 03:40:29 -07:00
return & protocol . Hello {
2017-05-22 12:58:33 -07:00
DeviceName : name ,
2016-03-25 13:29:07 -07:00
ClientName : m . clientName ,
ClientVersion : m . clientVersion ,
}
}
2014-01-06 03:11:18 -07:00
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
2014-09-28 04:00:38 -07:00
// folder changes.
2019-02-26 01:09:25 -07:00
func ( m * model ) AddConnection ( conn connections . Connection , hello protocol . HelloResult ) {
2015-06-28 08:05:29 -07:00
deviceID := conn . ID ( )
2018-11-13 00:53:55 -07:00
device , ok := m . cfg . Device ( deviceID )
if ! ok {
l . Infoln ( "Trying to add connection to unknown device" )
return
}
2014-07-15 04:04:37 -07:00
2014-01-17 20:06:44 -07:00
m . pmut . Lock ( )
2016-08-10 02:37:32 -07:00
if oldConn , ok := m . conn [ deviceID ] ; ok {
l . Infoln ( "Replacing old connection" , oldConn , "with" , conn , "for" , deviceID )
// There is an existing connection to this device that we are
// replacing. We must close the existing connection and wait for the
// close to complete before adding the new connection. We do the
// actual close without holding pmut as the connection will call
// back into Closed() for the cleanup.
closed := m . closed [ deviceID ]
m . pmut . Unlock ( )
2019-01-09 09:31:09 -07:00
oldConn . Close ( errReplacingConnection )
2016-08-10 02:37:32 -07:00
<- closed
m . pmut . Lock ( )
2014-03-23 00:45:05 -07:00
}
2016-08-10 02:37:32 -07:00
2015-06-28 08:05:29 -07:00
m . conn [ deviceID ] = conn
2016-08-10 02:37:32 -07:00
m . closed [ deviceID ] = make ( chan struct { } )
2016-04-15 03:59:41 -07:00
m . deviceDownloads [ deviceID ] = newDeviceDownloadState ( )
2018-11-13 00:53:55 -07:00
// 0: default, <0: no limiting
switch {
case device . MaxRequestKiB > 0 :
m . connRequestLimiters [ deviceID ] = newByteSemaphore ( 1024 * device . MaxRequestKiB )
case device . MaxRequestKiB == 0 :
m . connRequestLimiters [ deviceID ] = newByteSemaphore ( 1024 * defaultPullerPendingKiB )
}
2014-01-06 03:11:18 -07:00
2016-03-25 13:29:07 -07:00
m . helloMessages [ deviceID ] = hello
event := map [ string ] string {
"id" : deviceID . String ( ) ,
"deviceName" : hello . DeviceName ,
"clientName" : hello . ClientName ,
"clientVersion" : hello . ClientVersion ,
2016-11-30 00:54:20 -07:00
"type" : conn . Type ( ) ,
2016-03-25 13:29:07 -07:00
}
addr := conn . RemoteAddr ( )
if addr != nil {
event [ "addr" ] = addr . String ( )
}
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( events . DeviceConnected , event )
2016-03-25 13:29:07 -07:00
2018-01-12 04:27:55 -07:00
l . Infof ( ` Device %s client is "%s %s" named "%s" at %s ` , deviceID , hello . ClientName , hello . ClientVersion , hello . DeviceName , conn )
2016-03-25 13:29:07 -07:00
2015-06-28 08:05:29 -07:00
conn . Start ( )
2016-12-21 05:22:18 -07:00
m . pmut . Unlock ( )
2015-07-09 23:37:57 -07:00
2016-12-21 05:22:18 -07:00
// Acquires fmut, so has to be done outside of pmut.
2015-11-17 04:08:53 -07:00
cm := m . generateClusterConfig ( deviceID )
2015-06-28 08:05:29 -07:00
conn . ClusterConfig ( cm )
2014-09-20 10:14:45 -07:00
2018-11-13 00:53:55 -07:00
if ( device . Name == "" || m . cfg . Options ( ) . OverwriteRemoteDevNames ) && hello . DeviceName != "" {
2016-04-18 13:25:31 -07:00
device . Name = hello . DeviceName
2019-02-02 04:16:27 -07:00
m . cfg . SetDevice ( device )
m . cfg . Save ( )
2016-04-18 13:25:31 -07:00
}
2014-09-28 04:00:38 -07:00
m . deviceWasSeen ( deviceID )
2014-09-20 10:14:45 -07:00
}
2019-12-04 02:46:55 -07:00
func ( m * model ) DownloadProgress ( device protocol . DeviceID , folder string , updates [ ] protocol . FileDownloadProgressUpdate ) error {
2016-04-15 03:59:41 -07:00
m . fmut . RLock ( )
cfg , ok := m . folderCfgs [ folder ]
m . fmut . RUnlock ( )
2018-08-21 10:49:35 -07:00
if ! ok || cfg . DisableTempIndexes || ! cfg . SharedWith ( device ) {
2019-12-04 02:46:55 -07:00
return nil
2016-04-15 03:59:41 -07:00
}
m . pmut . RLock ( )
2019-08-13 00:04:43 -07:00
downloads := m . deviceDownloads [ device ]
2016-04-15 03:59:41 -07:00
m . pmut . RUnlock ( )
2019-08-13 00:04:43 -07:00
downloads . Update ( folder , updates )
state := downloads . GetBlockCounts ( folder )
2016-05-22 00:52:08 -07:00
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( events . RemoteDownloadProgress , map [ string ] interface { } {
2016-05-22 00:52:08 -07:00
"device" : device . String ( ) ,
"folder" : folder ,
2016-05-25 23:53:27 -07:00
"state" : state ,
2016-05-22 00:52:08 -07:00
} )
2019-12-04 02:46:55 -07:00
return nil
2016-04-15 03:59:41 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) deviceWasSeen ( deviceID protocol . DeviceID ) {
2019-09-11 21:55:24 -07:00
m . fmut . RLock ( )
sr , ok := m . deviceStatRefs [ deviceID ]
m . fmut . RUnlock ( )
if ok {
sr . WasSeen ( )
}
2014-07-15 04:04:37 -07:00
}
2019-06-10 04:27:22 -07:00
// indexSender is a suture service that sends index messages for one
// folder to one device over the given connection, until the context is
// cancelled or the connection closes.
type indexSender struct {
	suture.Service
	conn         protocol.Connection
	folder       string
	dev          string
	fset         *db.FileSet
	prevSequence int64 // highest local sequence number sent so far
	evLogger     events.Logger
	connClosed   chan struct{} // closed when the connection goes away
}
2019-11-21 00:41:15 -07:00
// serve runs the index sending loop: it sends one index unconditionally,
// then forwards further local index updates as they happen, until the
// context is cancelled, the connection closes, or sending fails.
func (s *indexSender) serve(ctx context.Context) {
	var err error

	l.Debugf("Starting indexSender for %s to %s at %s (slv=%d)", s.folder, s.dev, s.conn, s.prevSequence)
	defer l.Debugf("Exiting indexSender for %s to %s at %s: %v", s.folder, s.dev, s.conn, err)

	// We need to send one index, regardless of whether there is something to send or not
	err = s.sendIndexTo(ctx)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := s.evLogger.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer sub.Unsubscribe()

	evChan := sub.C()
	// The ticker is a fallback so we periodically re-check the sequence
	// even without events.
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for err == nil {
		// Non-blocking exit check before doing any work.
		select {
		case <-ctx.Done():
			return
		case <-s.connClosed:
			return
		default:
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if s.fset.Sequence(protocol.LocalDeviceID) <= s.prevSequence {
			select {
			case <-ctx.Done():
				return
			case <-s.connClosed:
				return
			case <-evChan:
			case <-ticker.C:
			}

			continue
		}

		err = s.sendIndexTo(ctx)

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}
}
2014-07-15 04:04:37 -07:00
2019-06-10 04:27:22 -07:00
// Complete implements the suture.IsCompletable interface. When Serve terminates
// before Stop is called, the supervisor will check for this method and if it
// returns true removes the service instead of restarting it. Here it always
// returns true, as indexSender only terminates when a connection is
// closed/has failed, in which case retrying doesn't help.
func (s *indexSender) Complete() bool { return true }
2018-05-01 14:39:15 -07:00
// sendIndexTo sends file infos with a sequence number higher than prevSequence and
// returns the highest sent sequence number.
2019-11-25 03:07:36 -07:00
func ( s * indexSender ) sendIndexTo ( ctx context . Context ) error {
2019-06-10 04:27:22 -07:00
initial := s . prevSequence == 0
2018-08-25 01:32:35 -07:00
batch := newFileInfoBatch ( nil )
batch . flushFn = func ( fs [ ] protocol . FileInfo ) error {
2019-11-21 00:41:15 -07:00
l . Debugf ( "%v: Sending %d files (<%d bytes)" , s , len ( batch . infos ) , batch . size )
2018-08-25 01:32:35 -07:00
if initial {
initial = false
2019-11-25 03:07:36 -07:00
return s . conn . Index ( ctx , s . folder , fs )
2018-08-25 01:32:35 -07:00
}
2019-11-25 03:07:36 -07:00
return s . conn . IndexUpdate ( ctx , s . folder , fs )
2018-01-12 04:27:55 -07:00
}
2014-07-15 04:04:37 -07:00
2018-08-25 01:32:35 -07:00
var err error
var f protocol . FileInfo
2020-01-21 10:23:08 -07:00
snap := s . fset . Snapshot ( )
defer snap . Release ( )
2020-05-11 11:15:11 -07:00
previousWasDelete := false
2020-05-30 00:50:23 -07:00
snap . WithHaveSequence ( s . prevSequence + 1 , func ( fi protocol . FileIntf ) bool {
2020-05-11 11:15:11 -07:00
// This is to make sure that renames (which is an add followed by a delete) land in the same batch.
// Even if the batch is full, we allow a last delete to slip in, we do this by making sure that
// the batch ends with a non-delete, or that the last item in the batch is already a delete
if batch . full ( ) && ( ! fi . IsDeleted ( ) || previousWasDelete ) {
if err = batch . flush ( ) ; err != nil {
return false
}
2014-07-15 04:04:37 -07:00
}
2018-09-02 12:05:53 -07:00
if shouldDebug ( ) {
2019-06-10 04:27:22 -07:00
if fi . SequenceNo ( ) < s . prevSequence + 1 {
panic ( fmt . Sprintln ( "sequence lower than requested, got:" , fi . SequenceNo ( ) , ", asked to start at:" , s . prevSequence + 1 ) )
2018-09-02 12:05:53 -07:00
}
2020-03-18 09:34:46 -07:00
}
if f . Sequence > 0 && fi . SequenceNo ( ) <= f . Sequence {
l . Warnln ( "Non-increasing sequence detected: Checking and repairing the db..." )
// Abort this round of index sending - the next one will pick
// up from the last successful one with the repeaired db.
defer func ( ) {
if fixed , dbErr := s . fset . RepairSequence ( ) ; dbErr != nil {
l . Warnln ( "Failed repairing sequence entries:" , dbErr )
panic ( "Failed repairing sequence entries" )
} else {
2020-04-16 06:42:45 -07:00
l . Infof ( "Repaired %v sequence entries in database" , fixed )
2020-03-18 09:34:46 -07:00
}
} ( )
return false
2014-07-15 04:04:37 -07:00
}
2018-05-01 14:39:15 -07:00
f = fi . ( protocol . FileInfo )
2018-06-24 00:50:18 -07:00
// Mark the file as invalid if any of the local bad stuff flags are set.
f . RawInvalid = f . IsInvalid ( )
2018-07-12 01:15:57 -07:00
// If the file is marked LocalReceive (i.e., changed locally on a
// receive only folder) we do not want it to ever become the
// globally best version, invalid or not.
if f . IsReceiveOnlyChanged ( ) {
f . Version = protocol . Vector { }
}
2020-05-13 05:28:42 -07:00
// never sent externally
f . LocalFlags = 0
f . VersionHash = nil
2018-06-24 00:50:18 -07:00
2020-05-11 11:15:11 -07:00
previousWasDelete = f . IsDeleted ( )
2018-08-25 01:32:35 -07:00
batch . append ( f )
2014-07-30 11:08:04 -07:00
return true
} )
2018-05-01 14:39:15 -07:00
if err != nil {
2019-06-10 04:27:22 -07:00
return err
2018-05-01 14:39:15 -07:00
}
2018-08-25 01:32:35 -07:00
err = batch . flush ( )
2014-07-30 11:08:04 -07:00
2018-05-01 14:39:15 -07:00
// True if there was nothing to be sent
if f . Sequence == 0 {
2019-06-10 04:27:22 -07:00
return err
2018-05-01 14:39:15 -07:00
}
2019-06-10 04:27:22 -07:00
s . prevSequence = f . Sequence
return err
2014-01-06 03:11:18 -07:00
}
2019-11-21 00:41:15 -07:00
// String identifies this sender by pointer, folder, remote device and
// connection, for use in log output.
func (s *indexSender) String() string {
	const format = "indexSender@%p for %s to %s at %s"
	return fmt.Sprintf(format, s, s.folder, s.dev, s.conn)
}
2019-11-19 01:56:53 -07:00
func ( m * model ) requestGlobal ( ctx context . Context , deviceID protocol . DeviceID , folder , name string , offset int64 , size int , hash [ ] byte , weakHash uint32 , fromTemporary bool ) ( [ ] byte , error ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 08:05:29 -07:00
nc , ok := m . conn [ deviceID ]
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-01-06 03:11:18 -07:00
if ! ok {
2014-09-28 04:00:38 -07:00
return nil , fmt . Errorf ( "requestGlobal: no such device: %s" , deviceID )
2014-01-06 03:11:18 -07:00
}
2018-05-05 01:24:44 -07:00
l . Debugf ( "%v REQ(out): %s: %q / %q o=%d s=%d h=%x wh=%x ft=%t" , m , deviceID , folder , name , offset , size , hash , weakHash , fromTemporary )
2014-01-06 03:11:18 -07:00
2019-11-19 01:56:53 -07:00
return nc . Request ( ctx , folder , name , offset , size , hash , weakHash , fromTemporary )
2014-01-06 03:11:18 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) ScanFolders ( ) map [ string ] error {
2014-09-28 04:39:39 -07:00
m . fmut . RLock ( )
2015-04-12 13:12:01 -07:00
folders := make ( [ ] string , 0 , len ( m . folderCfgs ) )
2014-09-28 04:00:38 -07:00
for folder := range m . folderCfgs {
folders = append ( folders , folder )
2014-03-29 10:53:48 -07:00
}
2014-09-28 04:39:39 -07:00
m . fmut . RUnlock ( )
2014-04-14 00:58:17 -07:00
2015-04-12 13:12:01 -07:00
errors := make ( map [ string ] error , len ( m . folderCfgs ) )
2015-04-22 15:54:31 -07:00
errorsMut := sync . NewMutex ( )
2015-02-11 11:52:59 -07:00
2015-04-22 15:54:31 -07:00
wg := sync . NewWaitGroup ( )
2014-09-28 04:00:38 -07:00
wg . Add ( len ( folders ) )
for _ , folder := range folders {
folder := folder
2014-05-13 16:42:12 -07:00
go func ( ) {
2014-09-28 04:00:38 -07:00
err := m . ScanFolder ( folder )
2014-05-27 21:55:30 -07:00
if err != nil {
2015-02-11 11:52:59 -07:00
errorsMut . Lock ( )
errors [ folder ] = err
errorsMut . Unlock ( )
2014-05-27 21:55:30 -07:00
}
2014-05-13 16:42:12 -07:00
wg . Done ( )
} ( )
2014-04-14 00:58:17 -07:00
}
2014-05-13 16:42:12 -07:00
wg . Wait ( )
2015-02-11 11:52:59 -07:00
return errors
2014-03-29 10:53:48 -07:00
}
2013-12-15 03:43:31 -07:00
2019-02-26 01:09:25 -07:00
func ( m * model ) ScanFolder ( folder string ) error {
2016-06-28 23:37:34 -07:00
return m . ScanFolderSubdirs ( folder , nil )
2014-08-11 11:20:01 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) ScanFolderSubdirs ( folder string , subs [ ] string ) error {
2017-12-15 13:01:56 -07:00
m . fmut . RLock ( )
2019-05-02 10:55:39 -07:00
err := m . checkFolderRunningLocked ( folder )
2017-12-15 13:01:56 -07:00
runner := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
2015-06-20 10:26:25 -07:00
2019-05-02 10:55:39 -07:00
if err != nil {
return err
}
2015-06-20 10:26:25 -07:00
return runner . Scan ( subs )
}
2019-02-26 01:09:25 -07:00
func ( m * model ) DelayScan ( folder string , next time . Duration ) {
2019-09-11 21:55:24 -07:00
m . fmut . RLock ( )
2015-05-01 05:30:17 -07:00
runner , ok := m . folderRunners [ folder ]
2019-09-11 21:55:24 -07:00
m . fmut . RUnlock ( )
2015-05-01 05:30:17 -07:00
if ! ok {
return
}
runner . DelayScan ( next )
}
2015-04-29 11:46:32 -07:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
2019-02-26 01:09:25 -07:00
func ( m * model ) numHashers ( folder string ) int {
2019-09-11 21:55:24 -07:00
m . fmut . RLock ( )
2015-04-29 11:46:32 -07:00
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
2019-09-11 21:55:24 -07:00
m . fmut . RUnlock ( )
2015-04-29 11:46:32 -07:00
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 01:05:06 -07:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, lets try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 11:46:32 -07:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
2015-11-17 04:08:53 -07:00
// generateClusterConfig returns a ClusterConfigMessage that is correct for
// the given peer device
2019-02-26 01:09:25 -07:00
func ( m * model ) generateClusterConfig ( device protocol . DeviceID ) protocol . ClusterConfig {
2016-07-04 03:40:29 -07:00
var message protocol . ClusterConfig
2014-04-13 06:28:26 -07:00
2014-09-28 04:39:39 -07:00
m . fmut . RLock ( )
2018-05-08 00:19:34 -07:00
defer m . fmut . RUnlock ( )
2016-07-23 05:46:31 -07:00
2018-04-22 09:01:52 -07:00
for _ , folderCfg := range m . cfg . FolderList ( ) {
2018-06-06 14:34:11 -07:00
if ! folderCfg . SharedWith ( device ) {
2018-05-08 00:19:34 -07:00
continue
}
2016-03-11 02:48:46 -07:00
protocolFolder := protocol . Folder {
2018-04-22 09:01:52 -07:00
ID : folderCfg . ID ,
2016-07-04 03:40:29 -07:00
Label : folderCfg . Label ,
2016-12-16 15:23:35 -07:00
ReadOnly : folderCfg . Type == config . FolderTypeSendOnly ,
2016-07-04 03:40:29 -07:00
IgnorePermissions : folderCfg . IgnorePerms ,
IgnoreDelete : folderCfg . IgnoreDelete ,
DisableTempIndexes : folderCfg . DisableTempIndexes ,
2016-12-21 11:41:25 -07:00
Paused : folderCfg . Paused ,
2015-09-27 04:11:34 -07:00
}
2016-07-04 03:40:29 -07:00
2018-05-08 00:19:34 -07:00
var fs * db . FileSet
2018-04-22 09:01:52 -07:00
if ! folderCfg . Paused {
2018-05-08 00:19:34 -07:00
fs = m . folderFiles [ folderCfg . ID ]
2018-04-22 09:01:52 -07:00
}
2016-07-23 05:46:31 -07:00
2018-04-22 09:01:52 -07:00
for _ , device := range folderCfg . Devices {
deviceCfg , _ := m . cfg . Device ( device . DeviceID )
2016-07-23 05:46:31 -07:00
2016-03-11 02:48:46 -07:00
protocolDevice := protocol . Device {
2018-04-22 09:01:52 -07:00
ID : deviceCfg . DeviceID ,
2016-07-29 12:54:24 -07:00
Name : deviceCfg . Name ,
Addresses : deviceCfg . Addresses ,
Compression : deviceCfg . Compression ,
CertName : deviceCfg . CertName ,
Introducer : deviceCfg . Introducer ,
2018-04-22 09:01:52 -07:00
}
2018-05-08 00:19:34 -07:00
if fs != nil {
2018-04-22 09:01:52 -07:00
if deviceCfg . DeviceID == m . id {
protocolDevice . IndexID = fs . IndexID ( protocol . LocalDeviceID )
protocolDevice . MaxSequence = fs . Sequence ( protocol . LocalDeviceID )
} else {
protocolDevice . IndexID = fs . IndexID ( deviceCfg . DeviceID )
protocolDevice . MaxSequence = fs . Sequence ( deviceCfg . DeviceID )
}
2014-09-23 07:04:20 -07:00
}
2015-09-27 03:39:02 -07:00
2016-03-11 02:48:46 -07:00
protocolFolder . Devices = append ( protocolFolder . Devices , protocolDevice )
2014-01-09 05:58:35 -07:00
}
2018-04-22 09:01:52 -07:00
2016-03-11 02:48:46 -07:00
message . Folders = append ( message . Folders , protocolFolder )
2013-12-29 18:33:57 -07:00
}
2014-04-13 06:28:26 -07:00
2016-03-11 02:48:46 -07:00
return message
2013-12-29 18:33:57 -07:00
}
2014-04-14 00:58:17 -07:00
2019-02-26 01:09:25 -07:00
func ( m * model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 13:14:19 -07:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-12 13:12:01 -07:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 13:14:19 -07:00
}
2015-04-12 13:12:01 -07:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 00:58:17 -07:00
}
2014-06-16 01:47:02 -07:00
2019-02-26 01:09:25 -07:00
func ( m * model ) FolderErrors ( folder string ) ( [ ] FileError , error ) {
2018-01-14 10:01:06 -07:00
m . fmut . RLock ( )
2019-08-13 00:04:43 -07:00
err := m . checkFolderRunningLocked ( folder )
runner := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if err != nil {
2018-01-14 10:01:06 -07:00
return nil , err
}
2019-08-13 00:04:43 -07:00
return runner . Errors ( ) , nil
2018-01-14 10:01:06 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) WatchError ( folder string ) error {
2018-02-04 14:46:24 -07:00
m . fmut . RLock ( )
2019-08-13 00:04:43 -07:00
err := m . checkFolderRunningLocked ( folder )
runner := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if err != nil {
2019-07-19 10:41:16 -07:00
return nil // If the folder isn't running, there's no error to report.
2018-02-04 14:46:24 -07:00
}
2019-08-13 00:04:43 -07:00
return runner . WatchError ( )
2018-02-04 14:46:24 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) Override ( folder string ) {
2018-05-20 23:56:24 -07:00
// Grab the runner and the file set.
2014-09-28 04:39:39 -07:00
m . fmut . RLock ( )
2019-04-07 04:29:17 -07:00
runner , ok := m . folderRunners [ folder ]
2014-09-28 04:39:39 -07:00
m . fmut . RUnlock ( )
2019-04-07 04:29:17 -07:00
if ! ok {
2015-04-18 06:41:47 -07:00
return
}
2014-06-23 02:52:13 -07:00
2018-05-20 23:56:24 -07:00
// Run the override, taking updates as if they came from scanning.
2014-07-15 08:54:00 -07:00
2019-04-07 04:29:17 -07:00
runner . Override ( )
2014-06-16 01:47:02 -07:00
}
2014-06-19 15:27:54 -07:00
2019-02-26 01:09:25 -07:00
func ( m * model ) Revert ( folder string ) {
2018-07-12 01:15:57 -07:00
// Grab the runner and the file set.
m . fmut . RLock ( )
2019-04-07 04:29:17 -07:00
runner , ok := m . folderRunners [ folder ]
2018-07-12 01:15:57 -07:00
m . fmut . RUnlock ( )
2019-04-07 04:29:17 -07:00
if ! ok {
2018-07-12 01:15:57 -07:00
return
}
// Run the revert, taking updates as if they came from scanning.
2019-04-07 04:29:17 -07:00
runner . Revert ( )
2018-07-12 01:15:57 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) GlobalDirectoryTree ( folder , prefix string , levels int , dirsonly bool ) map [ string ] interface { } {
2015-02-07 03:52:42 -07:00
m . fmut . RLock ( )
files , ok := m . folderFiles [ folder ]
m . fmut . RUnlock ( )
if ! ok {
return nil
}
output := make ( map [ string ] interface { } )
sep := string ( filepath . Separator )
prefix = osutil . NativeFilename ( prefix )
if prefix != "" && ! strings . HasSuffix ( prefix , sep ) {
prefix = prefix + sep
}
2020-01-21 10:23:08 -07:00
snap := files . Snapshot ( )
defer snap . Release ( )
2020-05-30 00:50:23 -07:00
snap . WithPrefixedGlobalTruncated ( prefix , func ( fi protocol . FileIntf ) bool {
2015-02-07 03:52:42 -07:00
f := fi . ( db . FileInfoTruncated )
2018-05-17 00:26:40 -07:00
// Don't include the prefix itself.
if f . IsInvalid ( ) || f . IsDeleted ( ) || strings . HasPrefix ( prefix , f . Name ) {
2015-02-07 03:52:42 -07:00
return true
}
f . Name = strings . Replace ( f . Name , prefix , "" , 1 )
var dir , base string
if f . IsDirectory ( ) && ! f . IsSymlink ( ) {
dir = f . Name
} else {
dir = filepath . Dir ( f . Name )
base = filepath . Base ( f . Name )
}
if levels > - 1 && strings . Count ( f . Name , sep ) > levels {
return true
}
last := output
if dir != "." {
for _ , path := range strings . Split ( dir , sep ) {
directory , ok := last [ path ]
if ! ok {
newdir := make ( map [ string ] interface { } )
last [ path ] = newdir
last = newdir
} else {
last = directory . ( map [ string ] interface { } )
}
}
}
if ! dirsonly && base != "" {
2015-04-20 06:37:04 -07:00
last [ base ] = [ ] interface { } {
2016-08-06 06:05:59 -07:00
f . ModTime ( ) , f . FileSize ( ) ,
2015-02-07 03:52:42 -07:00
}
}
return true
} )
return output
}
2019-02-26 01:09:25 -07:00
func ( m * model ) GetFolderVersions ( folder string ) ( map [ string ] [ ] versioner . FileVersion , error ) {
2019-11-26 00:39:31 -07:00
m . fmut . RLock ( )
2020-02-11 23:35:24 -07:00
err := m . checkFolderRunningLocked ( folder )
ver := m . folderVersioners [ folder ]
2019-11-26 00:39:31 -07:00
m . fmut . RUnlock ( )
2020-02-11 23:35:24 -07:00
if err != nil {
return nil , err
2018-01-01 07:39:23 -07:00
}
2019-04-28 15:30:16 -07:00
if ver == nil {
2019-11-26 00:39:31 -07:00
return nil , errNoVersioner
2018-01-01 07:39:23 -07:00
}
2019-04-28 15:30:16 -07:00
return ver . GetVersions ( )
2018-01-01 07:39:23 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) RestoreFolderVersions ( folder string , versions map [ string ] time . Time ) ( map [ string ] string , error ) {
2019-11-26 00:39:31 -07:00
m . fmut . RLock ( )
2020-02-11 23:35:24 -07:00
err := m . checkFolderRunningLocked ( folder )
fcfg := m . folderCfgs [ folder ]
2019-11-26 00:39:31 -07:00
ver := m . folderVersioners [ folder ]
m . fmut . RUnlock ( )
2020-02-11 23:35:24 -07:00
if err != nil {
return nil , err
2019-11-26 00:39:31 -07:00
}
if ver == nil {
return nil , errNoVersioner
}
2018-01-01 07:39:23 -07:00
2019-04-28 15:30:16 -07:00
restoreErrors := make ( map [ string ] string )
2018-01-01 07:39:23 -07:00
for file , version := range versions {
2019-04-28 15:30:16 -07:00
if err := ver . Restore ( file , version ) ; err != nil {
restoreErrors [ file ] = err . Error ( )
2018-01-01 07:39:23 -07:00
}
}
// Trigger scan
if ! fcfg . FSWatcherEnabled {
2019-04-28 15:30:16 -07:00
go func ( ) { _ = m . ScanFolder ( folder ) } ( )
2018-01-01 07:39:23 -07:00
}
2019-04-28 15:30:16 -07:00
return restoreErrors , nil
2018-01-01 07:39:23 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) Availability ( folder string , file protocol . FileInfo , block protocol . BlockInfo ) [ ] Availability {
2016-11-07 23:38:50 -07:00
// The slightly unusual locking sequence here is because we need to hold
// pmut for the duration (as the value returned from foldersFiles can
// get heavily modified on Close()), but also must acquire fmut before
// pmut. (The locks can be *released* in any order.)
m . fmut . RLock ( )
2014-10-31 16:41:18 -07:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
2014-09-28 04:00:38 -07:00
fs , ok := m . folderFiles [ folder ]
2018-06-06 14:34:11 -07:00
cfg := m . folderCfgs [ folder ]
2014-11-03 14:02:55 -07:00
m . fmut . RUnlock ( )
2016-11-07 23:38:50 -07:00
2014-09-27 05:44:15 -07:00
if ! ok {
return nil
}
2016-04-15 03:59:41 -07:00
var availabilities [ ] Availability
2020-01-21 10:23:08 -07:00
snap := fs . Snapshot ( )
defer snap . Release ( )
2016-12-21 11:41:25 -07:00
next :
2020-01-21 10:23:08 -07:00
for _ , device := range snap . Availability ( file . Name ) {
2016-12-21 11:41:25 -07:00
for _ , pausedFolder := range m . remotePausedFolders [ device ] {
if pausedFolder == folder {
continue next
}
}
2015-06-28 08:05:29 -07:00
_ , ok := m . conn [ device ]
2014-10-31 16:41:18 -07:00
if ok {
2016-04-15 03:59:41 -07:00
availabilities = append ( availabilities , Availability { ID : device , FromTemporary : false } )
2014-10-31 16:41:18 -07:00
}
}
2016-04-15 03:59:41 -07:00
2018-06-06 14:34:11 -07:00
for _ , device := range cfg . Devices {
if m . deviceDownloads [ device . DeviceID ] . Has ( folder , file . Name , file . Version , int32 ( block . Offset / int64 ( file . BlockSize ( ) ) ) ) {
availabilities = append ( availabilities , Availability { ID : device . DeviceID , FromTemporary : true } )
2016-04-15 03:59:41 -07:00
}
}
return availabilities
2014-09-27 05:44:15 -07:00
}
2015-04-28 13:32:10 -07:00
// BringToFront bumps the given files priority in the job queue.
2019-02-26 01:09:25 -07:00
func ( m * model ) BringToFront ( folder , file string ) {
2019-05-02 05:09:42 -07:00
m . fmut . RLock ( )
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
runner , ok := m . folderRunners [ folder ]
2019-05-02 10:55:39 -07:00
m . fmut . RUnlock ( )
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
if ok {
2014-12-30 01:35:21 -07:00
runner . BringToFront ( file )
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
}
}
2019-02-26 01:09:25 -07:00
func ( m * model ) ResetFolder ( folder string ) {
2015-06-21 00:35:41 -07:00
l . Infof ( "Cleaning data for folder %q" , folder )
db . DropFolder ( m . db , folder )
2015-04-03 11:06:03 -07:00
}
2019-02-26 01:09:25 -07:00
func ( m * model ) String ( ) string {
2014-09-27 05:44:15 -07:00
return fmt . Sprintf ( "model@%p" , m )
}
2014-10-13 05:43:01 -07:00
2019-02-26 01:09:25 -07:00
func ( m * model ) VerifyConfiguration ( from , to config . Configuration ) error {
2015-06-03 00:47:39 -07:00
return nil
}
2019-02-26 01:09:25 -07:00
func ( m * model ) CommitConfiguration ( from , to config . Configuration ) bool {
2015-06-03 00:47:39 -07:00
// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.
2015-07-22 00:02:55 -07:00
// Go through the folder configs and figure out if we need to restart or not.
fromFolders := mapFolders ( from . Folders )
toFolders := mapFolders ( to . Folders )
2015-07-23 07:13:53 -07:00
for folderID , cfg := range toFolders {
2015-07-22 00:02:55 -07:00
if _ , ok := fromFolders [ folderID ] ; ! ok {
2015-07-23 07:13:53 -07:00
// A folder was added.
2017-04-01 02:58:06 -07:00
if cfg . Paused {
2017-12-07 00:08:24 -07:00
l . Infoln ( "Paused folder" , cfg . Description ( ) )
2017-04-01 02:58:06 -07:00
} else {
2017-12-07 00:08:24 -07:00
l . Infoln ( "Adding folder" , cfg . Description ( ) )
2019-11-18 13:15:26 -07:00
m . newFolder ( cfg )
2017-04-01 02:58:06 -07:00
}
2015-07-22 00:02:55 -07:00
}
2015-06-03 00:47:39 -07:00
}
2015-07-22 00:02:55 -07:00
for folderID , fromCfg := range fromFolders {
toCfg , ok := toFolders [ folderID ]
if ! ok {
2015-11-13 05:30:52 -07:00
// The folder was removed.
2019-11-08 02:56:16 -07:00
m . removeFolder ( fromCfg )
2015-11-13 05:30:52 -07:00
continue
2015-07-22 00:02:55 -07:00
}
2019-01-09 09:31:09 -07:00
if fromCfg . Paused && toCfg . Paused {
continue
}
2016-08-07 09:21:59 -07:00
// This folder exists on both sides. Settings might have changed.
2017-12-07 01:33:32 -07:00
// Check if anything differs that requires a restart.
if ! reflect . DeepEqual ( fromCfg . RequiresRestartOnly ( ) , toCfg . RequiresRestartOnly ( ) ) {
2019-11-08 02:56:16 -07:00
m . restartFolder ( fromCfg , toCfg )
2015-07-22 00:02:55 -07:00
}
2016-12-21 11:41:25 -07:00
// Emit the folder pause/resume event
if fromCfg . Paused != toCfg . Paused {
eventType := events . FolderResumed
if toCfg . Paused {
eventType = events . FolderPaused
}
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( eventType , map [ string ] string { "id" : toCfg . ID , "label" : toCfg . Label } )
2016-12-21 11:41:25 -07:00
}
2015-06-03 00:47:39 -07:00
}
2015-07-22 00:02:55 -07:00
2016-08-07 09:21:59 -07:00
// Removing a device. We actually don't need to do anything.
// Because folder config has changed (since the device lists do not match)
// Folders for that had device got "restarted", which involves killing
// connections to all devices that we were sharing the folder with.
// At some point model.Close() will get called for that device which will
// clean residue device state that is not part of any folder.
2015-06-03 00:47:39 -07:00
2016-12-21 11:41:25 -07:00
// Pausing a device, unpausing is handled by the connection service.
2018-03-26 03:01:59 -07:00
fromDevices := from . DeviceMap ( )
toDevices := to . DeviceMap ( )
2016-12-21 11:41:25 -07:00
for deviceID , toCfg := range toDevices {
fromCfg , ok := fromDevices [ deviceID ]
2019-09-11 21:55:24 -07:00
if ! ok {
sr := stats . NewDeviceStatisticsReference ( m . db , deviceID . String ( ) )
m . fmut . Lock ( )
m . deviceStatRefs [ deviceID ] = sr
m . fmut . Unlock ( )
continue
}
delete ( fromDevices , deviceID )
if fromCfg . Paused == toCfg . Paused {
2016-12-21 11:41:25 -07:00
continue
}
2018-08-25 03:36:10 -07:00
// Ignored folder was removed, reconnect to retrigger the prompt.
if len ( fromCfg . IgnoredFolders ) > len ( toCfg . IgnoredFolders ) {
2019-04-28 03:58:51 -07:00
m . closeConn ( deviceID , errIgnoredFolderRemoved )
2018-08-25 03:36:10 -07:00
}
2016-12-21 11:41:25 -07:00
if toCfg . Paused {
l . Infoln ( "Pausing" , deviceID )
2019-07-19 10:37:29 -07:00
m . closeConn ( deviceID , errDevicePaused )
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( events . DevicePaused , map [ string ] string { "device" : deviceID . String ( ) } )
2016-12-21 11:41:25 -07:00
} else {
2019-08-15 07:29:37 -07:00
m . evLogger . Log ( events . DeviceResumed , map [ string ] string { "device" : deviceID . String ( ) } )
2016-12-21 11:41:25 -07:00
}
}
2020-05-13 22:50:53 -07:00
removedDevices := make ( [ ] protocol . DeviceID , 0 , len ( fromDevices ) )
2019-09-11 21:55:24 -07:00
m . fmut . Lock ( )
for deviceID := range fromDevices {
delete ( m . deviceStatRefs , deviceID )
2020-05-13 22:50:53 -07:00
removedDevices = append ( removedDevices , deviceID )
2019-09-11 21:55:24 -07:00
}
m . fmut . Unlock ( )
2020-05-13 22:50:53 -07:00
m . closeConns ( removedDevices , errDeviceRemoved )
2016-12-21 11:41:25 -07:00
2020-02-01 00:02:18 -07:00
m . globalRequestLimiter . setCapacity ( 1024 * to . Options . MaxConcurrentIncomingRequestKiB ( ) )
m . folderIOLimiter . setCapacity ( to . Options . MaxFolderConcurrency ( ) )
2018-12-05 00:40:05 -07:00
2016-01-18 11:06:31 -07:00
// Some options don't require restart as those components handle it fine
2017-12-07 01:33:32 -07:00
// by themselves. Compare the options structs containing only the
// attributes that require restart and act apprioriately.
if ! reflect . DeepEqual ( from . Options . RequiresRestartOnly ( ) , to . Options . RequiresRestartOnly ( ) ) {
2015-10-03 08:25:21 -07:00
l . Debugln ( m , "requires restart, options differ" )
2015-06-03 00:47:39 -07:00
return false
}
return true
}
2017-12-15 13:01:56 -07:00
// checkFolderRunningLocked returns nil if the folder is up and running and a
// descriptive error if not.
// Need to hold (read) lock on m.fmut when calling this.
2019-02-26 01:09:25 -07:00
func ( m * model ) checkFolderRunningLocked ( folder string ) error {
2017-12-15 13:01:56 -07:00
_ , ok := m . folderRunners [ folder ]
if ok {
return nil
}
if cfg , ok := m . cfg . Folder ( folder ) ; ! ok {
return errFolderMissing
} else if cfg . Paused {
2018-01-14 10:01:06 -07:00
return ErrFolderPaused
2017-12-15 13:01:56 -07:00
}
return errFolderNotRunning
}
2015-07-22 00:02:55 -07:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
	byID := make(map[string]config.FolderConfiguration, len(folders))
	for _, fcfg := range folders {
		byID[fcfg.ID] = fcfg
	}
	return byID
}
// mapDevices returns a set (map to empty struct) of the given device IDs.
func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for _, id := range devices {
		set[id] = struct{}{}
	}
	return set
}
2017-08-19 07:36:56 -07:00
func readOffsetIntoBuf ( fs fs . Filesystem , file string , offset int64 , buf [ ] byte ) error {
fd , err := fs . Open ( file )
2016-04-15 03:59:41 -07:00
if err != nil {
l . Debugln ( "readOffsetIntoBuf.Open" , file , err )
return err
}
defer fd . Close ( )
_ , err = fd . ReadAt ( buf , offset )
if err != nil {
l . Debugln ( "readOffsetIntoBuf.ReadAt" , file , err )
}
return err
}
2016-04-30 23:49:29 -07:00
// makeForgetUpdate takes an index update and constructs a download progress
// update causing to forget any progress for files which we've just been sent.
// Symlinks, directories and deletions carry no download progress and are
// skipped.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	updates := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for _, f := range files {
		if f.IsSymlink() || f.IsDirectory() || f.IsDeleted() {
			continue
		}
		updates = append(updates, protocol.FileDownloadProgressUpdate{
			Name:       f.Name,
			Version:    f.Version,
			UpdateType: protocol.UpdateTypeForget,
		})
	}
	return updates
}
2016-08-05 00:13:52 -07:00
2016-11-07 09:40:48 -07:00
// folderDeviceSet is a set of (folder, deviceID) pairs
type folderDeviceSet map[string]map[protocol.DeviceID]struct{}

// set adds the (dev, folder) pair to the set
func (s folderDeviceSet) set(dev protocol.DeviceID, folder string) {
	members, ok := s[folder]
	if !ok {
		members = make(map[protocol.DeviceID]struct{})
		s[folder] = members
	}
	members[dev] = struct{}{}
}

// has returns true if the (dev, folder) pair is in the set
func (s folderDeviceSet) has(dev protocol.DeviceID, folder string) bool {
	_, ok := s[folder][dev]
	return ok
}

// hasDevice returns true if the device is set on any folder
func (s folderDeviceSet) hasDevice(dev protocol.DeviceID) bool {
	for _, members := range s {
		if _, ok := members[dev]; ok {
			return true
		}
	}
	return false
}
2018-08-25 01:32:35 -07:00
type fileInfoBatch struct {
infos [ ] protocol . FileInfo
size int
flushFn func ( [ ] protocol . FileInfo ) error
}
// newFileInfoBatch returns a batch that flushes through fn, with capacity
// preallocated for a full batch of files.
func newFileInfoBatch(fn func([]protocol.FileInfo) error) *fileInfoBatch {
	b := &fileInfoBatch{flushFn: fn}
	b.infos = make([]protocol.FileInfo, 0, maxBatchSizeFiles)
	return b
}
// append adds f to the batch and accounts for its wire size.
func (b *fileInfoBatch) append(f protocol.FileInfo) {
	b.size += f.ProtoSize()
	b.infos = append(b.infos, f)
}
2020-05-11 11:15:11 -07:00
// full reports whether the batch has hit either the file-count or the
// byte-size limit and should be flushed.
func (b *fileInfoBatch) full() bool {
	if len(b.infos) >= maxBatchSizeFiles {
		return true
	}
	return b.size >= maxBatchSizeBytes
}
2018-08-25 01:32:35 -07:00
func ( b * fileInfoBatch ) flushIfFull ( ) error {
2020-05-11 11:15:11 -07:00
if b . full ( ) {
2018-08-25 01:32:35 -07:00
return b . flush ( )
}
return nil
}
// flush passes the accumulated items to flushFn and, on success, empties
// the batch. An empty batch flushes to nil without calling flushFn. On
// error the batch contents are kept so a retry is possible.
func (b *fileInfoBatch) flush() error {
	if len(b.infos) == 0 {
		return nil
	}
	err := b.flushFn(b.infos)
	if err == nil {
		b.reset()
	}
	return err
}
// reset empties the batch while keeping the backing array for reuse.
func (b *fileInfoBatch) reset() {
	b.size = 0
	b.infos = b.infos[:0]
}
2018-10-05 01:26:25 -07:00
// syncMutexMap is a type safe wrapper for a sync.Map that holds mutexes,
// keyed by string.
type syncMutexMap struct {
	inner stdsync.Map
}

// Get returns the mutex stored under key, creating and storing a fresh one
// on first use. Concurrent callers with the same key all receive the same
// mutex (LoadOrStore guarantees a single winner).
func (m *syncMutexMap) Get(key string) sync.Mutex {
	v, _ := m.inner.LoadOrStore(key, sync.NewMutex())
	return v.(sync.Mutex)
}
2019-01-05 10:10:02 -07:00
// sanitizePath takes a string that might contain all kinds of special
// characters and makes a valid, similar, path name out of it.
//
// Spans of invalid characters, whitespace and/or non-UTF-8 sequences are
// replaced by a single space. The result is always UTF-8 and contains only
// printable characters, as determined by unicode.IsPrint.
//
// Invalid characters are non-printing runes, things not allowed in file names
// in Windows, and common shell metacharacters. Even if asterisks and pipes
// and stuff are allowed on Unixes in general they might not be allowed by
// the filesystem and may surprise the user and cause shell oddness. This
// function is intended for file names we generate on behalf of the user,
// and surprising them with odd shell characters in file names is unkind.
//
// We include whitespace in the invalid characters so that multiple
// whitespace is collapsed to a single space. Additionally, whitespace at
// either end is removed.
func sanitizePath(path string) string {
	// Shell metacharacters and Windows-forbidden filename characters.
	const metas = `<>:"'/\|?*[]{};:!@$%&^#`

	var sb strings.Builder
	last := ' ' // pretend the string starts after a space, so leading spaces are dropped
	for _, r := range path {
		// Non-printable runes, the replacement char (botched UTF-8) and
		// metacharacters all map to a single space.
		if !unicode.IsPrint(r) || r == unicode.ReplacementChar || strings.ContainsRune(metas, r) {
			r = ' '
		}
		// Collapse runs of spaces: only emit a space when the previous
		// emitted rune wasn't one.
		if r != ' ' || last != ' ' {
			sb.WriteRune(r)
		}
		last = r
	}

	return strings.TrimSpace(sb.String())
}