// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.

package model

import (
	"bufio"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/internal/config"
	"github.com/syncthing/syncthing/internal/db"
	"github.com/syncthing/syncthing/internal/events"
	"github.com/syncthing/syncthing/internal/ignore"
	"github.com/syncthing/syncthing/internal/lamport"
	"github.com/syncthing/syncthing/internal/osutil"
	"github.com/syncthing/syncthing/internal/scanner"
	"github.com/syncthing/syncthing/internal/stats"
	"github.com/syncthing/syncthing/internal/symlinks"
	"github.com/syncthing/syncthing/internal/versioner"

	"github.com/syndtr/goleveldb/leveldb"
)

type folderState int

const (
	FolderIdle folderState = iota
	FolderScanning
	FolderSyncing
	FolderCleaning
)

func (s folderState) String() string {
	switch s {
	case FolderIdle:
		return "idle"
	case FolderScanning:
		return "scanning"
	case FolderCleaning:
		return "cleaning"
	case FolderSyncing:
		return "syncing"
	default:
		return "unknown"
	}
}

// How many files to send in each Index/IndexUpdate message.
const (
	indexTargetSize   = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	indexPerFileSize  = 250        // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
	IndexPerBlockSize = 40         // Each BlockInfo is approximately this big
	indexBatchSize    = 1000       // Either way, don't include more files than this
)
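
// Illustrative arithmetic (added commentary, not from the original source):
// with the figures above, a file carrying ten blocks is estimated at
// indexPerFileSize+10*IndexPerBlockSize = 250+400 = 650 bytes, so a 250 KiB
// target message holds roughly 390 such files, well under the hard
// indexBatchSize cap of 1000 files per message.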

type service interface {
	Serve()
	Stop()
	Jobs() ([]string, []string) // In progress, Queued
	BringToFront(string)
}

type Model struct {
	cfg             *config.Wrapper
	db              *leveldb.DB
	finder          *db.BlockFinder
	progressEmitter *ProgressEmitter

	deviceName    string
	clientName    string
	clientVersion string

	folderCfgs     map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles    map[string]*db.FileSet                                 // folder -> files
	folderDevices  map[string][]protocol.DeviceID                         // folder -> deviceIDs
	deviceFolders  map[protocol.DeviceID][]string                         // deviceID -> folders
	deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores  map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners  map[string]service                                     // folder -> puller or scanner
	folderStatRefs map[string]*stats.FolderStatisticsReference            // folder -> statsRef
	fmut           sync.RWMutex                                           // protects the above

	folderState        map[string]folderState // folder -> state
	folderStateChanged map[string]time.Time   // folder -> time when state changed
	smut               sync.RWMutex

	protoConn map[protocol.DeviceID]protocol.Connection
	rawConn   map[protocol.DeviceID]io.Closer
	deviceVer map[protocol.DeviceID]string
	pmut      sync.RWMutex // protects protoConn and rawConn

	addedFolder bool
	started     bool
}

var (
	ErrNoSuchFile = errors.New("no such file")
	ErrInvalid    = errors.New("file is invalid")

	SymlinkWarning = sync.Once{}
)

// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
func NewModel(cfg *config.Wrapper, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
	m := &Model{
		cfg:                cfg,
		db:                 ldb,
		deviceName:         deviceName,
		clientName:         clientName,
		clientVersion:      clientVersion,
		folderCfgs:         make(map[string]config.FolderConfiguration),
		folderFiles:        make(map[string]*db.FileSet),
		folderDevices:      make(map[string][]protocol.DeviceID),
		deviceFolders:      make(map[protocol.DeviceID][]string),
		deviceStatRefs:     make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
		folderIgnores:      make(map[string]*ignore.Matcher),
		folderRunners:      make(map[string]service),
		folderStatRefs:     make(map[string]*stats.FolderStatisticsReference),
		folderState:        make(map[string]folderState),
		folderStateChanged: make(map[string]time.Time),
		protoConn:          make(map[protocol.DeviceID]protocol.Connection),
		rawConn:            make(map[protocol.DeviceID]io.Closer),
		deviceVer:          make(map[protocol.DeviceID]string),
		finder:             db.NewBlockFinder(ldb, cfg),
		progressEmitter:    NewProgressEmitter(cfg),
	}
	if cfg.Options().ProgressUpdateIntervalS > -1 {
		go m.progressEmitter.Serve()
	}

	var timeout = 20 * 60 // seconds
	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			timeout = it
		}
	}
	deadlockDetect(&m.fmut, time.Duration(timeout)*time.Second)
	deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
	deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)
	return m
}
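
// Minimal usage sketch (illustrative only, not from the original source; the
// device name, version string and configuration values are placeholders): the
// caller constructs the model, registers folders and then starts the per-folder
// runners, roughly along these lines:
//
//	m := NewModel(cfg, "mydevice", "syncthing", "v0.10", ldb)
//	m.AddFolder(folderCfg)
//	m.StartFolderRO(folderCfg.ID) // or StartFolderRW for a syncing folder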

// StartFolderRW starts read/write processing on the given folder. When in
// read/write mode the folder will attempt to keep in sync with the cluster by
// pulling needed files from peer devices.
func (m *Model) StartFolderRW(folder string) {
	m.fmut.Lock()
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		panic("cannot start nonexistent folder " + folder)
	}

	_, ok = m.folderRunners[folder]
	if ok {
		panic("cannot start already running folder " + folder)
	}
	p := &Puller{
		folder:          folder,
		dir:             cfg.Path,
		scanIntv:        time.Duration(cfg.RescanIntervalS) * time.Second,
		model:           m,
		ignorePerms:     cfg.IgnorePerms,
		lenientMtimes:   cfg.LenientMtimes,
		progressEmitter: m.progressEmitter,
		copiers:         cfg.Copiers,
		pullers:         cfg.Pullers,
		queue:           newJobQueue(),
	}
	m.folderRunners[folder] = p
	m.fmut.Unlock()

	if len(cfg.Versioning.Type) > 0 {
		factory, ok := versioner.Factories[cfg.Versioning.Type]
		if !ok {
			l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
		}
		p.versioner = factory(folder, cfg.Path, cfg.Versioning.Params)
	}

	if cfg.LenientMtimes {
		l.Infof("Folder %q is running with LenientMtimes workaround. Syncing may not work properly.", folder)
	}

	go p.Serve()
}

// StartFolderRO starts read-only processing on the given folder. When in
// read-only mode the folder will announce files to the cluster but not pull in
// any external changes.
func (m *Model) StartFolderRO(folder string) {
	m.fmut.Lock()
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		panic("cannot start nonexistent folder " + folder)
	}

	_, ok = m.folderRunners[folder]
	if ok {
		panic("cannot start already running folder " + folder)
	}
	s := &Scanner{
		folder: folder,
		intv:   time.Duration(cfg.RescanIntervalS) * time.Second,
		model:  m,
	}
	m.folderRunners[folder] = s
	m.fmut.Unlock()

	go s.Serve()
}

type ConnectionInfo struct {
	protocol.Statistics
	Address       string
	ClientVersion string
}

// ConnectionStats returns a map with connection statistics for each connected device.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
	type remoteAddrer interface {
		RemoteAddr() net.Addr
	}

	m.pmut.RLock()
	m.fmut.RLock()

	var res = make(map[string]ConnectionInfo)
	for device, conn := range m.protoConn {
		ci := ConnectionInfo{
			Statistics:    conn.Statistics(),
			ClientVersion: m.deviceVer[device],
		}
		if nc, ok := m.rawConn[device].(remoteAddrer); ok {
			ci.Address = nc.RemoteAddr().String()
		}

		res[device.String()] = ci
	}

	m.fmut.RUnlock()
	m.pmut.RUnlock()

	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}

// DeviceStatistics returns statistics about each device.
func (m *Model) DeviceStatistics() map[string]stats.DeviceStatistics {
	var res = make(map[string]stats.DeviceStatistics)
	for id := range m.cfg.Devices() {
		res[id.String()] = m.deviceStatRef(id).GetStatistics()
	}
	return res
}

// FolderStatistics returns statistics about each folder.
func (m *Model) FolderStatistics() map[string]stats.FolderStatistics {
	var res = make(map[string]stats.FolderStatistics)
	for id := range m.cfg.Folders() {
		res[id] = m.folderStatRef(id).GetStatistics()
	}
	return res
}

// Completion returns the completion status, in percent, for the given device and folder.
func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
	defer m.leveldbPanicWorkaround()

	var tot int64

	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return 0 // Folder doesn't exist, so we hardly have any of it
	}

	rf.WithGlobalTruncated(func(f db.FileIntf) bool {
		if !f.IsDeleted() {
			tot += f.Size()
		}
		return true
	})

	if tot == 0 {
		return 100 // Folder is empty, so we have all of it
	}

	var need int64
	rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
		if !f.IsDeleted() {
			need += f.Size()
		}
		return true
	})

	res := 100 * (1 - float64(need)/float64(tot))
	if debug {
		l.Debugf("%v Completion(%s, %q): %f (%d / %d)", m, device, folder, res, need, tot)
	}

	return res
}
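
// Worked example (added commentary, not from the original source): the figure
// above is 100*(1-need/tot) over the sizes of non-deleted files. A folder with
// a 200 MiB global size of which the device still needs 50 MiB reports
//
//	100 * (1 - 50.0/200.0) = 75 percent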

func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
	for _, f := range fs {
		fs, de, by := sizeOfFile(f)
		files += fs
		deleted += de
		bytes += by
	}
	return
}

func sizeOfFile(f db.FileIntf) (files, deleted int, bytes int64) {
	if !f.IsDeleted() {
		files++
	} else {
		deleted++
	}
	bytes += f.Size()
	return
}

// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(folder string) (nfiles, deleted int, bytes int64) {
	defer m.leveldbPanicWorkaround()

	m.fmut.RLock()
	defer m.fmut.RUnlock()
	if rf, ok := m.folderFiles[folder]; ok {
		rf.WithGlobalTruncated(func(f db.FileIntf) bool {
			fs, de, by := sizeOfFile(f)
			nfiles += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local folder.
func (m *Model) LocalSize(folder string) (nfiles, deleted int, bytes int64) {
	defer m.leveldbPanicWorkaround()

	m.fmut.RLock()
	defer m.fmut.RUnlock()
	if rf, ok := m.folderFiles[folder]; ok {
		rf.WithHaveTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
			if f.IsInvalid() {
				return true
			}
			fs, de, by := sizeOfFile(f)
			nfiles += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// NeedSize returns the number and total size of currently needed files.
func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
	defer m.leveldbPanicWorkaround()

	m.fmut.RLock()
	defer m.fmut.RUnlock()
	if rf, ok := m.folderFiles[folder]; ok {
		rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
			fs, de, by := sizeOfFile(f)
			nfiles += fs + de
			bytes += by
			return true
		})
	}
	bytes -= m.progressEmitter.BytesCompleted(folder)
	if debug {
		l.Debugf("%v NeedSize(%q): %d %d", m, folder, nfiles, bytes)
	}
	return
}

// NeedFolderFiles returns the list of currently needed files in progress, queued,
// and to be queued on next puller iteration. Also takes a soft cap which is
// only respected when adding files from the model rather than the runner queue.
func (m *Model) NeedFolderFiles(folder string, max int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
	defer m.leveldbPanicWorkaround()

	m.fmut.RLock()
	defer m.fmut.RUnlock()

	if rf, ok := m.folderFiles[folder]; ok {
		var progress, queued, rest []db.FileInfoTruncated
		var seen map[string]bool

		runner, ok := m.folderRunners[folder]
		if ok {
			progressNames, queuedNames := runner.Jobs()

			progress = make([]db.FileInfoTruncated, len(progressNames))
			queued = make([]db.FileInfoTruncated, len(queuedNames))
			seen = make(map[string]bool, len(progressNames)+len(queuedNames))

			for i, name := range progressNames {
				if f, ok := rf.GetGlobalTruncated(name); ok {
					progress[i] = f
					seen[name] = true
				}
			}

			for i, name := range queuedNames {
				if f, ok := rf.GetGlobalTruncated(name); ok {
					queued[i] = f
					seen[name] = true
				}
			}
		}

		left := max - len(progress) - len(queued)
		if max < 1 || left > 0 {
			rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
				left--
				ft := f.(db.FileInfoTruncated)
				if !seen[ft.Name] {
					rest = append(rest, ft)
				}
				return max < 1 || left > 0
			})
		}

		return progress, queued, rest
	}

	return nil, nil, nil
}
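
// Illustrative note (added commentary, not from the original source): with
// max=100 and a runner reporting 3 in-progress and 7 queued items, at most 90
// further entries are taken from the folder's need list; passing max < 1
// disables the cap entirely.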

// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	if debug {
		l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs))
	}

	if !m.folderSharedWith(folder, deviceID) {
		events.Default.Log(events.FolderRejected, map[string]string{
			"folder": folder,
			"device": deviceID.String(),
		})
		l.Infof("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
		return
	}

	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		l.Fatalf("Index for nonexistent folder %q", folder)
	}

	for i := 0; i < len(fs); {
		lamport.Default.Tick(fs[i].Version)
		if symlinkInvalid(fs[i].IsSymlink()) {
			if debug {
				l.Debugln("dropping update for unsupported symlink", fs[i])
			}
			fs[i] = fs[len(fs)-1]
			fs = fs[:len(fs)-1]
		} else {
			i++
		}
	}

	files.Replace(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.LocalVersion(deviceID),
	})
}
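
// Note (added commentary, not from the original source): the loop above filters
// unsupported symlink entries in place using the swap-with-last idiom: the
// offending element is overwritten with the final element and the slice is
// shortened by one, so no extra allocation is needed and element order is not
// preserved. IndexUpdate below applies the same filtering.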

// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	if debug {
		l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs))
	}

	if !m.folderSharedWith(folder, deviceID) {
		l.Infof("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
		return
	}

	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		l.Fatalf("IndexUpdate for nonexistent folder %q", folder)
	}

	for i := 0; i < len(fs); {
		lamport.Default.Tick(fs[i].Version)
		if symlinkInvalid(fs[i].IsSymlink()) {
			if debug {
				l.Debugln("dropping update for unsupported symlink", fs[i])
			}
			fs[i] = fs[len(fs)-1]
			fs = fs[:len(fs)-1]
		} else {
			i++
		}
	}

	files.Update(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.LocalVersion(deviceID),
	})
}

func (m *Model) folderSharedWith(folder string, deviceID protocol.DeviceID) bool {
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	for _, nfolder := range m.deviceFolders[deviceID] {
		if nfolder == folder {
			return true
		}
	}
	return false
}

func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfigMessage) {
	m.pmut.Lock()
	if cm.ClientName == "syncthing" {
		m.deviceVer[deviceID] = cm.ClientVersion
	} else {
		m.deviceVer[deviceID] = cm.ClientName + " " + cm.ClientVersion
	}

	event := map[string]string{
		"id":            deviceID.String(),
		"clientName":    cm.ClientName,
		"clientVersion": cm.ClientVersion,
	}

	if conn, ok := m.rawConn[deviceID].(*tls.Conn); ok {
		event["addr"] = conn.RemoteAddr().String()
	}

	m.pmut.Unlock()

	events.Default.Log(events.DeviceConnected, event)

	l.Infof(`Device %s client is "%s %s"`, deviceID, cm.ClientName, cm.ClientVersion)

	var changed bool

	if name := cm.GetOption("name"); name != "" {
		l.Infof("Device %s name is %q", deviceID, name)
		device, ok := m.cfg.Devices()[deviceID]
		if ok && device.Name == "" {
			device.Name = name
			m.cfg.SetDevice(device)
			changed = true
		}
	}

	if m.cfg.Devices()[deviceID].Introducer {
		// This device is an introducer. Go through the announced lists of folders
		// and devices and add what we are missing.

		for _, folder := range cm.Folders {
			// If we don't have this folder yet, skip it. Ideally, we'd
			// offer up something in the GUI to create the folder, but for the
			// moment we only handle folders that we already have.
			if _, ok := m.folderDevices[folder.ID]; !ok {
				continue
			}

		nextDevice:
			for _, device := range folder.Devices {
				var id protocol.DeviceID
				copy(id[:], device.ID)

				if _, ok := m.cfg.Devices()[id]; !ok {
					// The device is currently unknown. Add it to the config.

					l.Infof("Adding device %v to config (vouched for by introducer %v)", id, deviceID)
					newDeviceCfg := config.DeviceConfiguration{
						DeviceID:    id,
						Compression: m.cfg.Devices()[deviceID].Compression,
						Addresses:   []string{"dynamic"},
					}

					// The introducers' introducers are also our introducers.
					if device.Flags&protocol.FlagIntroducer != 0 {
						l.Infof("Device %v is now also an introducer", id)
						newDeviceCfg.Introducer = true
					}

					m.cfg.SetDevice(newDeviceCfg)
					changed = true
				}

				for _, er := range m.deviceFolders[id] {
					if er == folder.ID {
						// We already share the folder with this device, so
						// nothing to do.
						continue nextDevice
					}
				}

				// We don't yet share this folder with this device. Add the device
				// to the sharing list of the folder.

				l.Infof("Adding device %v to share %q (vouched for by introducer %v)", id, folder.ID, deviceID)

				m.deviceFolders[id] = append(m.deviceFolders[id], folder.ID)
				m.folderDevices[folder.ID] = append(m.folderDevices[folder.ID], id)

				folderCfg := m.cfg.Folders()[folder.ID]
				folderCfg.Devices = append(folderCfg.Devices, config.FolderDeviceConfiguration{
					DeviceID: id,
				})
				m.cfg.SetFolder(folderCfg)

				changed = true
			}
		}
	}

	if changed {
		m.cfg.Save()
	}
}
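
// Added commentary (not from the original source): the introducer handling
// above is transitive trust. When a device we have marked as Introducer
// announces a folder we already share, any devices it lists for that folder are
// added to our configuration and to the folder's device list, and devices it
// flags as introducers become introducers for us as well. The configuration is
// saved once at the end if anything changed.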

// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(device protocol.DeviceID, err error) {
	l.Infof("Connection to %s closed: %v", device, err)
	events.Default.Log(events.DeviceDisconnected, map[string]string{
		"id":    device.String(),
		"error": err.Error(),
	})

	m.pmut.Lock()
	m.fmut.RLock()
	for _, folder := range m.deviceFolders[device] {
		m.folderFiles[folder].Replace(device, nil)
	}
	m.fmut.RUnlock()

	conn, ok := m.rawConn[device]
	if ok {
		if conn, ok := conn.(*tls.Conn); ok {
			// If the underlying connection is a *tls.Conn, Close() does more
			// than it says on the tin. Specifically, it sends a TLS alert
			// message, which might block forever if the connection is dead
			// and we don't have a deadline set.
			conn.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
		}
		conn.Close()
	}

	delete(m.protoConn, device)
	delete(m.rawConn, device)
	delete(m.deviceVer, device)
	m.pmut.Unlock()
}

// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
	if !m.folderSharedWith(folder, deviceID) {
		l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder)
		return nil, ErrNoSuchFile
	}

	// Verify that the requested file exists in the local model.
	m.fmut.RLock()
	folderFiles, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		l.Warnf("Request from %s for file %s in nonexistent folder %q", deviceID, name, folder)
		return nil, ErrNoSuchFile
	}

	lf, ok := folderFiles.Get(protocol.LocalDeviceID, name)
	if !ok {
		return nil, ErrNoSuchFile
	}

	if lf.IsInvalid() || lf.IsDeleted() {
		if debug {
			l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, size, lf)
		}
		return nil, ErrInvalid
	}

	if offset > lf.Size() {
		if debug {
			l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, size)
		}
		return nil, ErrNoSuchFile
	}

	if debug && deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
	}
	m.fmut.RLock()
	fn := filepath.Join(m.folderCfgs[folder].Path, name)
	m.fmut.RUnlock()

	var reader io.ReaderAt
	var err error
	if lf.IsSymlink() {
		target, _, err := symlinks.Read(fn)
		if err != nil {
			return nil, err
		}
		reader = strings.NewReader(target)
	} else {
		reader, err = os.Open(fn) // XXX: Inefficient, should cache fd?
		if err != nil {
			return nil, err
		}
		defer reader.(*os.File).Close()
	}

	buf := make([]byte, size)
	_, err = reader.ReadAt(buf, offset)
	if err != nil {
		return nil, err
	}

	return buf, nil
}
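
// Illustrative note (added commentary, not from the original source; folder ID,
// file name and block number n are placeholders): remote devices fetch file
// data block by block through Request, with the offset and size describing one
// block, roughly:
//
//	data, err := m.Request(deviceID, "default", "some/file", int64(n)*protocol.BlockSize, protocol.BlockSize)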

// ReplaceLocal replaces the local folder index with the given list of files.
func (m *Model) ReplaceLocal(folder string, fs []protocol.FileInfo) {
	m.fmut.RLock()
	m.folderFiles[folder].ReplaceWithDelete(protocol.LocalDeviceID, fs)
	m.fmut.RUnlock()
}

func (m *Model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) {
	m.fmut.RLock()
	f, ok := m.folderFiles[folder].Get(protocol.LocalDeviceID, file)
	m.fmut.RUnlock()
	return f, ok
}

func (m *Model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) {
	m.fmut.RLock()
	f, ok := m.folderFiles[folder].GetGlobal(file)
	m.fmut.RUnlock()
	return f, ok
}

type cFiler struct {
	m *Model
	r string
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	return cf.m.CurrentFolderFile(cf.r, file)
}

// ConnectedTo returns true if we are connected to the named device.
func (m *Model) ConnectedTo(deviceID protocol.DeviceID) bool {
	m.pmut.RLock()
	_, ok := m.protoConn[deviceID]
	m.pmut.RUnlock()
	if ok {
		m.deviceWasSeen(deviceID)
	}
	return ok
}

func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
	var lines []string

	m.fmut.RLock()
	cfg, ok := m.folderCfgs[folder]
	m.fmut.RUnlock()
	if !ok {
		return lines, nil, fmt.Errorf("Folder %s does not exist", folder)
	}

	fd, err := os.Open(filepath.Join(cfg.Path, ".stignore"))
	if err != nil {
		if os.IsNotExist(err) {
			return lines, nil, nil
		}
		l.Warnln("Loading .stignore:", err)
		return lines, nil, err
	}
	defer fd.Close()

	scanner := bufio.NewScanner(fd)
	for scanner.Scan() {
		lines = append(lines, strings.TrimSpace(scanner.Text()))
	}

	m.fmut.RLock()
	var patterns []string
	if matcher := m.folderIgnores[folder]; matcher != nil {
		patterns = matcher.Patterns()
	}
	m.fmut.RUnlock()

	return lines, patterns, nil
}

func (m *Model) SetIgnores(folder string, content []string) error {
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		return fmt.Errorf("Folder %s does not exist", folder)
	}

	fd, err := ioutil.TempFile(cfg.Path, ".syncthing.stignore-"+folder)
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}
	defer os.Remove(fd.Name())

	for _, line := range content {
		_, err = fmt.Fprintln(fd, line)
		if err != nil {
			l.Warnln("Saving .stignore:", err)
			return err
		}
	}

	err = fd.Close()
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}

	file := filepath.Join(cfg.Path, ".stignore")
	err = osutil.Rename(fd.Name(), file)
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}

	return m.ScanFolder(folder)
}
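
// Added commentary (not from the original source): SetIgnores follows the usual
// atomic-replace pattern. The new content is written to a temporary file in the
// same directory, closed, and only then renamed over .stignore via
// osutil.Rename, so readers see either the old file or the complete new one,
// never a partially written one. The deferred os.Remove cleans up the temporary
// file if any step fails before the rename.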

// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
	deviceID := protoConn.ID()

	m.pmut.Lock()
	if _, ok := m.protoConn[deviceID]; ok {
		panic("add existing device")
	}
	m.protoConn[deviceID] = protoConn
	if _, ok := m.rawConn[deviceID]; ok {
		panic("add existing device")
	}
	m.rawConn[deviceID] = rawConn

	cm := m.clusterConfig(deviceID)
	protoConn.ClusterConfig(cm)

	m.fmut.RLock()
	for _, folder := range m.deviceFolders[deviceID] {
		fs := m.folderFiles[folder]
		go sendIndexes(protoConn, folder, fs, m.folderIgnores[folder])
	}
	m.fmut.RUnlock()
	m.pmut.Unlock()

	m.deviceWasSeen(deviceID)
}

func (m *Model) deviceStatRef(deviceID protocol.DeviceID) *stats.DeviceStatisticsReference {
	m.fmut.Lock()
	defer m.fmut.Unlock()

	if sr, ok := m.deviceStatRefs[deviceID]; ok {
		return sr
	}

	sr := stats.NewDeviceStatisticsReference(m.db, deviceID)
	m.deviceStatRefs[deviceID] = sr
	return sr
}

func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
	m.deviceStatRef(deviceID).WasSeen()
}

func (m *Model) folderStatRef(folder string) *stats.FolderStatisticsReference {
	m.fmut.Lock()
	defer m.fmut.Unlock()

	sr, ok := m.folderStatRefs[folder]
	if !ok {
		sr = stats.NewFolderStatisticsReference(m.db, folder)
		m.folderStatRefs[folder] = sr
	}
	return sr
}

func (m *Model) receivedFile(folder, filename string) {
	m.folderStatRef(folder).ReceivedFile(filename)
}

func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) {
	deviceID := conn.ID()
	name := conn.Name()
	var err error

	if debug {
		l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder)
	}

	minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores)

	for err == nil {
		time.Sleep(5 * time.Second)
		if fs.LocalVersion(protocol.LocalDeviceID) <= minLocalVer {
			continue
		}

		minLocalVer, err = sendIndexTo(false, minLocalVer, conn, folder, fs, ignores)
	}

	if debug {
		l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
	}
}

func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (uint64, error) {
	deviceID := conn.ID()
	name := conn.Name()
	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	currentBatchSize := 0
	maxLocalVer := uint64(0)
	var err error

	fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		f := fi.(protocol.FileInfo)
		if f.LocalVersion <= minLocalVer {
			return true
		}

		if f.LocalVersion > maxLocalVer {
			maxLocalVer = f.LocalVersion
		}

		if (ignores != nil && ignores.Match(f.Name)) || symlinkInvalid(f.IsSymlink()) {
			if debug {
				l.Debugln("not sending update for ignored/unsupported symlink", f)
			}
			return true
		}

		if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
			if initial {
				if err = conn.Index(folder, batch); err != nil {
					return false
				}
				if debug {
					l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
				}
				initial = false
			} else {
				if err = conn.IndexUpdate(folder, batch); err != nil {
					return false
				}
				if debug {
					l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
				}
			}

			batch = make([]protocol.FileInfo, 0, indexBatchSize)
			currentBatchSize = 0
		}

		batch = append(batch, f)
		currentBatchSize += indexPerFileSize + len(f.Blocks)*IndexPerBlockSize
		return true
	})

	if initial && err == nil {
		err = conn.Index(folder, batch)
		if debug && err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
		}
	} else if len(batch) > 0 && err == nil {
		err = conn.IndexUpdate(folder, batch)
		if debug && err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
		}
	}

	return maxLocalVer, err
}
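
// Added commentary (not from the original source): sendIndexes runs once per
// connection and folder. The first sendIndexTo call sends the full index as an
// Index message, flushing a batch whenever it reaches indexBatchSize files or
// the estimated indexTargetSize bytes; every later call sends only files whose
// LocalVersion is above the highest version previously sent, as IndexUpdate
// messages, polling the FileSet every five seconds.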

func (m *Model) updateLocal(folder string, f protocol.FileInfo) {
	f.LocalVersion = 0
	m.fmut.RLock()
	m.folderFiles[folder].Update(protocol.LocalDeviceID, []protocol.FileInfo{f})
	m.fmut.RUnlock()
	events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
		"folder":   folder,
		"name":     f.Name,
		"modified": time.Unix(f.Modified, 0),
		"flags":    fmt.Sprintf("0%o", f.Flags),
		"size":     f.Size(),
	})
}

func (m *Model) requestGlobal(deviceID protocol.DeviceID, folder, name string, offset int64, size int, hash []byte) ([]byte, error) {
	m.pmut.RLock()
	nc, ok := m.protoConn[deviceID]
	m.pmut.RUnlock()

	if !ok {
		return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID)
	}

	if debug {
		l.Debugf("%v REQ(out): %s: %q / %q o=%d s=%d h=%x", m, deviceID, folder, name, offset, size, hash)
	}

	return nc.Request(folder, name, offset, size)
}
func (m *Model) AddFolder(cfg config.FolderConfiguration) {
	if m.started {
		panic("cannot add folder to started model")
	}
	if len(cfg.ID) == 0 {
		panic("cannot add empty folder id")
	}

	m.fmut.Lock()
	m.folderCfgs[cfg.ID] = cfg
	m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, m.db)

	m.folderDevices[cfg.ID] = make([]protocol.DeviceID, len(cfg.Devices))
	for i, device := range cfg.Devices {
		m.folderDevices[cfg.ID][i] = device.DeviceID
		m.deviceFolders[device.DeviceID] = append(m.deviceFolders[device.DeviceID], cfg.ID)
	}

	ignores := ignore.New(m.cfg.Options().CacheIgnoredFiles)
	_ = ignores.Load(filepath.Join(cfg.Path, ".stignore")) // Ignore error, there might not be an .stignore
	m.folderIgnores[cfg.ID] = ignores

	m.addedFolder = true
	m.fmut.Unlock()
}
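
// ScanFolders scans all folders for local changes, one goroutine per folder,
// and waits for all scans to complete. A folder whose scan fails is marked
// invalid in the configuration.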
func (m *Model) ScanFolders() {
	m.fmut.RLock()
	var folders = make([]string, 0, len(m.folderCfgs))
	for folder := range m.folderCfgs {
		folders = append(folders, folder)
	}
	m.fmut.RUnlock()

	var wg sync.WaitGroup
	wg.Add(len(folders))
	for _, folder := range folders {
		folder := folder
		go func() {
			err := m.ScanFolder(folder)
			if err != nil {
				m.cfg.InvalidateFolder(folder, err.Error())
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
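
// ScanFolder rescans the entire given folder for local changes.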
func (m *Model) ScanFolder(folder string) error {
	return m.ScanFolderSub(folder, "")
}
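
// ScanFolderSub rescans the given subdirectory of a folder (the whole folder
// when sub is empty), updating the local index with new, changed, deleted
// and newly ignored files.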
func (m *Model) ScanFolderSub(folder, sub string) error {
	if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
		return errors.New("invalid subpath")
	}

	m.fmut.Lock()
	fs, ok := m.folderFiles[folder]
	folderCfg := m.folderCfgs[folder]
	ignores := m.folderIgnores[folder]
	m.fmut.Unlock()
	if !ok {
		return errors.New("no such folder")
	}

	_ = ignores.Load(filepath.Join(folderCfg.Path, ".stignore")) // Ignore error, there might not be an .stignore

	w := &scanner.Walker{
		Dir:          folderCfg.Path,
		Sub:          sub,
		Matcher:      ignores,
		BlockSize:    protocol.BlockSize,
		TempNamer:    defTempNamer,
		TempLifetime: time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
		CurrentFiler: cFiler{m, folder},
		IgnorePerms:  folderCfg.IgnorePerms,
		Hashers:      folderCfg.Hashers,
	}

	m.setState(folder, FolderScanning)
	fchan, err := w.Walk()
	if err != nil {
		return err
	}

	batchSize := 100
	batch := make([]protocol.FileInfo, 0, batchSize)
	for f := range fchan {
		events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
			"folder":   folder,
			"name":     f.Name,
			"modified": time.Unix(f.Modified, 0),
			"flags":    fmt.Sprintf("0%o", f.Flags),
			"size":     f.Size(),
		})
		if len(batch) == batchSize {
			fs.Update(protocol.LocalDeviceID, batch)
			batch = batch[:0]
		}
		batch = append(batch, f)
	}
	if len(batch) > 0 {
		fs.Update(protocol.LocalDeviceID, batch)
	}
	batch = batch[:0]

	// TODO: We should limit the Have scanning to start at sub
	seenPrefix := false
	fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		f := fi.(db.FileInfoTruncated)
		if !strings.HasPrefix(f.Name, sub) {
			// Return true so that we keep iterating, until we get to the part
			// of the tree we are interested in. Then return false so we stop
			// iterating when we've passed the end of the subtree.
			return !seenPrefix
		}

		seenPrefix = true
		if !f.IsDeleted() {
			if f.IsInvalid() {
				return true
			}

			if len(batch) == batchSize {
				fs.Update(protocol.LocalDeviceID, batch)
				batch = batch[:0]
			}

			if (ignores != nil && ignores.Match(f.Name)) || symlinkInvalid(f.IsSymlink()) {
				// File has been ignored or is an unsupported symlink. Set the invalid bit.
				if debug {
					l.Debugln("setting invalid bit on ignored", f)
				}
				nf := protocol.FileInfo{
					Name:     f.Name,
					Flags:    f.Flags | protocol.FlagInvalid,
					Modified: f.Modified,
					Version:  f.Version, // The file is still the same, so don't bump version
				}
				events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
					"folder":   folder,
					"name":     f.Name,
					"modified": time.Unix(f.Modified, 0),
					"flags":    fmt.Sprintf("0%o", f.Flags),
					"size":     f.Size(),
				})
				batch = append(batch, nf)
			} else if _, err := os.Lstat(filepath.Join(folderCfg.Path, f.Name)); err != nil && os.IsNotExist(err) {
				// File has been deleted.
				nf := protocol.FileInfo{
					Name:     f.Name,
					Flags:    f.Flags | protocol.FlagDeleted,
					Modified: f.Modified,
					Version:  lamport.Default.Tick(f.Version),
				}
				events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
					"folder":   folder,
					"name":     f.Name,
					"modified": time.Unix(f.Modified, 0),
					"flags":    fmt.Sprintf("0%o", f.Flags),
					"size":     f.Size(),
				})
				batch = append(batch, nf)
			}
		}
		return true
	})
	if len(batch) > 0 {
		fs.Update(protocol.LocalDeviceID, batch)
	}

	m.setState(folder, FolderIdle)
	return nil
}

// clusterConfig returns a ClusterConfigMessage that is correct for the given peer device
func (m *Model) clusterConfig(device protocol.DeviceID) protocol.ClusterConfigMessage {
	cm := protocol.ClusterConfigMessage{
		ClientName:    m.clientName,
		ClientVersion: m.clientVersion,
		Options: []protocol.Option{
			{
				Key:   "name",
				Value: m.deviceName,
			},
		},
	}

	m.fmut.RLock()
	for _, folder := range m.deviceFolders[device] {
		cr := protocol.Folder{
			ID: folder,
		}
		for _, device := range m.folderDevices[folder] {
			// DeviceID is a value type, but with an underlying array. Copy it
			// so we don't grab aliases to the same array later on in device[:]
			device := device
			// TODO: Set read only bit when relevant
			cn := protocol.Device{
				ID:    device[:],
				Flags: protocol.FlagShareTrusted,
			}
			if deviceCfg := m.cfg.Devices()[device]; deviceCfg.Introducer {
				cn.Flags |= protocol.FlagIntroducer
			}
			cr.Devices = append(cr.Devices, cn)
		}
		cm.Folders = append(cm.Folders, cr)
	}
	m.fmut.RUnlock()

	return cm
}
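
// setState sets the current state of the given folder and, if the state
// changed, emits a StateChanged event.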
func (m *Model) setState(folder string, state folderState) {
	m.smut.Lock()
	oldState := m.folderState[folder]
	changed, ok := m.folderStateChanged[folder]
	if state != oldState {
		m.folderState[folder] = state
		m.folderStateChanged[folder] = time.Now()
		eventData := map[string]interface{}{
			"folder": folder,
			"to":     state.String(),
		}
		if ok {
			eventData["duration"] = time.Since(changed).Seconds()
			eventData["from"] = oldState.String()
		}
		events.Default.Log(events.StateChanged, eventData)
	}
	m.smut.Unlock()
}
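
// State returns the current state of the given folder and the time at which
// it last changed.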
func (m *Model) State(folder string) (string, time.Time) {
	m.smut.RLock()
	state := m.folderState[folder]
	changed := m.folderStateChanged[folder]
	m.smut.RUnlock()
	return state.String(), changed
}
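
// Override replaces each file needed locally in the given folder with our
// local version (or a deletion record if we do not have the file), bumping
// the version so that the local copy wins over the global one.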
func (m *Model) Override(folder string) {
	m.fmut.RLock()
	fs := m.folderFiles[folder]
	m.fmut.RUnlock()

	m.setState(folder, FolderScanning)
	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		need := fi.(protocol.FileInfo)
		if len(batch) == indexBatchSize {
			fs.Update(protocol.LocalDeviceID, batch)
			batch = batch[:0]
		}

		have, ok := fs.Get(protocol.LocalDeviceID, need.Name)
		if !ok || have.Name != need.Name {
			// We are missing the file
			need.Flags |= protocol.FlagDeleted
			need.Blocks = nil
		} else {
			// We have the file, replace with our version
			need = have
		}
		need.Version = lamport.Default.Tick(need.Version)
		need.LocalVersion = 0
		batch = append(batch, need)
		return true
	})
	if len(batch) > 0 {
		fs.Update(protocol.LocalDeviceID, batch)
	}
	m.setState(folder, FolderIdle)
}

// CurrentLocalVersion returns the change version for the given folder. This
// is guaranteed to increment if the contents of the local folder have
// changed.
func (m *Model) CurrentLocalVersion(folder string) uint64 {
	m.fmut.RLock()
	fs, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		// The folder might not exist, since this can be called with a user
		// specified folder name from the REST interface.
		return 0
	}

	return fs.LocalVersion(protocol.LocalDeviceID)
}

// RemoteLocalVersion returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder have changed.
func (m *Model) RemoteLocalVersion(folder string) uint64 {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	fs, ok := m.folderFiles[folder]
	if !ok {
		// The folder might not exist, since this can be called with a user
		// specified folder name from the REST interface.
		return 0
	}

	var ver uint64
	for _, n := range m.folderDevices[folder] {
		ver += fs.LocalVersion(n)
	}
	return ver
}
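
// availability returns the list of currently connected devices that have the
// given file available.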
func (m *Model) availability(folder, file string) []protocol.DeviceID {
	// Acquire this lock first, as the value returned from folderFiles can
	// get heavily modified on Close()
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	m.fmut.RLock()
	fs, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return nil
	}

	availableDevices := []protocol.DeviceID{}
	for _, device := range fs.Availability(file) {
		_, ok := m.protoConn[device]
		if ok {
			availableDevices = append(availableDevices, device)
		}
	}
	return availableDevices
}

// BringToFront bumps the given file's priority in the folder's job queue.
func (m *Model) BringToFront(folder, file string) {
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	runner, ok := m.folderRunners[folder]
	if ok {
		runner.BringToFront(file)
	}
}
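
// String returns a short identifier for this model instance, used in debug
// logging.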
func (m *Model) String() string {
	return fmt.Sprintf("model@%p", m)
}

func (m *Model) leveldbPanicWorkaround() {
	// When an inconsistency is detected in leveldb we panic(). This is
	// appropriate because it should never happen, but currently it does for
	// some reason. However it only seems to trigger in the asynchronous full-
	// database scans that happen due to REST and usage-reporting calls. In
	// those places we defer to this workaround to catch the panic instead of
	// taking down syncthing.
	//
	// This is just a band-aid and should be removed as soon as we have found
	// a real root cause.
	if pnc := recover(); pnc != nil {
		if err, ok := pnc.(error); ok && strings.Contains(err.Error(), "leveldb") {
			l.Infoln("recovered:", err)
		} else {
			// Any non-leveldb error is genuine and should continue panicking.
			panic(err)
		}
	}
}
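
// symlinkInvalid reports whether the given entry is a symlink on a system
// where symlinks are unsupported, warning the user once when that is the case.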
func symlinkInvalid(isLink bool) bool {
	if !symlinks.Supported && isLink {
		SymlinkWarning.Do(func() {
			l.Warnln("Symlinks are disabled, unsupported or require Administrator privileges. This might cause your folder to appear out of sync.")
		})
		return true
	}
	return false
}