Add scan percentages (fixes #1030)
Commit 94c52e3a77, parent 875de4f637.

Godeps/Godeps.json (generated, 2 lines changed)
@@ -39,7 +39,7 @@
 		},
 		{
 			"ImportPath": "github.com/syncthing/protocol",
-			"Rev": "388a29bbe21d8772ee4c29f4520aa8040309607d"
+			"Rev": "68c5dcd83d9be8f28ae59e951a87cdcf01c6f5cb"
 		},
 		{
 			"ImportPath": "github.com/syncthing/relaysrv/client",
Godeps/_workspace/src/github.com/syncthing/protocol/message.go (generated, vendored, 1 line changed)
@@ -20,6 +20,7 @@ type FileInfo struct {
 	Modified     int64
 	Version      Vector
 	LocalVersion int64
+	CachedSize   int64 // noencode (cache only)
 	Blocks       []BlockInfo // max:1000000
 }
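The new CachedSize field is a scan-time cache only (hence "noencode"): it records the size observed during the walk so later stages can skip a redundant stat and can sum up how many bytes remain to hash. A minimal sketch of that accounting, assuming the patched FileInfo above; the helper name is hypothetical (the commit inlines this in the scanner's Walk, further down):

// totalToHash sums the cached sizes of the files queued for hashing; this is
// the denominator that FolderScanProgress reports as "total".
func totalToHash(files []protocol.FileInfo) uint64 {
    var total uint64
    for _, f := range files {
        total += uint64(f.CachedSize)
    }
    return total
}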
@@ -68,7 +68,7 @@ func main() {
 	if *standardBlocks || blockSize < protocol.BlockSize {
 		blockSize = protocol.BlockSize
 	}
-	bs, err := scanner.Blocks(fd, blockSize, fi.Size())
+	bs, err := scanner.Blocks(fd, blockSize, fi.Size(), nil)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -195,13 +195,20 @@
 <div class="panel panel-default" ng-repeat="folder in folderList()">
   <div class="panel-heading" data-toggle="collapse" data-parent="#folders" href="#folder-{{$index}}" style="cursor: pointer">
     <div class="panel-progress" ng-show="folderStatus(folder) == 'syncing'" ng-attr-style="width: {{syncPercentage(folder.id)}}%"></div>
+    <div class="panel-progress" ng-show="folderStatus(folder) == 'scanning' && scanProgress[folder.id] != undefined" ng-attr-style="width: {{scanPercentage(folder.id)}}%"></div>
     <h3 class="panel-title">
       <span class="fa fa-folder hidden-xs"></span>{{folder.id}}
       <span class="pull-right text-{{folderClass(folder)}}" ng-switch="folderStatus(folder)">
         <span ng-switch-when="unknown"><span class="hidden-xs" translate>Unknown</span><span class="visible-xs">◼</span></span>
         <span ng-switch-when="unshared"><span class="hidden-xs" translate>Unshared</span><span class="visible-xs">◼</span></span>
         <span ng-switch-when="stopped"><span class="hidden-xs" translate>Stopped</span><span class="visible-xs">◼</span></span>
-        <span ng-switch-when="scanning"><span class="hidden-xs" translate>Scanning</span><span class="visible-xs">◼</span></span>
+        <span ng-switch-when="scanning">
+          <span class="hidden-xs" translate>Scanning</span>
+          <span class="hidden-xs" ng-if="scanPercentage(folder.id) != undefined">
+            ({{scanPercentage(folder.id)}}%)
+          </span>
+          <span class="visible-xs">◼</span>
+        </span>
         <span ng-switch-when="idle"><span class="hidden-xs" translate>Up to Date</span><span class="visible-xs">◼</span></span>
         <span ng-switch-when="syncing">
           <span class="hidden-xs" translate>Syncing</span>
@@ -445,7 +452,7 @@
 </tr>
 <tr>
   <th>
     <span class="fa fa-fw fa-link"></span>
     <span translate ng-if="connections[deviceCfg.deviceID].type.indexOf('direct') == 0" >Address</span>
     <span translate ng-if="connections[deviceCfg.deviceID].type.indexOf('relay') == 0" >Relayed via</span>
   </th>
@@ -78,6 +78,7 @@ angular.module('syncthing.core')
 STARTUP_COMPLETED: 'StartupCompleted', // Emitted exactly once, when initialization is complete and Syncthing is ready to start exchanging data with other devices
 STATE_CHANGED: 'StateChanged', // Emitted when a folder changes state
 FOLDER_ERRORS: 'FolderErrors', // Emitted when a folder has errors preventing a full sync
+FOLDER_SCAN_PROGRESS: 'FolderScanProgress', // Emitted every ScanProgressIntervalS seconds, indicating how far into the scan it is at.

 start: function() {
     $http.get(urlbase + '/events?limit=1')
@@ -48,6 +48,7 @@ angular.module('syncthing.core')
 $scope.failedCurrentPage = 1;
 $scope.failedCurrentFolder = undefined;
 $scope.failedPageSize = 10;
+$scope.scanProgress = {};

 $scope.localStateTotal = {
     bytes: 0,
@@ -163,6 +164,12 @@ angular.module('syncthing.core')
     if (data.to === 'syncing') {
         $scope.failed[data.folder] = [];
     }
+
+    // If a folder has started scanning, then any scan progress is
+    // also obsolete.
+    if (data.to === 'scanning') {
+        delete $scope.scanProgress[data.folder];
+    }
 }
 });
@@ -310,6 +317,15 @@ angular.module('syncthing.core')
     $scope.failed[data.folder] = data.errors;
 });

+$scope.$on(Events.FOLDER_SCAN_PROGRESS, function (event, arg) {
+    var data = arg.data;
+    $scope.scanProgress[data.folder] = {
+        current: data.current,
+        total: data.total
+    };
+    console.log("FolderScanProgress", data);
+});
+
 $scope.emitHTTPError = function (data, status, headers, config) {
     $scope.$emit('HTTPError', {data: data, status: status, headers: headers, config: config});
 };
@@ -634,6 +650,14 @@ angular.module('syncthing.core')
     return Math.floor(pct);
 };

+$scope.scanPercentage = function (folder) {
+    if (!$scope.scanProgress[folder]) {
+        return undefined;
+    }
+    var pct = 100 * $scope.scanProgress[folder].current / $scope.scanProgress[folder].total;
+    return Math.floor(pct);
+}
+
 $scope.deviceStatus = function (deviceCfg) {
     if ($scope.deviceFolders(deviceCfg).length === 0) {
         return 'unused';
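The GUI is only one consumer of this event stream; anything that polls the REST events endpoint sees the same FolderScanProgress payload (folder, current, total). A sketch of an external consumer, assuming a local instance on the default port with authentication disabled (otherwise an X-API-Key header is required) and the /rest/events?since= long-polling endpoint:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// event mirrors the envelope returned by /rest/events.
type event struct {
    ID   int                    `json:"id"`
    Type string                 `json:"type"`
    Data map[string]interface{} `json:"data"`
}

func main() {
    since := 0
    for {
        // Long-polls for new events; "since" resumes after the last seen ID.
        resp, err := http.Get(fmt.Sprintf("http://localhost:8384/rest/events?since=%d", since))
        if err != nil {
            panic(err)
        }
        var evs []event
        err = json.NewDecoder(resp.Body).Decode(&evs)
        resp.Body.Close()
        if err != nil {
            panic(err)
        }
        for _, ev := range evs {
            since = ev.ID
            if ev.Type != "FolderScanProgress" {
                continue
            }
            // JSON numbers decode as float64.
            current, _ := ev.Data["current"].(float64)
            total, _ := ev.Data["total"].(float64)
            if total > 0 {
                fmt.Printf("%v: scanned %.0f%%\n", ev.Data["folder"], 100*current/total)
            }
        }
    }
}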
File diff suppressed because one or more lines are too long
@@ -68,20 +68,21 @@ func (cfg Configuration) Copy() Configuration {
 }

 type FolderConfiguration struct {
 	ID                    string                      `xml:"id,attr" json:"id"`
 	RawPath               string                      `xml:"path,attr" json:"path"`
 	Devices               []FolderDeviceConfiguration `xml:"device" json:"devices"`
 	ReadOnly              bool                        `xml:"ro,attr" json:"readOnly"`
 	RescanIntervalS       int                         `xml:"rescanIntervalS,attr" json:"rescanIntervalS"`
 	IgnorePerms           bool                        `xml:"ignorePerms,attr" json:"ignorePerms"`
 	AutoNormalize         bool                        `xml:"autoNormalize,attr" json:"autoNormalize"`
 	MinDiskFreePct        int                         `xml:"minDiskFreePct" json:"minDiskFreePct"`
 	Versioning            VersioningConfiguration     `xml:"versioning" json:"versioning"`
 	Copiers               int                         `xml:"copiers" json:"copiers"` // This defines how many files are handled concurrently.
 	Pullers               int                         `xml:"pullers" json:"pullers"` // Defines how many blocks are fetched at the same time, possibly between separate copier routines.
 	Hashers               int                         `xml:"hashers" json:"hashers"` // Less than one sets the value to the number of cores. These are CPU bound due to hashing.
 	Order                 PullOrder                   `xml:"order" json:"order"`
 	IgnoreDelete          bool                        `xml:"ignoreDelete" json:"ignoreDelete"`
+	ScanProgressIntervalS int                         `xml:"scanProgressInterval" json:"scanProgressInterval"` // Set to a negative value to disable. Value of 0 will get replaced with value of 2 (default value)

 	Invalid string `xml:"-" json:"invalid"` // Set at runtime when there is an error, not saved
 }
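Per the field comment, the interval has three regimes: negative disables progress events, zero means "use the default of two seconds", and any positive value is used as-is. The Walk() hunk further down implements exactly this; restated as a self-contained helper (hypothetical, not part of the commit):

import "time"

// effectiveTickInterval normalizes ScanProgressIntervalS the way Walk() does:
// negative disables progress reporting, zero falls back to the 2 s default.
func effectiveTickInterval(scanProgressIntervalS int) (time.Duration, bool) {
    if scanProgressIntervalS < 0 {
        return 0, false // progress events disabled
    }
    if scanProgressIntervalS == 0 {
        scanProgressIntervalS = 2 // default
    }
    return time.Duration(scanProgressIntervalS) * time.Second, true
}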
@@ -38,6 +38,7 @@ const (
 	FolderSummary
 	FolderCompletion
 	FolderErrors
+	FolderScanProgress

 	AllEvents = (1 << iota) - 1
 )
@@ -84,6 +85,8 @@ func (t EventType) String() string {
 		return "DevicePaused"
 	case DeviceResumed:
 		return "DeviceResumed"
+	case FolderScanProgress:
+		return "FolderScanProgress"
 	default:
 		return "Unknown"
 	}
@@ -1297,18 +1297,20 @@ nextSub:
 	subs = unifySubs

 	w := &scanner.Walker{
+		Folder:                folderCfg.ID,
 		Dir:                   folderCfg.Path(),
 		Subs:                  subs,
 		Matcher:               ignores,
 		BlockSize:             protocol.BlockSize,
 		TempNamer:             defTempNamer,
 		TempLifetime:          time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
 		CurrentFiler:          cFiler{m, folder},
 		MtimeRepo:             db.NewVirtualMtimeRepo(m.db, folderCfg.ID),
 		IgnorePerms:           folderCfg.IgnorePerms,
 		AutoNormalize:         folderCfg.AutoNormalize,
 		Hashers:               m.numHashers(folder),
 		ShortID:               m.shortID,
+		ProgressTickIntervalS: folderCfg.ScanProgressIntervalS,
 	}

 	runner.setState(FolderScanning)
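The model's only new wiring is these two fields: the folder ID keys the emitted FolderScanProgress events, and the tick interval comes straight from the folder configuration. A stripped-down caller with illustrative values, most Walker fields (Matcher, TempNamer, CurrentFiler, ...) omitted:

// scanFolder is a hypothetical caller of the patched Walker; real callers
// must fill in the remaining fields as the hunk above does.
func scanFolder() error {
    w := &scanner.Walker{
        Folder:                "default",       // keys FolderScanProgress events
        Dir:                   "/data/default", // illustrative path
        BlockSize:             protocol.BlockSize,
        Hashers:               4,
        ProgressTickIntervalS: 5, // tick every 5 s; negative disables
    }
    files, err := w.Walk()
    if err != nil {
        return err
    }
    for f := range files {
        _ = f // hand each hashed FileInfo to the index
    }
    return nil
}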
@@ -989,7 +989,7 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks

 	// Check for an old temporary file which might have some blocks we could
 	// reuse.
-	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
+	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, 0, nil)
 	if err == nil {
 		// Check for any reusable blocks in the temp file
 		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)
@ -241,7 +241,7 @@ func TestCopierFinder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify that the fetched blocks have actually been written to the temp file
|
// Verify that the fetched blocks have actually been written to the temp file
|
||||||
blks, err := scanner.HashFile(tempFile, protocol.BlockSize)
|
blks, err := scanner.HashFile(tempFile, protocol.BlockSize, 0, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Log(err)
|
t.Log(err)
|
||||||
}
|
}
|
||||||
|
@ -19,24 +19,27 @@ import (
|
|||||||
// workers are used in parallel. The outbox will become closed when the inbox
|
// workers are used in parallel. The outbox will become closed when the inbox
|
||||||
// is closed and all items handled.
|
// is closed and all items handled.
|
||||||
|
|
||||||
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo) {
|
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo, counter *uint64, done chan struct{}) {
|
||||||
wg := sync.NewWaitGroup()
|
wg := sync.NewWaitGroup()
|
||||||
wg.Add(workers)
|
wg.Add(workers)
|
||||||
|
|
||||||
for i := 0; i < workers; i++ {
|
for i := 0; i < workers; i++ {
|
||||||
go func() {
|
go func() {
|
||||||
hashFiles(dir, blockSize, outbox, inbox)
|
hashFiles(dir, blockSize, outbox, inbox, counter)
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
if done != nil {
|
||||||
|
close(done)
|
||||||
|
}
|
||||||
close(outbox)
|
close(outbox)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
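The done channel is the only new synchronization here: the goroutine that already waited out the workers now signals completion before closing the outbox, and passing nil for both counter and done preserves the old behaviour. The same pattern in isolation, as a runnable toy where squaring integers stands in for hashing:

package main

import (
    "fmt"
    "sync"
)

// startWorkers mimics newParallelHasher's shape: N workers drain an inbox,
// a supervisor waits for them all, signals done, then closes the outbox.
func startWorkers(workers int, inbox <-chan int, outbox chan<- int, done chan<- struct{}) {
    var wg sync.WaitGroup
    wg.Add(workers)
    for i := 0; i < workers; i++ {
        go func() {
            for v := range inbox {
                outbox <- v * v // stand-in for hashing work
            }
            wg.Done()
        }()
    }
    go func() {
        wg.Wait()
        if done != nil {
            close(done)
        }
        close(outbox)
    }()
}

func main() {
    in := make(chan int)
    out := make(chan int)
    done := make(chan struct{})
    startWorkers(3, in, out, done)
    go func() {
        for i := 1; i <= 5; i++ {
            in <- i
        }
        close(in)
    }()
    for v := range out {
        fmt.Println(v)
    }
    <-done // closed before the outbox, so this never blocks here
}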
-func HashFile(path string, blockSize int) ([]protocol.BlockInfo, error) {
+func HashFile(path string, blockSize int, sizeHint int64, counter *uint64) ([]protocol.BlockInfo, error) {
 	fd, err := os.Open(path)
 	if err != nil {
 		if debug {
@@ -44,27 +47,29 @@ func HashFile(path string, blockSize int) ([]protocol.BlockInfo, error) {
 		}
 		return []protocol.BlockInfo{}, err
 	}
-
-	fi, err := fd.Stat()
-	if err != nil {
-		fd.Close()
-		if debug {
-			l.Debugln("stat:", err)
-		}
-		return []protocol.BlockInfo{}, err
-	}
 	defer fd.Close()
-	return Blocks(fd, blockSize, fi.Size())
+
+	if sizeHint == 0 {
+		fi, err := fd.Stat()
+		if err != nil {
+			if debug {
+				l.Debugln("stat:", err)
+			}
+			return []protocol.BlockInfo{}, err
+		}
+		sizeHint = fi.Size()
+	}
+
+	return Blocks(fd, blockSize, sizeHint, counter)
 }

-func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo) {
+func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo, counter *uint64) {
 	for f := range inbox {
-		if f.IsDirectory() || f.IsDeleted() || f.IsSymlink() {
-			outbox <- f
-			continue
+		if f.IsDirectory() || f.IsDeleted() {
+			panic("Bug. Asked to hash a directory or a deleted file.")
 		}

-		blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize)
+		blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize, f.CachedSize, counter)
 		if err != nil {
 			if debug {
 				l.Debugln("hash error:", f.Name, err)
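The reshuffled HashFile treats sizeHint == 0 as "unknown, stat the file yourself", so old call sites keep working by passing a literal 0 while the scanner passes the size it cached during the walk plus a live counter. A sketched call against the patched lib/scanner package (the path is illustrative):

package main

import (
    "fmt"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/scanner"
)

func main() {
    // sizeHint 0 => HashFile stats the file itself; nil counter => no
    // progress accounting, exactly like the pre-patch behaviour.
    blocks, err := scanner.HashFile("/etc/hosts", protocol.BlockSize, 0, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println("blocks:", len(blocks))
}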
@@ -11,6 +11,7 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"io"
+	"sync/atomic"

 	"github.com/syncthing/protocol"
 )
@@ -18,7 +19,7 @@ import (
 var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}

 // Blocks returns the blockwise hash of the reader.
-func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
+func Blocks(r io.Reader, blocksize int, sizehint int64, counter *uint64) ([]protocol.BlockInfo, error) {
 	var blocks []protocol.BlockInfo
 	if sizehint > 0 {
 		blocks = make([]protocol.BlockInfo, 0, int(sizehint/int64(blocksize)))
@@ -36,6 +37,10 @@ func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, e
 			break
 		}

+		if counter != nil {
+			atomic.AddUint64(counter, uint64(n))
+		}
+
 		b := protocol.BlockInfo{
 			Size:   int32(n),
 			Offset: offset,
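Threaded through like this, progress accounting costs one atomic add per block and nothing at all when the pointer is nil. A runnable sketch of the new signature against an in-memory buffer, assuming the patched lib/scanner package:

package main

import (
    "bytes"
    "fmt"
    "sync/atomic"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/lib/scanner"
)

func main() {
    data := bytes.Repeat([]byte{'x'}, 3*protocol.BlockSize)
    var progress uint64 // incremented by Blocks via atomic.AddUint64
    blocks, err := scanner.Blocks(bytes.NewReader(data), protocol.BlockSize, int64(len(data)), &progress)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d blocks, %d bytes counted\n", len(blocks), atomic.LoadUint64(&progress))
}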
@@ -51,7 +51,7 @@ var blocksTestData = []struct {
 func TestBlocks(t *testing.T) {
 	for _, test := range blocksTestData {
 		buf := bytes.NewBuffer(test.data)
-		blocks, err := Blocks(buf, test.blocksize, 0)
+		blocks, err := Blocks(buf, test.blocksize, 0, nil)

 		if err != nil {
 			t.Fatal(err)
@@ -105,8 +105,8 @@ var diffTestData = []struct {

 func TestDiff(t *testing.T) {
 	for i, test := range diffTestData {
-		a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0)
-		b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0)
+		a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0, nil)
+		b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0, nil)
 		_, d := BlockDiff(a, b)
 		if len(d) != len(test.d) {
 			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
@ -12,11 +12,13 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/syncthing/protocol"
|
"github.com/syncthing/protocol"
|
||||||
"github.com/syncthing/syncthing/lib/db"
|
"github.com/syncthing/syncthing/lib/db"
|
||||||
|
"github.com/syncthing/syncthing/lib/events"
|
||||||
"github.com/syncthing/syncthing/lib/ignore"
|
"github.com/syncthing/syncthing/lib/ignore"
|
||||||
"github.com/syncthing/syncthing/lib/osutil"
|
"github.com/syncthing/syncthing/lib/osutil"
|
||||||
"github.com/syncthing/syncthing/lib/symlinks"
|
"github.com/syncthing/syncthing/lib/symlinks"
|
||||||
@@ -39,6 +41,8 @@ func init() {
 }

 type Walker struct {
+	// Folder for which the walker has been created
+	Folder string
 	// Dir is the base directory for the walk
 	Dir string
 	// Limit walking to these paths within Dir, or no limit if Sub is empty
@@ -66,6 +70,9 @@ type Walker struct {
 	Hashers int
 	// Our vector clock id
 	ShortID uint64
+	// Optional progress tick interval which defines how often FolderScanProgress
+	// events are emitted. Negative number means disabled.
+	ProgressTickIntervalS int
 }

 type TempNamer interface {
@@ -92,12 +99,13 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
 		return nil, err
 	}

-	files := make(chan protocol.FileInfo)
-	hashedFiles := make(chan protocol.FileInfo)
-	newParallelHasher(w.Dir, w.BlockSize, w.Hashers, hashedFiles, files)
+	toHashChan := make(chan protocol.FileInfo)
+	finishedChan := make(chan protocol.FileInfo)

+	// A routine which walks the filesystem tree, and sends files which have
+	// been modified to the counter routine.
 	go func() {
-		hashFiles := w.walkAndHashFiles(files, hashedFiles)
+		hashFiles := w.walkAndHashFiles(toHashChan, finishedChan)
 		if len(w.Subs) == 0 {
 			filepath.Walk(w.Dir, hashFiles)
 		} else {
@@ -105,10 +113,77 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
 				filepath.Walk(filepath.Join(w.Dir, sub), hashFiles)
 			}
 		}
-		close(files)
+		close(toHashChan)
 	}()

-	return hashedFiles, nil
+	// We're not required to emit scan progress events, just kick off hashers,
+	// and feed inputs directly from the walker.
+	if w.ProgressTickIntervalS < 0 {
+		newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, toHashChan, nil, nil)
+		return finishedChan, nil
+	}
+
+	// Defaults to every 2 seconds.
+	if w.ProgressTickIntervalS == 0 {
+		w.ProgressTickIntervalS = 2
+	}
+
+	ticker := time.NewTicker(time.Duration(w.ProgressTickIntervalS) * time.Second)
+
+	// We need to emit progress events, hence we create a routine which buffers
+	// the list of files to be hashed, counts the total number of
+	// bytes to hash, and once no more files need to be hashed (chan gets closed),
+	// start a routine which periodically emits FolderScanProgress events,
+	// until a stop signal is sent by the parallel hasher.
+	// Parallel hasher is stopped by this routine when we close the channel over
+	// which it receives the files we ask it to hash.
+	go func() {
+		var filesToHash []protocol.FileInfo
+		var total, progress uint64
+		for file := range toHashChan {
+			filesToHash = append(filesToHash, file)
+			total += uint64(file.CachedSize)
+		}
+
+		realToHashChan := make(chan protocol.FileInfo)
+		done := make(chan struct{})
+		newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, realToHashChan, &progress, done)
+
+		// A routine which actually emits the FolderScanProgress events
+		// every w.ProgressTicker ticks, until the hasher routines terminate.
+		go func() {
+			for {
+				select {
+				case <-done:
+					if debug {
+						l.Debugln("Walk progress done", w.Dir, w.Subs, w.BlockSize, w.Matcher)
+					}
+					ticker.Stop()
+					return
+				case <-ticker.C:
+					current := atomic.LoadUint64(&progress)
+					if debug {
+						l.Debugf("Walk %s %s current progress %d/%d (%d%%)", w.Dir, w.Subs, current, total, current*100/total)
+					}
+					events.Default.Log(events.FolderScanProgress, map[string]interface{}{
+						"folder":  w.Folder,
+						"current": current,
+						"total":   total,
+					})
+				}
+			}
+		}()
+
+		for _, file := range filesToHash {
+			if debug {
+				l.Debugln("real to hash:", file.Name)
+			}
+			realToHashChan <- file
+		}
+		close(realToHashChan)
+	}()
+
+	return finishedChan, nil
 }
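The trade-off in this design: when progress is enabled, the entire file list is buffered first so the total is known before any hashing begins, whereas the disabled path streams straight from walker to hashers with no extra allocation. (Note the debug Printf in the hunk divides by total, so it assumes at least one non-empty file was queued.) The reporting half of the machinery, reduced to a runnable toy:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// reportProgress samples an atomic counter on every tick and stops when the
// done channel closes, mirroring the goroutine inside Walk() above.
func reportProgress(progress *uint64, total uint64, interval time.Duration, done <-chan struct{}) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-done:
            return
        case <-ticker.C:
            current := atomic.LoadUint64(progress)
            fmt.Printf("progress %d/%d (%d%%)\n", current, total, current*100/total)
        }
    }
}

func main() {
    var progress uint64
    total := uint64(100)
    done := make(chan struct{})
    go reportProgress(&progress, total, 10*time.Millisecond, done)
    for i := 0; i < 100; i++ {
        atomic.AddUint64(&progress, 1) // workers would do this per block hashed
        time.Sleep(time.Millisecond)
    }
    close(done)
}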
@@ -241,7 +316,7 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
 			return skip
 		}

-		blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0)
+		blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil)
 		if err != nil {
 			if debug {
 				l.Debugln("hash link error:", p, err)
@@ -272,10 +347,10 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
 		}

 		if debug {
-			l.Debugln("symlink to hash:", p, f)
+			l.Debugln("symlink changedb:", p, f)
 		}

-		fchan <- f
+		dchan <- f

 		return skip
 	}
@@ -349,10 +424,11 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
 		}

 		f := protocol.FileInfo{
 			Name:     rn,
 			Version:  cf.Version.Update(w.ShortID),
 			Flags:    flags,
 			Modified: mtime.Unix(),
+			CachedSize: info.Size(),
 		}
 		if debug {
 			l.Debugln("to hash:", p, f)
@ -149,8 +149,9 @@ func TestVerify(t *testing.T) {
|
|||||||
// data should be an even multiple of blocksize long
|
// data should be an even multiple of blocksize long
|
||||||
data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut e")
|
data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut e")
|
||||||
buf := bytes.NewBuffer(data)
|
buf := bytes.NewBuffer(data)
|
||||||
|
var progress uint64
|
||||||
|
|
||||||
blocks, err := Blocks(buf, blocksize, 0)
|
blocks, err := Blocks(buf, blocksize, 0, &progress)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -158,6 +159,10 @@ func TestVerify(t *testing.T) {
|
|||||||
t.Fatalf("Incorrect number of blocks %d != %d", len(blocks), exp)
|
t.Fatalf("Incorrect number of blocks %d != %d", len(blocks), exp)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if uint64(len(data)) != progress {
|
||||||
|
t.Fatalf("Incorrect counter value %d != %d", len(data), progress)
|
||||||
|
}
|
||||||
|
|
||||||
buf = bytes.NewBuffer(data)
|
buf = bytes.NewBuffer(data)
|
||||||
err = Verify(buf, blocksize, blocks)
|
err = Verify(buf, blocksize, blocks)
|
||||||
t.Log(err)
|
t.Log(err)
|
||||||
|