// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package model

import (
	"bytes"
	"errors"
	"os"
	"path/filepath"
	"time"

	"github.com/syncthing/syncthing/config"
	"github.com/syncthing/syncthing/events"
	"github.com/syncthing/syncthing/osutil"
	"github.com/syncthing/syncthing/protocol"
	"github.com/syncthing/syncthing/scanner"
	"github.com/syncthing/syncthing/versioner"
)

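// requestResult carries the outcome of a single block request to a remote
// node back into the puller loop.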
type requestResult struct {
	node     protocol.NodeID
	file     protocol.FileInfo
	filepath string // full filepath name
	offset   int64
	data     []byte
	err      error
}

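// openFile tracks the state of a file that is being reassembled into a
// temporary file from locally copied and remotely requested blocks.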
type openFile struct {
	filepath     string // full filepath name
	temp         string // temporary filename
	availability []protocol.NodeID
	file         *os.File
	err          error // error when opening or writing to file, all following operations are cancelled
	outstanding  int   // number of requests we still have outstanding
	done         bool  // we have sent all requests for this file
}

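// activityMap tracks the number of outstanding requests per remote node so
// that new requests can be spread across the least busy nodes.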
type activityMap map[protocol.NodeID]int

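// leastBusyNode returns the node in availability with the fewest outstanding
// requests for which isValid returns true, and increments its request count.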
func (m activityMap) leastBusyNode(availability []protocol.NodeID, isValid func(protocol.NodeID) bool) protocol.NodeID {
	var low int = 2<<30 - 1
	var selected protocol.NodeID
	for _, node := range availability {
		usage := m[node]
		if usage < low && isValid(node) {
			low = usage
			selected = node
		}
	}
	m[selected]++
	return selected
}

func (m activityMap) decrease(node protocol.NodeID) {
	m[node]--
}

var errNoNode = errors.New("no available source node")

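// puller handles the downloading side of synchronization for a single
// repository: it queues needed blocks, copies data that is already available
// locally, requests the rest from connected nodes, and moves completed
// temporary files into place.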
type puller struct {
	cfg               *config.Configuration
	repoCfg           config.RepositoryConfiguration
	bq                blockQueue
	slots             int
	model             *Model
	oustandingPerNode activityMap
	openFiles         map[string]openFile
	requestSlots      chan bool
	blocks            chan bqBlock
	requestResults    chan requestResult
	versioner         versioner.Versioner
	errors            int
}

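// newPuller creates a puller for the given repository and starts its main
// loop; with slots > 0 the repository is handled read/write, otherwise it is
// only rescanned periodically (read only).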
func newPuller(repoCfg config.RepositoryConfiguration, model *Model, slots int, cfg *config.Configuration) *puller {
	p := &puller{
		cfg:               cfg,
		repoCfg:           repoCfg,
		slots:             slots,
		model:             model,
		oustandingPerNode: make(activityMap),
		openFiles:         make(map[string]openFile),
		requestSlots:      make(chan bool, slots),
		blocks:            make(chan bqBlock),
		requestResults:    make(chan requestResult),
	}

	if len(repoCfg.Versioning.Type) > 0 {
		factory, ok := versioner.Factories[repoCfg.Versioning.Type]
		if !ok {
			l.Fatalf("Requested versioning type %q that does not exist", repoCfg.Versioning.Type)
		}
		p.versioner = factory(repoCfg.Versioning.Params)
	}

	if slots > 0 {
		// Read/write
		if debug {
			l.Debugf("starting puller; repo %q dir %q slots %d", repoCfg.ID, repoCfg.Directory, slots)
		}
		go p.run()
	} else {
		// Read only
		if debug {
			l.Debugf("starting puller; repo %q dir %q (read only)", repoCfg.ID, repoCfg.Directory)
		}
		go p.runRO()
	}
	return p
}

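// run is the read/write pull loop. It repeatedly queues needed blocks, pulls
// until the queue is drained, fixes up directories, rescans the repository on
// its configured interval and sleeps briefly between iterations.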
func (p *puller) run() {
	changed := true
	scanintv := time.Duration(p.cfg.Options.RescanIntervalS) * time.Second
	lastscan := time.Now()
	var prevVer uint64
	var queued int

	// Load up the request slots
	for i := 0; i < cap(p.requestSlots); i++ {
		p.requestSlots <- true
	}

	for {
		// Run the pulling loop as long as there are blocks to fetch

		prevVer, queued = p.queueNeededBlocks(prevVer)
		if queued > 0 {
			p.errors = 0

		pull:
			for {
				select {
				case res := <-p.requestResults:
					p.model.setState(p.repoCfg.ID, RepoSyncing)
					changed = true
					p.requestSlots <- true
					p.handleRequestResult(res)

				case <-p.requestSlots:
					b, ok := p.bq.get()

					if !ok {
						if debug {
							l.Debugf("%q: pulling loop needs more blocks", p.repoCfg.ID)
						}

						if p.errors > 0 && p.errors >= queued {
							break pull
						}

						prevVer, _ = p.queueNeededBlocks(prevVer)
						b, ok = p.bq.get()
					}

					if !ok && len(p.openFiles) == 0 {
						// Nothing queued, nothing outstanding
						if debug {
							l.Debugf("%q: pulling loop done", p.repoCfg.ID)
						}
						break pull
					}

					if !ok {
						// Nothing queued, but there are still open files.
						// Give the situation a moment to change.
						if debug {
							l.Debugf("%q: pulling loop paused", p.repoCfg.ID)
						}
						p.requestSlots <- true
						time.Sleep(100 * time.Millisecond)
						continue pull
					}

					if debug {
						l.Debugf("queueing %q / %q offset %d copy %d", p.repoCfg.ID, b.file.Name, b.block.Offset, len(b.copy))
					}
					p.model.setState(p.repoCfg.ID, RepoSyncing)
					changed = true
					if p.handleBlock(b) {
						// Block was fully handled, free up the slot
						p.requestSlots <- true
					}
				}
			}

			if p.errors > 0 && p.errors >= queued {
				l.Warnf("All remaining files failed to sync. Stopping repo %q.", p.repoCfg.ID)
				invalidateRepo(p.cfg, p.repoCfg.ID, errors.New("too many errors, check logs"))
				return
			}
		}

		if changed {
			p.model.setState(p.repoCfg.ID, RepoCleaning)
			p.fixupDirectories()
			changed = false
		}

		p.model.setState(p.repoCfg.ID, RepoIdle)

		// Do a rescan if it's time for it
		if time.Since(lastscan) > scanintv {
			if debug {
				l.Debugf("%q: time for rescan", p.repoCfg.ID)
			}

			err := p.model.ScanRepo(p.repoCfg.ID)
			if err != nil {
				invalidateRepo(p.cfg, p.repoCfg.ID, err)
				return
			}
			lastscan = time.Now()
		}

		time.Sleep(5 * time.Second)
	}
}

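// runRO is the read only loop: it only rescans the repository on the
// configured interval and never modifies anything on disk.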
func (p *puller) runRO() {
	walkTicker := time.Tick(time.Duration(p.cfg.Options.RescanIntervalS) * time.Second)

	for _ = range walkTicker {
		if debug {
			l.Debugf("%q: time for rescan", p.repoCfg.ID)
		}
		err := p.model.ScanRepo(p.repoCfg.ID)
		if err != nil {
			invalidateRepo(p.cfg, p.repoCfg.ID, err)
			return
		}
	}
}

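// fixupDirectories walks the repository and brings directories in line with
// the current model: it restores permission bits and removes directories that
// are marked deleted, repeating until a pass makes no further changes.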
func (p *puller) fixupDirectories() {
	var deleteDirs []string
	var changed = 0

	var walkFn = func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if !info.IsDir() {
			return nil
		}

		rn, err := filepath.Rel(p.repoCfg.Directory, path)
		if err != nil {
			return nil
		}

		if rn == "." {
			return nil
		}

		if filepath.Base(rn) == ".stversions" {
			return filepath.SkipDir
		}

		cur := p.model.CurrentRepoFile(p.repoCfg.ID, rn)
		if cur.Name != rn {
			// No matching dir in current list; weird
			if debug {
				l.Debugf("missing dir: %s; %v", rn, cur)
			}
			return nil
		}

		if protocol.IsDeleted(cur.Flags) {
			if debug {
				l.Debugf("queue delete dir: %v", cur)
			}

			// We queue the directories to delete since we walk the
			// tree in depth first order and need to remove the
			// directories in the opposite order.

			deleteDirs = append(deleteDirs, path)
			return nil
		}

		if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(cur.Flags) && !scanner.PermsEqual(cur.Flags, uint32(info.Mode())) {
			err := os.Chmod(path, os.FileMode(cur.Flags)&os.ModePerm)
			if err != nil {
				l.Warnf("Restoring folder flags: %q: %v", path, err)
			} else {
				changed++
				if debug {
					l.Debugf("restored dir flags: %o -> %v", info.Mode()&os.ModePerm, cur)
				}
			}
		}

		return nil
	}

	for {
		deleteDirs = nil
		changed = 0
		filepath.Walk(p.repoCfg.Directory, walkFn)

		var deleted = 0
		// Delete any queued directories
		for i := len(deleteDirs) - 1; i >= 0; i-- {
			dir := deleteDirs[i]
			if debug {
				l.Debugln("delete dir:", dir)
			}
			err := os.Remove(dir)
			if err == nil {
				deleted++
			} else {
				l.Warnln("Delete dir:", err)
			}
		}

		if debug {
			l.Debugf("changed %d, deleted %d dirs", changed, deleted)
		}

		if changed+deleted == 0 {
			return
		}
	}
}

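// handleRequestResult accounts for a finished network request: it writes the
// received data to the temporary file (unless the file has already failed)
// and closes the file once all outstanding requests are done.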
func (p *puller) handleRequestResult(res requestResult) {
	p.oustandingPerNode.decrease(res.node)
	f := res.file

	of, ok := p.openFiles[f.Name]
	if !ok {
		// no entry in openFiles means there was an error and we've cancelled the operation
		return
	}

	if res.err != nil {
		// This request resulted in an error
		of.err = res.err
		if debug {
			l.Debugf("pull: not writing %q / %q offset %d: %v; (done=%v, outstanding=%d)", p.repoCfg.ID, f.Name, res.offset, res.err, of.done, of.outstanding)
		}
	} else if of.err == nil {
		// This request was successful and nothing has failed previously either
		_, of.err = of.file.WriteAt(res.data, res.offset)
		if debug {
			l.Debugf("pull: wrote %q / %q offset %d len %d outstanding %d done %v", p.repoCfg.ID, f.Name, res.offset, len(res.data), of.outstanding, of.done)
		}
	}

	of.outstanding--
	p.openFiles[f.Name] = of

	if of.done && of.outstanding == 0 {
		p.closeFile(f)
	}
}

// handleBlock fulfills the block request by copying, ignoring or fetching
// from the network. Returns true if the block was fully handled
// synchronously, i.e. if the slot can be reused.
func (p *puller) handleBlock(b bqBlock) bool {
	f := b.file

	// For directories, making sure they exist is enough.
	// Deleted directories we mark as handled and delete later.
	if protocol.IsDirectory(f.Flags) {
		if !protocol.IsDeleted(f.Flags) {
			path := filepath.Join(p.repoCfg.Directory, f.Name)
			_, err := os.Stat(path)
			if err != nil && os.IsNotExist(err) {
				if debug {
					l.Debugf("create dir: %v", f)
				}
				err = os.MkdirAll(path, os.FileMode(f.Flags&0777))
				if err != nil {
					p.errors++
					l.Infof("mkdir: error: %q: %v", path, err)
				}
			}
		} else if debug {
			l.Debugf("ignore delete dir: %v", f)
		}
		p.model.updateLocal(p.repoCfg.ID, f)
		return true
	}

	if len(b.copy) > 0 && len(b.copy) == len(b.file.Blocks) && b.last {
		// We are supposed to copy the entire file, and then fetch nothing.
		// We don't actually need to make the copy.
		if debug {
			l.Debugln("taking shortcut:", f)
		}
		fp := filepath.Join(p.repoCfg.Directory, f.Name)
		t := time.Unix(f.Modified, 0)
		err := os.Chtimes(fp, t, t)
		if err != nil {
			l.Infof("chtimes: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
		}
		if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
			err = os.Chmod(fp, os.FileMode(f.Flags&0777))
			if err != nil {
				l.Infof("chmod: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
			}
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"repo": p.repoCfg.ID,
			"item": f.Name,
		})

		p.model.updateLocal(p.repoCfg.ID, f)
		return true
	}

	of, ok := p.openFiles[f.Name]
	of.done = b.last

	if !ok {
		if debug {
			l.Debugf("pull: %q: opening file %q", p.repoCfg.ID, f.Name)
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"repo": p.repoCfg.ID,
			"item": f.Name,
		})

		of.availability = p.model.repoFiles[p.repoCfg.ID].Availability(f.Name)
		of.filepath = filepath.Join(p.repoCfg.Directory, f.Name)
		of.temp = filepath.Join(p.repoCfg.Directory, defTempNamer.TempName(f.Name))

		dirName := filepath.Dir(of.filepath)
		_, err := os.Stat(dirName)
		if err != nil {
			err = os.MkdirAll(dirName, 0777)
		}
		if err != nil {
			l.Infof("mkdir: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
		}

		of.file, of.err = os.Create(of.temp)
		if of.err != nil {
			p.errors++
			l.Infof("create: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
			if !b.last {
				p.openFiles[f.Name] = of
			}
			return true
		}
		osutil.HideFile(of.temp)
	}

	if of.err != nil {
		// We have already failed this file.
		if debug {
			l.Debugf("pull: error: %q / %q has already failed: %v", p.repoCfg.ID, f.Name, of.err)
		}
		if b.last {
			delete(p.openFiles, f.Name)
		}

		return true
	}

	p.openFiles[f.Name] = of

	switch {
	case len(b.copy) > 0:
		p.handleCopyBlock(b)
		return true

	case b.block.Size > 0:
		return p.handleRequestBlock(b)

	default:
		p.handleEmptyBlock(b)
		return true
	}
}

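// handleCopyBlock copies the blocks listed in b.copy from the existing file
// on disk into the temporary file, recording any error on the open file.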
func (p *puller) handleCopyBlock(b bqBlock) {
	// We have blocks to copy from the existing file
	f := b.file
	of := p.openFiles[f.Name]

	if debug {
		l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repoCfg.ID, f.Name)
	}

	var exfd *os.File
	exfd, of.err = os.Open(of.filepath)
	if of.err != nil {
		p.errors++
		l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
		of.file.Close()
		of.file = nil

		p.openFiles[f.Name] = of
		return
	}
	defer exfd.Close()

	for _, b := range b.copy {
		bs := make([]byte, b.Size)
		_, of.err = exfd.ReadAt(bs, b.Offset)
		if of.err == nil {
			_, of.err = of.file.WriteAt(bs, b.Offset)
		}
		if of.err != nil {
			p.errors++
			l.Infof("write: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
			exfd.Close()
			of.file.Close()
			of.file = nil

			p.openFiles[f.Name] = of
			return
		}
	}
}

// handleRequestBlock tries to pull a block from the network. Returns true if
// the block could _not_ be fetched (i.e. it was fully handled, matching the
// return criteria of handleBlock)
func (p *puller) handleRequestBlock(b bqBlock) bool {
	f := b.file
	of, ok := p.openFiles[f.Name]
	if !ok {
		panic("bug: request for non-open file")
	}

	node := p.oustandingPerNode.leastBusyNode(of.availability, p.model.ConnectedTo)
	if node == (protocol.NodeID{}) {
		of.err = errNoNode
		if of.file != nil {
			of.file.Close()
			of.file = nil
			os.Remove(of.temp)
			if debug {
				l.Debugf("pull: no source for %q / %q; closed", p.repoCfg.ID, f.Name)
			}
		}
		if b.last {
			if debug {
				l.Debugf("pull: no source for %q / %q; deleting", p.repoCfg.ID, f.Name)
			}
			delete(p.openFiles, f.Name)
		} else {
			if debug {
				l.Debugf("pull: no source for %q / %q; await more blocks", p.repoCfg.ID, f.Name)
			}
			p.openFiles[f.Name] = of
		}
		return true
	}

	of.outstanding++
	p.openFiles[f.Name] = of

	go func(node protocol.NodeID, b bqBlock) {
		if debug {
			l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repoCfg.ID, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
		}

		bs, err := p.model.requestGlobal(node, p.repoCfg.ID, f.Name, b.block.Offset, int(b.block.Size), nil)
		p.requestResults <- requestResult{
			node:     node,
			file:     f,
			filepath: of.filepath,
			offset:   b.block.Offset,
			data:     bs,
			err:      err,
		}
	}(node, b)

	return false
}

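// handleEmptyBlock handles a queue entry with no data to copy or fetch: a
// deleted file is removed (archived via the versioner when one is
// configured), while an otherwise complete file has its times and permissions
// set and its temporary renamed into place.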
func (p *puller) handleEmptyBlock(b bqBlock) {
	f := b.file
	of := p.openFiles[f.Name]

	if b.last {
		if of.err == nil {
			of.file.Close()
		}
	}

	if protocol.IsDeleted(f.Flags) {
		if debug {
			l.Debugf("pull: delete %q", f.Name)
		}
		os.Remove(of.temp)
		os.Chmod(of.filepath, 0666)
		if p.versioner != nil {
			if debug {
				l.Debugln("pull: deleting with versioner")
			}
			if err := p.versioner.Archive(p.repoCfg.Directory, of.filepath); err == nil {
				p.model.updateLocal(p.repoCfg.ID, f)
			} else if debug {
				l.Debugln("pull: error:", err)
			}
		} else if err := os.Remove(of.filepath); err == nil || os.IsNotExist(err) {
			p.model.updateLocal(p.repoCfg.ID, f)
		}
	} else {
		if debug {
			l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repoCfg.ID, f.Name)
		}
		t := time.Unix(f.Modified, 0)
		if os.Chtimes(of.temp, t, t) != nil {
			delete(p.openFiles, f.Name)
			return
		}
		if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
			delete(p.openFiles, f.Name)
			return
		}
		osutil.ShowFile(of.temp)
		if osutil.Rename(of.temp, of.filepath) == nil {
			p.model.updateLocal(p.repoCfg.ID, f)
		}
	}
	delete(p.openFiles, f.Name)
}

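// queueNeededBlocks compares the local repository state against the global
// model and adds the blocks of every needed file to the block queue. It
// returns the local version to pass in on the next call together with the
// number of files queued.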
func (p *puller) queueNeededBlocks(prevVer uint64) (uint64, int) {
	curVer := p.model.LocalVersion(p.repoCfg.ID)
	if curVer == prevVer {
		return curVer, 0
	}

	if debug {
		l.Debugf("%q: checking for more needed blocks", p.repoCfg.ID)
	}

	queued := 0
	for _, f := range p.model.NeedFilesRepo(p.repoCfg.ID) {
		if _, ok := p.openFiles[f.Name]; ok {
			continue
		}
		lf := p.model.CurrentRepoFile(p.repoCfg.ID, f.Name)
		have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
		if debug {
			l.Debugf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
		}
		queued++
		p.bq.put(bqAdd{
			file: f,
			have: have,
			need: need,
		})
	}
	if debug && queued > 0 {
		l.Debugf("%q: queued %d items", p.repoCfg.ID, queued)
	}

	if queued > 0 {
		return prevVer, queued
	} else {
		return curVer, 0
	}
}

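// closeFile closes and verifies a fully downloaded temporary file: it
// rehashes the temporary, compares the block hashes against the expected
// ones, restores modification time and permissions, archives any previous
// version and renames the temporary into place.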
func (p *puller) closeFile(f protocol.FileInfo) {
	if debug {
		l.Debugf("pull: closing %q / %q", p.repoCfg.ID, f.Name)
	}

	of := p.openFiles[f.Name]
	err := of.file.Close()
	if err != nil {
		p.errors++
		l.Infof("close: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
	}
	defer os.Remove(of.temp)

	delete(p.openFiles, f.Name)

	fd, err := os.Open(of.temp)
	if err != nil {
		p.errors++
		l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
		return
	}
	hb, _ := scanner.Blocks(fd, scanner.StandardBlockSize)
	fd.Close()

	if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
		if debug {
			l.Debugf("pull: %q / %q: nblocks %d != %d", p.repoCfg.ID, f.Name, l0, l1)
		}
		return
	}

	for i := range hb {
		if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
			if debug {
				l.Debugf("pull: %q / %q: block %d hash mismatch\n have: %x\n want: %x", p.repoCfg.ID, f.Name, i, hb[i].Hash, f.Blocks[i].Hash)
			}
			return
		}
	}

	t := time.Unix(f.Modified, 0)
	err = os.Chtimes(of.temp, t, t)
	if err != nil {
		l.Infof("chtimes: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
	}
	if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
		err = os.Chmod(of.temp, os.FileMode(f.Flags&0777))
		if err != nil {
			l.Infof("chmod: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
		}
	}

	osutil.ShowFile(of.temp)

	if p.versioner != nil {
		err := p.versioner.Archive(p.repoCfg.Directory, of.filepath)
		if err != nil {
			if debug {
				l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
			}
			return
		}
	}

	if debug {
		l.Debugf("pull: rename %q / %q: %q", p.repoCfg.ID, f.Name, of.filepath)
	}
	if err := osutil.Rename(of.temp, of.filepath); err == nil {
		p.model.updateLocal(p.repoCfg.ID, f)
	} else {
		p.errors++
		l.Infof("rename: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
	}
}

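// invalidateRepo marks the repository with the given ID as invalid in the
// configuration, using the error message as the reason.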
func invalidateRepo(cfg *config.Configuration, repoID string, err error) {
	for i := range cfg.Repositories {
		repo := &cfg.Repositories[i]
		if repo.ID == repoID {
			repo.Invalid = err.Error()
			return
		}
	}
}