// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"mime"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/syncthing/syncthing/lib/auto"
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/discover"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/logger"
	"github.com/syncthing/syncthing/lib/model"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
	"github.com/syncthing/syncthing/lib/stats"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/tlsutil"
	"github.com/syncthing/syncthing/lib/upgrade"
	"github.com/vitrun/qart/qr"
	"golang.org/x/crypto/bcrypt"
)

var (
	configInSync = true
	startTime    = time.Now()
)

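// apiService serves the REST API and the static web GUI over HTTP or HTTPS.
// It implements config.Committer (VerifyConfiguration and CommitConfiguration
// below) so that the listener can be swapped out at runtime when the GUI
// configuration changes.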
type apiService struct {
	id                 protocol.DeviceID
	cfg                configIntf
	httpsCertFile      string
	httpsKeyFile       string
	assetDir           string
	themes             []string
	model              modelIntf
	eventSub           events.BufferedSubscription
	discoverer         discover.CachingMux
	connectionsService connectionsIntf
	fss                *folderSummaryService
	systemConfigMut    sync.Mutex    // serializes posts to /rest/system/config
	stop               chan struct{} // signals intentional stop
	configChanged      chan struct{} // signals intentional listener close due to config change
	started            chan struct{} // signals startup complete, for testing only

	listener    net.Listener
	listenerMut sync.Mutex

	guiErrors logger.Recorder
	systemLog logger.Recorder
}

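// modelIntf is the subset of the model's functionality that the API service
// uses; keeping it behind an interface decouples the HTTP handlers from the
// concrete model implementation.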
type modelIntf interface {
	GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{}
	Completion(device protocol.DeviceID, folder string) float64
	Override(folder string)
	NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int)
	NeedSize(folder string) (nfiles int, bytes int64)
	ConnectionStats() map[string]interface{}
	DeviceStatistics() map[string]stats.DeviceStatistics
	FolderStatistics() map[string]stats.FolderStatistics
	CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
	CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool)
	ResetFolder(folder string)
	Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []model.Availability
	GetIgnores(folder string) ([]string, []string, error)
	SetIgnores(folder string, content []string) error
	PauseDevice(device protocol.DeviceID)
	ResumeDevice(device protocol.DeviceID)
	DelayScan(folder string, next time.Duration)
	ScanFolder(folder string) error
	ScanFolders() map[string]error
	ScanFolderSubs(folder string, subs []string) error
	BringToFront(folder, file string)
	ConnectedTo(deviceID protocol.DeviceID) bool
	GlobalSize(folder string) (nfiles, deleted int, bytes int64)
	LocalSize(folder string) (nfiles, deleted int, bytes int64)
	CurrentLocalVersion(folder string) (int64, bool)
	RemoteLocalVersion(folder string) (int64, bool)
	State(folder string) (string, time.Time, error)
}

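// configIntf is the subset of the configuration wrapper's functionality that
// the API service uses.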
type configIntf interface {
	GUI() config.GUIConfiguration
	Raw() config.Configuration
	Options() config.OptionsConfiguration
	Replace(cfg config.Configuration) config.CommitResponse
	Subscribe(c config.Committer)
	Folders() map[string]config.FolderConfiguration
	Devices() map[protocol.DeviceID]config.DeviceConfiguration
	Save() error
	ListenAddresses() []string
}

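// connectionsIntf exposes the connection service status, reported as part of
// /rest/system/status.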
type connectionsIntf interface {
	Status() map[string]interface{}
}

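// newAPIService constructs the API service, collects the available GUI themes
// from the compiled-in assets (plus the asset override directory, if set) and
// opens the listener. On listener error the partially initialized service is
// returned together with the error.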
func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connectionsIntf, errors, systemLog logger.Recorder) (*apiService, error) {
	service := &apiService{
		id:                 id,
		cfg:                cfg,
		httpsCertFile:      httpsCertFile,
		httpsKeyFile:       httpsKeyFile,
		assetDir:           assetDir,
		model:              m,
		eventSub:           eventSub,
		discoverer:         discoverer,
		connectionsService: connectionsService,
		systemConfigMut:    sync.NewMutex(),
		stop:               make(chan struct{}),
		configChanged:      make(chan struct{}),
		listenerMut:        sync.NewMutex(),
		guiErrors:          errors,
		systemLog:          systemLog,
	}

	seen := make(map[string]struct{})
	// Load themes from compiled in assets.
	for file := range auto.Assets() {
		theme := strings.Split(file, "/")[0]
		if _, ok := seen[theme]; !ok {
			seen[theme] = struct{}{}
			service.themes = append(service.themes, theme)
		}
	}
	if assetDir != "" {
		// Load any extra themes from the asset override dir.
		for _, dir := range dirNames(assetDir) {
			if _, ok := seen[dir]; !ok {
				seen[dir] = struct{}{}
				service.themes = append(service.themes, dir)
			}
		}
	}

	var err error
	service.listener, err = service.getListener(cfg.GUI())
	return service, err
}

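// getListener loads the HTTPS key pair, generating a new self-signed
// certificate if loading fails, and returns a listener on the configured GUI
// address wrapped in a tlsutil.DowngradingListener.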
func (s *apiService) getListener(guiCfg config.GUIConfiguration) (net.Listener, error) {
	cert, err := tls.LoadX509KeyPair(s.httpsCertFile, s.httpsKeyFile)
	if err != nil {
		l.Infoln("Loading HTTPS certificate:", err)
		l.Infoln("Creating new HTTPS certificate")

		// When generating the HTTPS certificate, use the system host name per
		// default. If that isn't available, use the "syncthing" default.
		var name string
		name, err = os.Hostname()
		if err != nil {
			name = tlsDefaultCommonName
		}

		cert, err = tlsutil.NewCertificate(s.httpsCertFile, s.httpsKeyFile, name, httpsRSABits)
	}
	if err != nil {
		return nil, err
	}
	tlsCfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS10, // No SSLv3
		CipherSuites: []uint16{
			// No RC4
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
			tls.TLS_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
			tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
		},
	}

	rawListener, err := net.Listen("tcp", guiCfg.Address())
	if err != nil {
		return nil, err
	}

	listener := &tlsutil.DowngradingListener{
		Listener:  rawListener,
		TLSConfig: tlsCfg,
	}
	return listener, nil
}

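// sendJSON marshals the given object and writes it as a UTF-8 JSON response;
// a marshalling failure results in a 500 with the error as the body.
//
// Typical handler usage:
//
//	sendJSON(w, map[string]string{"ping": "pong"})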
func sendJSON(w http.ResponseWriter, jsonObject interface{}) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	// Marshalling might fail, in which case we should return a 500 with the
	// actual error.
	bs, err := json.Marshal(jsonObject)
	if err != nil {
		// This Marshal() can't fail though.
		bs, _ = json.Marshal(map[string]string{"error": err.Error()})
		http.Error(w, string(bs), http.StatusInternalServerError)
		return
	}
	w.Write(bs)
}

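// Serve wires up the REST routes and the middleware chain (no-cache, metrics,
// CSRF, authentication, HTTPS redirect, CORS, debug logging), starts the
// folder summary service, and then serves HTTP on the current listener until
// it is closed or the service is stopped.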
func (s *apiService) Serve() {
	s.listenerMut.Lock()
	listener := s.listener
	s.listenerMut.Unlock()

	if listener == nil {
		// Not much we can do here other than exit quickly. The supervisor
		// will log an error at some point.
		return
	}

	// The GET handlers
	getRestMux := http.NewServeMux()
	getRestMux.HandleFunc("/rest/db/completion", s.getDBCompletion)              // device folder
	getRestMux.HandleFunc("/rest/db/file", s.getDBFile)                          // folder file
	getRestMux.HandleFunc("/rest/db/ignores", s.getDBIgnores)                    // folder
	getRestMux.HandleFunc("/rest/db/need", s.getDBNeed)                          // folder [perpage] [page]
	getRestMux.HandleFunc("/rest/db/status", s.getDBStatus)                      // folder
	getRestMux.HandleFunc("/rest/db/browse", s.getDBBrowse)                      // folder [prefix] [dirsonly] [levels]
	getRestMux.HandleFunc("/rest/events", s.getEvents)                           // since [limit]
	getRestMux.HandleFunc("/rest/stats/device", s.getDeviceStats)                // -
	getRestMux.HandleFunc("/rest/stats/folder", s.getFolderStats)                // -
	getRestMux.HandleFunc("/rest/svc/deviceid", s.getDeviceID)                   // id
	getRestMux.HandleFunc("/rest/svc/lang", s.getLang)                           // -
	getRestMux.HandleFunc("/rest/svc/report", s.getReport)                       // -
	getRestMux.HandleFunc("/rest/system/browse", s.getSystemBrowse)              // current
	getRestMux.HandleFunc("/rest/system/config", s.getSystemConfig)              // -
	getRestMux.HandleFunc("/rest/system/config/insync", s.getSystemConfigInsync) // -
	getRestMux.HandleFunc("/rest/system/connections", s.getSystemConnections)    // -
	getRestMux.HandleFunc("/rest/system/discovery", s.getSystemDiscovery)        // -
	getRestMux.HandleFunc("/rest/system/error", s.getSystemError)                // -
	getRestMux.HandleFunc("/rest/system/ping", s.restPing)                       // -
	getRestMux.HandleFunc("/rest/system/status", s.getSystemStatus)              // -
	getRestMux.HandleFunc("/rest/system/upgrade", s.getSystemUpgrade)            // -
	getRestMux.HandleFunc("/rest/system/version", s.getSystemVersion)            // -
	getRestMux.HandleFunc("/rest/system/debug", s.getSystemDebug)                // -
	getRestMux.HandleFunc("/rest/system/log", s.getSystemLog)                    // [since]
	getRestMux.HandleFunc("/rest/system/log.txt", s.getSystemLogTxt)             // [since]

	// The POST handlers
	postRestMux := http.NewServeMux()
	postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio)                      // folder file [perpage] [page]
	postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores)                // folder
	postRestMux.HandleFunc("/rest/db/override", s.postDBOverride)              // folder
	postRestMux.HandleFunc("/rest/db/scan", s.postDBScan)                      // folder [sub...] [delay]
	postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig)          // <body>
	postRestMux.HandleFunc("/rest/system/error", s.postSystemError)            // <body>
	postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
	postRestMux.HandleFunc("/rest/system/ping", s.restPing)                    // -
	postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset)            // [folder]
	postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart)        // -
	postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown)      // -
	postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade)        // -
	postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause)            // device
	postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume)          // device
	postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug)            // [enable] [disable]

	// Debug endpoints, not for general use
	getRestMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
	getRestMux.HandleFunc("/rest/debug/httpmetrics", s.getSystemHTTPMetrics)

	// A handler that splits requests between the two above and disables
	// caching
	restMux := noCacheMiddleware(metricsMiddleware(getPostHandler(getRestMux, postRestMux)))

	// The main routing handler
	mux := http.NewServeMux()
	mux.Handle("/rest/", restMux)
	mux.HandleFunc("/qr/", s.getQR)

	// Serve compiled in assets unless an asset directory was set (for development)
	assets := &embeddedStatic{
		theme:        s.cfg.GUI().Theme,
		lastModified: time.Now().Truncate(time.Second), // must truncate, since the wire precision is 1s
		mut:          sync.NewRWMutex(),
		assetDir:     s.assetDir,
		assets:       auto.Assets(),
	}
	mux.Handle("/", assets)

	// Handle the special meta.js path
	mux.HandleFunc("/meta.js", s.getJSMetadata)

	s.cfg.Subscribe(assets)

	guiCfg := s.cfg.GUI()

	// Wrap everything in CSRF protection. The /rest prefix should be
	// protected, other requests will grant cookies.
	handler := csrfMiddleware(s.id.String()[:5], "/rest", guiCfg, mux)

	// Add our version and ID as a header to responses
	handler = withDetailsMiddleware(s.id, handler)

	// Wrap everything in basic auth, if user/password is set.
	if len(guiCfg.User) > 0 && len(guiCfg.Password) > 0 {
		handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], guiCfg, handler)
	}

	// Redirect to HTTPS if we are supposed to
	if guiCfg.UseTLS() {
		handler = redirectToHTTPSMiddleware(handler)
	}

	// Add the CORS handling
	handler = corsMiddleware(handler)

	handler = debugMiddleware(handler)

	srv := http.Server{
		Handler:     handler,
		ReadTimeout: 10 * time.Second,
	}

	s.fss = newFolderSummaryService(s.cfg, s.model)
	defer s.fss.Stop()
	s.fss.ServeBackground()

	l.Infoln("GUI and API listening on", listener.Addr())
	l.Infoln("Access the GUI via the following URL:", guiCfg.URL())
	if s.started != nil {
		// only set when run by the tests
		close(s.started)
	}
	err := srv.Serve(listener)

	// The return could be due to an intentional close. Wait for the stop
	// signal before returning. If there is no stop signal within a second, we
	// assume it was unintentional and log the error before retrying.
	select {
	case <-s.stop:
	case <-s.configChanged:
	case <-time.After(time.Second):
		l.Warnln("API:", err)
	}
}

func (s *apiService) Stop() {
	s.listenerMut.Lock()
	listener := s.listener
	s.listenerMut.Unlock()

	close(s.stop)

	// listener may be nil here if we've had a config change to a broken
	// configuration, in which case we shouldn't try to close it.
	if listener != nil {
		listener.Close()
	}
}

func (s *apiService) String() string {
	return fmt.Sprintf("apiService@%p", s)
}

func (s *apiService) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}

func (s *apiService) CommitConfiguration(from, to config.Configuration) bool {
	if to.GUI == from.GUI {
		return true
	}

	// Order here is important. We must close the listener to stop Serve(). We
	// must create a new listener before Serve() starts again. We can't create
	// a new listener on the same port before the previous listener is closed.
	// To assist in this little dance the Serve() method will wait for a
	// signal on the configChanged channel after the listener has closed.

	s.listenerMut.Lock()
	defer s.listenerMut.Unlock()

	s.listener.Close()

	var err error
	s.listener, err = s.getListener(to.GUI)
	if err != nil {
		// Ideally this should be a verification error, but we check it by
		// creating a new listener which requires shutting down the previous
		// one first, which is too destructive for the VerifyConfiguration
		// method.
		return false
	}

	s.configChanged <- struct{}{}

	return true
}

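// getPostHandler routes GET requests to the first handler and POST requests
// to the second; any other method yields 405 Method Not Allowed.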
func getPostHandler(get, post http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case "GET":
			get.ServeHTTP(w, r)
		case "POST":
			post.ServeHTTP(w, r)
		default:
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}
	})
}

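// debugMiddleware logs the method, URL, response status, size and latency of
// each request when HTTP debugging is enabled.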
func debugMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t0 := time.Now()
		h.ServeHTTP(w, r)

		if shouldDebugHTTP() {
			ms := 1000 * time.Since(t0).Seconds()

			// The variable `w` is most likely a *http.response, which we can't do
			// much with since it's a non-exported type. We can however peek into
			// it with reflection to get at the status code and number of bytes
			// written.
			var status, written int64
			if rw := reflect.Indirect(reflect.ValueOf(w)); rw.IsValid() && rw.Kind() == reflect.Struct {
				if rf := rw.FieldByName("status"); rf.IsValid() && rf.Kind() == reflect.Int {
					status = rf.Int()
				}
				if rf := rw.FieldByName("written"); rf.IsValid() && rf.Kind() == reflect.Int64 {
					written = rf.Int()
				}
			}
			httpl.Debugf("http: %s %q: status %d, %d bytes in %.02f ms", r.Method, r.URL.String(), status, written, ms)
		}
	})
}

func corsMiddleware(next http.Handler) http.Handler {
	// Handle CORS headers and CORS OPTIONS requests.
	// CORS OPTIONS requests are typically sent by the browser as an AJAX
	// preflight before it initiates a POST request.
	//
	// As the OPTIONS request is unauthorized, this handler must be the first
	// of the chain (hence added at the end).
	//
	// See https://www.w3.org/TR/cors/ for details.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Process OPTIONS requests
		if r.Method == "OPTIONS" {
			// Only GET/POST methods are supported
			w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
			// Only this custom header can be set
			w.Header().Set("Access-Control-Allow-Headers", "X-API-Key")
			// The request is meant to be cached 10 minutes
			w.Header().Set("Access-Control-Max-Age", "600")

			// Indicate that no content will be returned
			w.WriteHeader(204)

			return
		}

		// For everything else, pass to the next handler
		next.ServeHTTP(w, r)
	})
}

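// metricsMiddleware records per-path request timings, which are exposed via
// /rest/debug/httpmetrics.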
func metricsMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t := metrics.GetOrRegisterTimer(r.URL.Path, nil)
		t0 := time.Now()
		h.ServeHTTP(w, r)
		t.UpdateSince(t0)
	})
}

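// redirectToHTTPSMiddleware issues a temporary redirect to the HTTPS variant
// of the requested URL for requests arriving over plain HTTP.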
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.TLS == nil {
			// Redirect HTTP requests to HTTPS
			r.URL.Host = r.Host
			r.URL.Scheme = "https"
			http.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)
		} else {
			h.ServeHTTP(w, r)
		}
	})
}

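// noCacheMiddleware sets headers that prevent clients and proxies from
// caching REST responses.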
func noCacheMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Cache-Control", "max-age=0, no-cache, no-store")
		w.Header().Set("Expires", time.Now().UTC().Format(http.TimeFormat))
		w.Header().Set("Pragma", "no-cache")
		h.ServeHTTP(w, r)
	})
}

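// withDetailsMiddleware adds the running version and our device ID as
// X-Syncthing-* headers to every response.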
func withDetailsMiddleware(id protocol.DeviceID, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Syncthing-Version", Version)
		w.Header().Set("X-Syncthing-ID", id.String())
		h.ServeHTTP(w, r)
	})
}

func (s *apiService) restPing(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string]string{"ping": "pong"})
}

func (s *apiService) getJSMetadata(w http.ResponseWriter, r *http.Request) {
	meta, _ := json.Marshal(map[string]string{
		"deviceID": s.id.String(),
	})
	w.Header().Set("Content-Type", "application/javascript")
	fmt.Fprintf(w, "var metadata = %s;\n", meta)
}

func (s *apiService) getSystemVersion(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string]string{
		"version":     Version,
		"codename":    Codename,
		"longVersion": LongVersion,
		"os":          runtime.GOOS,
		"arch":        runtime.GOARCH,
	})
}

func (s *apiService) getSystemDebug(w http.ResponseWriter, r *http.Request) {
	names := l.Facilities()
	enabled := l.FacilityDebugging()
	sort.Strings(enabled)
	sendJSON(w, map[string]interface{}{
		"facilities": names,
		"enabled":    enabled,
	})
}

func (s *apiService) postSystemDebug(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	q := r.URL.Query()
	for _, f := range strings.Split(q.Get("enable"), ",") {
		if f == "" || l.ShouldDebug(f) {
			continue
		}
		l.SetDebug(f, true)
		l.Infof("Enabled debug data for %q", f)
	}
	for _, f := range strings.Split(q.Get("disable"), ",") {
		if f == "" || !l.ShouldDebug(f) {
			continue
		}
		l.SetDebug(f, false)
		l.Infof("Disabled debug data for %q", f)
	}
}

func (s *apiService) getDBBrowse(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	prefix := qs.Get("prefix")
	dirsonly := qs.Get("dirsonly") != ""

	levels, err := strconv.Atoi(qs.Get("levels"))
	if err != nil {
		levels = -1
	}

	sendJSON(w, s.model.GlobalDirectoryTree(folder, prefix, levels, dirsonly))
}

func (s *apiService) getDBCompletion(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	var folder = qs.Get("folder")
	var deviceStr = qs.Get("device")

	device, err := protocol.DeviceIDFromString(deviceStr)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	sendJSON(w, map[string]float64{
		"completion": s.model.Completion(device, folder),
	})
}

func (s *apiService) getDBStatus(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	sendJSON(w, folderSummary(s.cfg, s.model, folder))
}

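// folderSummary assembles the status of a single folder: global, local and
// needed counts and sizes, current state, combined local/remote version, and
// whether any effective ignore patterns are set. It backs /rest/db/status.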
func folderSummary(cfg configIntf, m modelIntf, folder string) map[string]interface{} {
	var res = make(map[string]interface{})

	res["invalid"] = cfg.Folders()[folder].Invalid

	globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
	res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes

	localFiles, localDeleted, localBytes := m.LocalSize(folder)
	res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes

	needFiles, needBytes := m.NeedSize(folder)
	res["needFiles"], res["needBytes"] = needFiles, needBytes

	res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes

	var err error
	res["state"], res["stateChanged"], err = m.State(folder)
	if err != nil {
		res["error"] = err.Error()
	}

	lv, _ := m.CurrentLocalVersion(folder)
	rv, _ := m.RemoteLocalVersion(folder)

	res["version"] = lv + rv

	ignorePatterns, _, _ := m.GetIgnores(folder)
	res["ignorePatterns"] = false
	for _, line := range ignorePatterns {
		if len(line) > 0 && !strings.HasPrefix(line, "//") {
			res["ignorePatterns"] = true
			break
		}
	}

	return res
}

func (s *apiService) postDBOverride(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	var folder = qs.Get("folder")
	go s.model.Override(folder)
}

func (s *apiService) getDBNeed(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	folder := qs.Get("folder")

	page, err := strconv.Atoi(qs.Get("page"))
	if err != nil || page < 1 {
		page = 1
	}
	perpage, err := strconv.Atoi(qs.Get("perpage"))
	if err != nil || perpage < 1 {
		perpage = 1 << 16
	}

	progress, queued, rest, total := s.model.NeedFolderFiles(folder, page, perpage)

	// Convert the structs to a looser structure and inject the size.
	sendJSON(w, map[string]interface{}{
		"progress": s.toNeedSlice(progress),
		"queued":   s.toNeedSlice(queued),
		"rest":     s.toNeedSlice(rest),
		"total":    total,
		"page":     page,
		"perpage":  perpage,
	})
}

func (s *apiService) getSystemConnections(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, s.model.ConnectionStats())
}

func (s *apiService) getDeviceStats(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, s.model.DeviceStatistics())
}

func (s *apiService) getFolderStats(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, s.model.FolderStatistics())
}

func (s *apiService) getDBFile(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	file := qs.Get("file")
	gf, gfOk := s.model.CurrentGlobalFile(folder, file)
	lf, lfOk := s.model.CurrentFolderFile(folder, file)

	if !(gfOk || lfOk) {
		// This file for sure does not exist.
		http.Error(w, "No such object in the index", http.StatusNotFound)
		return
	}

	av := s.model.Availability(folder, file, protocol.Vector{}, protocol.BlockInfo{})
	sendJSON(w, map[string]interface{}{
		"global":       jsonFileInfo(gf),
		"local":        jsonFileInfo(lf),
		"availability": av,
	})
}

func (s *apiService) getSystemConfig(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, s.cfg.Raw())
}

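// postSystemConfig replaces the running configuration with the one posted in
// the request body, bcrypt-hashing a changed GUI password and adjusting the
// usage reporting settings before activating and saving it.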
func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
	s.systemConfigMut.Lock()
	defer s.systemConfigMut.Unlock()

	to, err := config.ReadJSON(r.Body, myID)
	r.Body.Close()
	if err != nil {
		l.Warnln("decoding posted config:", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	if to.GUI.Password != s.cfg.GUI().Password {
		if to.GUI.Password != "" {
			hash, err := bcrypt.GenerateFromPassword([]byte(to.GUI.Password), 0)
			if err != nil {
				l.Warnln("bcrypting password:", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}

			to.GUI.Password = string(hash)
		}
	}

	// Fixup usage reporting settings

	if curAcc := s.cfg.Options().URAccepted; to.Options.URAccepted > curAcc {
		// UR was enabled
		to.Options.URAccepted = usageReportVersion
		to.Options.URUniqueID = rand.String(8)
	} else if to.Options.URAccepted < curAcc {
		// UR was disabled
		to.Options.URAccepted = -1
		to.Options.URUniqueID = ""
	}

	// Activate and save

	resp := s.cfg.Replace(to)
	configInSync = !resp.RequiresRestart
	s.cfg.Save()
}

func (s *apiService) getSystemConfigInsync(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string]bool{"configInSync": configInSync})
}

func (s *apiService) postSystemRestart(w http.ResponseWriter, r *http.Request) {
	s.flushResponse(`{"ok": "restarting"}`, w)
	go restart()
}

func (s *apiService) postSystemReset(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	folder := qs.Get("folder")

	if len(folder) > 0 {
		if _, ok := s.cfg.Folders()[folder]; !ok {
			http.Error(w, "Invalid folder ID", 500)
			return
		}
	}

	if len(folder) == 0 {
		// Reset all folders.
		for folder := range s.cfg.Folders() {
			s.model.ResetFolder(folder)
		}
		s.flushResponse(`{"ok": "resetting database"}`, w)
	} else {
		// Reset a specific folder, assuming it's supposed to exist.
		s.model.ResetFolder(folder)
		s.flushResponse(`{"ok": "resetting folder `+folder+`"}`, w)
	}

	go restart()
}

func (s *apiService) postSystemShutdown(w http.ResponseWriter, r *http.Request) {
	s.flushResponse(`{"ok": "shutting down"}`, w)
	go shutdown()
}

func (s *apiService) flushResponse(resp string, w http.ResponseWriter) {
	w.Write([]byte(resp + "\n"))
	f := w.(http.Flusher)
	f.Flush()
}

var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock = sync.NewRWMutex()

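// getSystemStatus reports general process information: device ID, memory and
// goroutine counts, CPU usage, discovery and connection service status,
// uptime and the available GUI themes.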
func (s *apiService) getSystemStatus(w http.ResponseWriter, r *http.Request) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	tilde, _ := osutil.ExpandTilde("~")
	res := make(map[string]interface{})
	res["myID"] = myID.String()
	res["goroutines"] = runtime.NumGoroutine()
	res["alloc"] = m.Alloc
	res["sys"] = m.Sys - m.HeapReleased
	res["tilde"] = tilde
	if s.cfg.Options().LocalAnnEnabled || s.cfg.Options().GlobalAnnEnabled {
		res["discoveryEnabled"] = true
		discoErrors := make(map[string]string)
		discoMethods := 0
		for disco, err := range s.discoverer.ChildErrors() {
			discoMethods++
			if err != nil {
				discoErrors[disco] = err.Error()
			}
		}
		res["discoveryMethods"] = discoMethods
		res["discoveryErrors"] = discoErrors
	}

	res["connectionServiceStatus"] = s.connectionsService.Status()

	cpuUsageLock.RLock()
	var cpusum float64
	for _, p := range cpuUsagePercent {
		cpusum += p
	}
	cpuUsageLock.RUnlock()
	res["cpuPercent"] = cpusum / float64(len(cpuUsagePercent)) / float64(runtime.NumCPU())
	res["pathSeparator"] = string(filepath.Separator)
	res["uptime"] = int(time.Since(startTime).Seconds())
	res["startTime"] = startTime
	res["themes"] = s.themes

	sendJSON(w, res)
}

func (s *apiService) getSystemError(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string][]logger.Line{
		"errors": s.guiErrors.Since(time.Time{}),
	})
}

func (s *apiService) postSystemError(w http.ResponseWriter, r *http.Request) {
	bs, _ := ioutil.ReadAll(r.Body)
	r.Body.Close()
	l.Warnln(string(bs))
}

func (s *apiService) postSystemErrorClear(w http.ResponseWriter, r *http.Request) {
	s.guiErrors.Clear()
}

func (s *apiService) getSystemLog(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	since, err := time.Parse(time.RFC3339, q.Get("since"))
	l.Debugln(err)
	sendJSON(w, map[string][]logger.Line{
		"messages": s.systemLog.Since(since),
	})
}

func (s *apiService) getSystemLogTxt(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	since, err := time.Parse(time.RFC3339, q.Get("since"))
	l.Debugln(err)
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")

	for _, line := range s.systemLog.Since(since) {
		fmt.Fprintf(w, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
	}
}

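// getSystemHTTPMetrics dumps the per-endpoint request counts, rates and
// timing percentiles collected by metricsMiddleware, with times converted to
// milliseconds.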
func (s *apiService) getSystemHTTPMetrics(w http.ResponseWriter, r *http.Request) {
	stats := make(map[string]interface{})
	metrics.Each(func(name string, intf interface{}) {
		if m, ok := intf.(*metrics.StandardTimer); ok {
			pct := m.Percentiles([]float64{0.50, 0.95, 0.99})
			for i := range pct {
				pct[i] /= 1e6 // ns to ms
			}
			stats[name] = map[string]interface{}{
				"count":         m.Count(),
				"sumMs":         m.Sum() / 1e6, // ns to ms
				"ratesPerS":     []float64{m.Rate1(), m.Rate5(), m.Rate15()},
				"percentilesMs": pct,
			}
		}
	})
	bs, _ := json.MarshalIndent(stats, "", "  ")
	w.Write(bs)
}

func (s *apiService) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {
	devices := make(map[string]discover.CacheEntry)

	if s.discoverer != nil {
		// Device ids can't be marshalled as keys so we need to manually
		// rebuild this map using strings. Discoverer may be nil if discovery
		// has not started yet.
		for device, entry := range s.discoverer.Cache() {
			devices[device.String()] = entry
		}
	}

	sendJSON(w, devices)
}

func (s *apiService) getReport(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, reportData(s.cfg, s.model))
}

func (s *apiService) getDBIgnores(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	ignores, patterns, err := s.model.GetIgnores(qs.Get("folder"))
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	sendJSON(w, map[string][]string{
		"ignore":   ignores,
		"expanded": patterns,
	})
}

func (s *apiService) postDBIgnores(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	bs, err := ioutil.ReadAll(r.Body)
	r.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	var data map[string][]string
	err = json.Unmarshal(bs, &data)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	err = s.model.SetIgnores(qs.Get("folder"), data["ignore"])
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	s.getDBIgnores(w, r)
}

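// getEvents returns buffered events with IDs newer than "since", optionally
// truncated to the last "limit" entries. The response is flushed before the
// (possibly blocking) subscription read so the client knows the request was
// received.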
func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	sinceStr := qs.Get("since")
	limitStr := qs.Get("limit")
	since, _ := strconv.Atoi(sinceStr)
	limit, _ := strconv.Atoi(limitStr)

	s.fss.gotEventRequest()

	// Flush before blocking, to indicate that we've received the request and
	// that it should not be retried. Must set Content-Type header before
	// flushing.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	f := w.(http.Flusher)
	f.Flush()

	evs := s.eventSub.Since(since, nil)
	if 0 < limit && limit < len(evs) {
		evs = evs[len(evs)-limit:]
	}

	sendJSON(w, evs)
}

func (s *apiService) getSystemUpgrade(w http.ResponseWriter, r *http.Request) {
	if noUpgrade {
		http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
		return
	}
	rel, err := upgrade.LatestRelease(s.cfg.Options().ReleasesURL, Version)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	res := make(map[string]interface{})
	res["running"] = Version
	res["latest"] = rel.Tag
	res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == upgrade.Newer
	res["majorNewer"] = upgrade.CompareVersions(rel.Tag, Version) == upgrade.MajorNewer

	sendJSON(w, res)
}

func (s *apiService) getDeviceID(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	idStr := qs.Get("id")
	id, err := protocol.DeviceIDFromString(idStr)

	if err == nil {
		sendJSON(w, map[string]string{
			"id": id.String(),
		})
	} else {
		sendJSON(w, map[string]string{
			"error": err.Error(),
		})
	}
}

func (s *apiService) getLang(w http.ResponseWriter, r *http.Request) {
	lang := r.Header.Get("Accept-Language")
	var langs []string
	for _, l := range strings.Split(lang, ",") {
		parts := strings.SplitN(l, ";", 2)
		langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
	}
	sendJSON(w, langs)
}

func (s *apiService) postSystemUpgrade(w http.ResponseWriter, r *http.Request) {
	rel, err := upgrade.LatestRelease(s.cfg.Options().ReleasesURL, Version)
	if err != nil {
		l.Warnln("getting latest release:", err)
		http.Error(w, err.Error(), 500)
		return
	}

	if upgrade.CompareVersions(rel.Tag, Version) > upgrade.Equal {
		err = upgrade.To(rel)
		if err != nil {
			l.Warnln("upgrading:", err)
			http.Error(w, err.Error(), 500)
			return
		}

		s.flushResponse(`{"ok": "restarting"}`, w)
		l.Infoln("Upgrading")
		stop <- exitUpgrading
	}
}

func (s *apiService) postSystemPause(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	var deviceStr = qs.Get("device")

	device, err := protocol.DeviceIDFromString(deviceStr)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	s.model.PauseDevice(device)
}

func (s *apiService) postSystemResume(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	var deviceStr = qs.Get("device")

	device, err := protocol.DeviceIDFromString(deviceStr)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	s.model.ResumeDevice(device)
}

func (s *apiService) postDBScan(w http.ResponseWriter, r *http.Request) {
|
2014-08-11 11:20:01 -07:00
|
|
|
qs := r.URL.Query()
|
2014-09-28 04:00:38 -07:00
|
|
|
folder := qs.Get("folder")
|
2015-02-11 11:52:59 -07:00
|
|
|
if folder != "" {
|
2015-05-03 05:18:50 -07:00
|
|
|
nextStr := qs.Get("next")
|
|
|
|
next, err := strconv.Atoi(nextStr)
|
|
|
|
if err == nil {
|
|
|
|
s.model.DelayScan(folder, time.Duration(next)*time.Second)
|
|
|
|
}
|
|
|
|
|
2015-03-27 01:51:18 -07:00
|
|
|
subs := qs["sub"]
|
2015-05-03 05:18:50 -07:00
|
|
|
err = s.model.ScanFolderSubs(folder, subs)
|
2015-02-11 11:52:59 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
2015-05-01 05:30:17 -07:00
|
|
|
return
|
2015-02-11 11:52:59 -07:00
|
|
|
}
|
|
|
|
} else {
|
2015-04-28 14:12:19 -07:00
|
|
|
errors := s.model.ScanFolders()
|
2015-02-11 11:52:59 -07:00
|
|
|
if len(errors) > 0 {
|
|
|
|
http.Error(w, "Error scanning folders", 500)
|
2015-12-15 14:40:38 -07:00
|
|
|
sendJSON(w, errors)
|
2015-05-01 05:30:17 -07:00
|
|
|
return
|
2015-02-11 11:52:59 -07:00
|
|
|
}
|
2014-08-11 11:20:01 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-23 08:31:12 -07:00
|
|
|
func (s *apiService) postDBPrio(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	file := qs.Get("file")
	s.model.BringToFront(folder, file)
	s.getDBNeed(w, r)
}
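
// getQR encodes the "text" query parameter as a QR code (medium error
// correction) and returns it as a PNG image.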
func (s *apiService) getQR(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	var text = qs.Get("text")
	code, err := qr.Encode(text, qr.M)
	if err != nil {
		http.Error(w, "Invalid", 500)
		return
	}

	w.Header().Set("Content-Type", "image/png")
	w.Write(code.PNG())
}
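
// getPeerCompletion reports the completion percentage per device, averaged
// over all folders shared with that device; disconnected devices count as
// zero. Illustrative response shape: {"<device ID>": 97, ...}.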
func (s *apiService) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
	tot := map[string]float64{}
	count := map[string]float64{}

	for _, folder := range s.cfg.Folders() {
		for _, device := range folder.DeviceIDs() {
			deviceStr := device.String()
			if s.model.ConnectedTo(device) {
				tot[deviceStr] += s.model.Completion(device, folder.ID)
			} else {
				tot[deviceStr] = 0
			}
			count[deviceStr]++
		}
	}

	comp := map[string]int{}
	for device := range tot {
		comp[device] = int(tot[device] / count[device])
	}

	sendJSON(w, comp)
}
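
// getSystemBrowse returns up to ten directories whose paths start with the
// "current" query parameter (after tilde expansion), used by the GUI for
// folder path autocompletion.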
func (s *apiService) getSystemBrowse(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	current := qs.Get("current")
	search, _ := osutil.ExpandTilde(current)
	pathSeparator := string(os.PathSeparator)
	if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
		search = search + pathSeparator
	}
	subdirectories, _ := osutil.Glob(search + "*")
	ret := make([]string, 0, 10)
	for _, subdirectory := range subdirectories {
		info, err := os.Stat(subdirectory)
		if err == nil && info.IsDir() {
			ret = append(ret, subdirectory+pathSeparator)
			if len(ret) > 9 {
				break
			}
		}
	}

	sendJSON(w, ret)
}
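
// embeddedStatic serves the GUI from assets compiled into the binary,
// optionally overridden by files on disk under assetDir. The assets map is
// keyed by "<theme>/<path>" and holds gzip-compressed file contents.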
type embeddedStatic struct {
	theme        string
	lastModified time.Time
	mut          sync.RWMutex
	assetDir     string
	assets       map[string][]byte
}
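
// ServeHTTP looks up the requested file in the following order: an on-disk
// override for the current theme, a compiled-in asset for the current theme,
// an on-disk override for the default theme, and finally a compiled-in
// default asset. Compiled-in assets are stored gzipped and are decompressed
// here when the client does not accept gzip.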
func (s embeddedStatic) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	file := r.URL.Path

	if file[0] == '/' {
		file = file[1:]
	}

	if len(file) == 0 {
		file = "index.html"
	}

	s.mut.RLock()
	theme := s.theme
	modified := s.lastModified
	s.mut.RUnlock()

	// Check for an override for the current theme.
	if s.assetDir != "" {
		p := filepath.Join(s.assetDir, theme, filepath.FromSlash(file))
		if _, err := os.Stat(p); err == nil {
			http.ServeFile(w, r, p)
			return
		}
	}

	// Check for a compiled in asset for the current theme.
	bs, ok := s.assets[theme+"/"+file]
	if !ok {
		// Check for an overridden default asset.
		if s.assetDir != "" {
			p := filepath.Join(s.assetDir, config.DefaultTheme, filepath.FromSlash(file))
			if _, err := os.Stat(p); err == nil {
				http.ServeFile(w, r, p)
				return
			}
		}

		// Check for a compiled in default asset.
		bs, ok = s.assets[config.DefaultTheme+"/"+file]
		if !ok {
			http.NotFound(w, r)
			return
		}
	}

	modifiedSince, err := http.ParseTime(r.Header.Get("If-Modified-Since"))
	if err == nil && !modified.After(modifiedSince) {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	mtype := s.mimeTypeForFile(file)
	if len(mtype) != 0 {
		w.Header().Set("Content-Type", mtype)
	}
	if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		w.Header().Set("Content-Encoding", "gzip")
	} else {
		// The client did not send an Accept-Encoding: gzip header;
		// decompress the stored asset before serving it.
		var gr *gzip.Reader
		gr, _ = gzip.NewReader(bytes.NewReader(bs))
		bs, _ = ioutil.ReadAll(gr)
		gr.Close()
	}
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
	w.Header().Set("Last-Modified", modified.UTC().Format(http.TimeFormat))
	w.Header().Set("Cache-Control", "public")

	w.Write(bs)
}
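
// mimeTypeForFile returns the MIME type for the file's extension, preferring
// a small built-in table over the platform's registry.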
func (s embeddedStatic) mimeTypeForFile(file string) string {
	// We use a built in table of the common types since the system
	// TypeByExtension might be unreliable. But if we don't know, we delegate
	// to the system.
	ext := filepath.Ext(file)
	switch ext {
	case ".htm", ".html":
		return "text/html"
	case ".css":
		return "text/css"
	case ".js":
		return "application/javascript"
	case ".json":
		return "application/json"
	case ".png":
		return "image/png"
	case ".ttf":
		return "application/x-font-ttf"
	case ".woff":
		return "application/x-font-woff"
	case ".svg":
		return "image/svg+xml"
	default:
		return mime.TypeByExtension(ext)
	}
}
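
// embeddedStatic also implements config.Committer so that a GUI theme change
// takes effect immediately: CommitConfiguration bumps lastModified, which
// defeats the If-Modified-Since check in ServeHTTP for assets cached under
// the previous theme.
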
// VerifyConfiguration implements the config.Committer interface
func (s *embeddedStatic) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}

// CommitConfiguration implements the config.Committer interface
func (s *embeddedStatic) CommitConfiguration(from, to config.Configuration) bool {
	s.mut.Lock()
	if s.theme != to.GUI.Theme {
		s.theme = to.GUI.Theme
		s.lastModified = time.Now()
	}
	s.mut.Unlock()

	return true
}
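
// String returns a printable identifier for this instance, mainly useful in
// debug output.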
func (s *embeddedStatic) String() string {
	return fmt.Sprintf("embeddedStatic@%p", s)
}
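
// toNeedSlice wraps database records in jsonDBFileInfo so that the needed
// files listing serializes with GUI friendly field names.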
func (s *apiService) toNeedSlice(fs []db.FileInfoTruncated) []jsonDBFileInfo {
	res := make([]jsonDBFileInfo, len(fs))
	for i, f := range fs {
		res[i] = jsonDBFileInfo(f)
	}
	return res
}

// Type wrappers for nice JSON serialization
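
// jsonFileInfo serializes a protocol.FileInfo with a reduced, GUI friendly
// set of fields, roughly (illustrative values):
//
//	{"name": "a.txt", "size": 1234, "flags": "0644", "modified": "...",
//	 "localVersion": 5, "numBlocks": 10, "version": ["<device>:1"]}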
type jsonFileInfo protocol.FileInfo

func (f jsonFileInfo) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"name":         f.Name,
		"size":         protocol.FileInfo(f).Size(),
		"flags":        fmt.Sprintf("%#o", f.Flags),
		"modified":     time.Unix(f.Modified, 0),
		"localVersion": f.LocalVersion,
		"numBlocks":    len(f.Blocks),
		"version":      jsonVersionVector(f.Version),
	})
}
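
// jsonDBFileInfo is the same idea for truncated database records, which do
// not carry a block list.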
type jsonDBFileInfo db.FileInfoTruncated

func (f jsonDBFileInfo) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"name":         f.Name,
		"size":         db.FileInfoTruncated(f).Size(),
		"flags":        fmt.Sprintf("%#o", f.Flags),
		"modified":     time.Unix(f.Modified, 0),
		"localVersion": f.LocalVersion,
		"version":      jsonVersionVector(f.Version),
	})
}
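
// jsonVersionVector renders a version vector as a list of "id:value"
// strings, e.g. (illustrative) ["ABCD123:42", "EF456:7"].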
type jsonVersionVector protocol.Vector

func (v jsonVersionVector) MarshalJSON() ([]byte, error) {
	res := make([]string, len(v))
	for i, c := range v {
		res[i] = fmt.Sprintf("%v:%d", c.ID, c.Value)
	}
	return json.Marshal(res)
}
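
// dirNames returns the sorted names of the immediate subdirectories of dir,
// or nil if dir cannot be read.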
func dirNames(dir string) []string {
	fd, err := os.Open(dir)
	if err != nil {
		return nil
	}
	defer fd.Close()

	fis, err := fd.Readdir(-1)
	if err != nil {
		return nil
	}

	var dirs []string
	for _, fi := range fis {
		if fi.IsDir() {
			dirs = append(dirs, filepath.Base(fi.Name()))
		}
	}

	sort.Strings(dirs)
	return dirs
}
|