2014-11-16 13:13:20 -07:00
|
|
|
// Copyright (C) 2014 The Syncthing Authors.
|
2014-09-29 12:43:32 -07:00
|
|
|
//
|
2015-03-07 13:36:35 -07:00
|
|
|
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
2014-06-01 13:50:14 -07:00
|
|
|
|
2014-03-02 15:58:14 -07:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2014-08-21 15:45:40 -07:00
|
|
|
"crypto/tls"
|
2014-03-02 15:58:14 -07:00
|
|
|
"encoding/json"
|
2014-05-22 07:12:19 -07:00
|
|
|
"fmt"
|
2014-03-02 15:58:14 -07:00
|
|
|
"io/ioutil"
|
2014-05-22 07:12:19 -07:00
|
|
|
"mime"
|
2014-04-30 13:52:38 -07:00
|
|
|
"net"
|
2014-03-02 15:58:14 -07:00
|
|
|
"net/http"
|
2014-07-22 11:11:36 -07:00
|
|
|
"os"
|
2014-05-22 07:12:19 -07:00
|
|
|
"path/filepath"
|
2014-03-02 15:58:14 -07:00
|
|
|
"runtime"
|
2014-07-13 12:07:24 -07:00
|
|
|
"strconv"
|
2014-07-05 12:40:29 -07:00
|
|
|
"strings"
|
2014-03-02 15:58:14 -07:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2014-10-26 05:15:14 -07:00
|
|
|
"github.com/calmh/logger"
|
2015-01-13 05:22:56 -07:00
|
|
|
"github.com/syncthing/protocol"
|
2014-09-22 12:42:11 -07:00
|
|
|
"github.com/syncthing/syncthing/internal/auto"
|
|
|
|
"github.com/syncthing/syncthing/internal/config"
|
2015-01-12 06:50:30 -07:00
|
|
|
"github.com/syncthing/syncthing/internal/db"
|
2014-10-15 13:52:06 -07:00
|
|
|
"github.com/syncthing/syncthing/internal/discover"
|
2014-09-22 12:42:11 -07:00
|
|
|
"github.com/syncthing/syncthing/internal/events"
|
|
|
|
"github.com/syncthing/syncthing/internal/model"
|
2014-10-06 00:25:45 -07:00
|
|
|
"github.com/syncthing/syncthing/internal/osutil"
|
2014-09-22 12:42:11 -07:00
|
|
|
"github.com/syncthing/syncthing/internal/upgrade"
|
2014-05-21 11:06:14 -07:00
|
|
|
"github.com/vitrun/qart/qr"
|
2014-11-29 16:17:00 -07:00
|
|
|
"golang.org/x/crypto/bcrypt"
|
2014-03-02 15:58:14 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
// guiError is one timestamped error entry in the rolling list of errors
// shown in the web GUI (served by restGetErrors).
type guiError struct {
	Time  time.Time `json:"time"`  // when the error was recorded
	Error string    `json:"error"` // the error message text
}
|
|
|
|
|
|
|
|
var (
	// configInSync is false once a config change that requires a restart
	// has been applied but the restart has not yet happened.
	configInSync = true

	// guiErrors is the rolling list of recent errors shown in the GUI,
	// guarded by guiErrorsMut. Kept as a non-nil slice so it encodes as
	// [] rather than null in JSON.
	guiErrors    = []guiError{}
	guiErrorsMut sync.Mutex

	// modt is a fixed HTTP-format timestamp taken at startup; presumably
	// used as the modification time for embedded static assets — confirm
	// against embeddedStatic.
	modt = time.Now().UTC().Format(http.TimeFormat)

	// eventSub buffers events so the /rest/events endpoint can replay
	// recent history to polling clients.
	eventSub *events.BufferedSubscription
)
|
|
|
|
|
2015-03-26 15:26:51 -07:00
|
|
|
var (
	// lastEventRequest records when /rest/events was last polled
	// (updated in restGetEvents), guarded by lastEventRequestMut.
	lastEventRequest    time.Time
	lastEventRequestMut sync.Mutex
)
|
|
|
|
|
2014-05-14 17:08:56 -07:00
|
|
|
func init() {
	// Surface warning-level log messages in the GUI error list.
	l.AddHandler(logger.LevelWarn, showGuiError)

	// Buffer all events so the REST event polling endpoint can hand out
	// up to the last 1000 events to clients that connect late.
	sub := events.Default.Subscribe(events.AllEvents)
	eventSub = events.NewBufferedSubscription(sub, 1000)
}
|
|
|
|
|
2014-05-22 07:12:19 -07:00
|
|
|
// startGUI sets up and starts the HTTP(S) server providing the web GUI and
// the REST API on cfg.Address. Static assets come from assetDir when set
// (development mode), otherwise from the compiled-in assets. The server
// itself runs on a background goroutine; startGUI returns once the listener
// is established, or an error if certificate loading/creation or listening
// fails.
func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
	var err error

	cert, err := tls.LoadX509KeyPair(locations[locHttpsCertFile], locations[locHttpsKeyFile])
	if err != nil {
		l.Infoln("Loading HTTPS certificate:", err)
		l.Infoln("Creating new HTTPS certificate")

		// When generating the HTTPS certificate, use the system host name per
		// default. If that isn't available, use the "syncthing" default.
		var name string
		name, err = os.Hostname()
		if err != nil {
			name = tlsDefaultCommonName
		}

		cert, err = newCertificate(locations[locHttpsCertFile], locations[locHttpsKeyFile], name)
	}
	if err != nil {
		return err
	}
	tlsCfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS10, // No SSLv3
		CipherSuites: []uint16{
			// No RC4
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
			tls.TLS_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
			tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
		},
	}

	rawListener, err := net.Listen("tcp", cfg.Address)
	if err != nil {
		return err
	}
	// The DowngradingListener lets us answer both plain HTTP and TLS on
	// the same port.
	listener := &DowngradingListener{rawListener, tlsCfg}

	// The GET handlers
	getRestMux := http.NewServeMux()
	getRestMux.HandleFunc("/rest/ping", restPing)
	getRestMux.HandleFunc("/rest/completion", withModel(m, restGetCompletion))
	getRestMux.HandleFunc("/rest/config", restGetConfig)
	getRestMux.HandleFunc("/rest/config/sync", restGetConfigInSync)
	getRestMux.HandleFunc("/rest/connections", withModel(m, restGetConnections))
	getRestMux.HandleFunc("/rest/autocomplete/directory", restGetAutocompleteDirectory)
	getRestMux.HandleFunc("/rest/discovery", restGetDiscovery)
	getRestMux.HandleFunc("/rest/errors", restGetErrors)
	getRestMux.HandleFunc("/rest/events", restGetEvents)
	getRestMux.HandleFunc("/rest/ignores", withModel(m, restGetIgnores))
	getRestMux.HandleFunc("/rest/lang", restGetLang)
	getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
	getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
	getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
	getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
	getRestMux.HandleFunc("/rest/system", restGetSystem)
	getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
	getRestMux.HandleFunc("/rest/version", restGetVersion)
	getRestMux.HandleFunc("/rest/tree", withModel(m, restGetTree))
	getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
	getRestMux.HandleFunc("/rest/stats/folder", withModel(m, restGetFolderStats))
	getRestMux.HandleFunc("/rest/filestatus", withModel(m, restGetFileStatus))

	// Debug endpoints, not for general use
	getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))

	// The POST handlers
	postRestMux := http.NewServeMux()
	postRestMux.HandleFunc("/rest/ping", restPing)
	postRestMux.HandleFunc("/rest/config", withModel(m, restPostConfig))
	postRestMux.HandleFunc("/rest/discovery/hint", restPostDiscoveryHint)
	postRestMux.HandleFunc("/rest/error", restPostError)
	postRestMux.HandleFunc("/rest/error/clear", restClearErrors)
	postRestMux.HandleFunc("/rest/ignores", withModel(m, restPostIgnores))
	postRestMux.HandleFunc("/rest/model/override", withModel(m, restPostOverride))
	postRestMux.HandleFunc("/rest/reset", restPostReset)
	postRestMux.HandleFunc("/rest/restart", restPostRestart)
	postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
	postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
	postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
	postRestMux.HandleFunc("/rest/bump", withModel(m, restPostBump))

	// A handler that splits requests between the two above and disables
	// caching
	restMux := noCacheMiddleware(getPostHandler(getRestMux, postRestMux))

	// The main routing handler
	mux := http.NewServeMux()
	mux.Handle("/rest/", restMux)
	mux.HandleFunc("/qr/", getQR)

	// Serve compiled in assets unless an asset directory was set (for development)
	mux.Handle("/", embeddedStatic(assetDir))

	// Wrap everything in CSRF protection. The /rest prefix should be
	// protected, other requests will grant cookies.
	handler := csrfMiddleware("/rest", cfg.APIKey, mux)

	// Add our version as a header to responses
	handler = withVersionMiddleware(handler)

	// Wrap everything in basic auth, if user/password is set.
	if len(cfg.User) > 0 && len(cfg.Password) > 0 {
		handler = basicAuthAndSessionMiddleware(cfg, handler)
	}

	// Redirect to HTTPS if we are supposed to
	if cfg.UseTLS {
		handler = redirectToHTTPSMiddleware(handler)
	}

	srv := http.Server{
		Handler:     handler,
		ReadTimeout: 10 * time.Second,
	}

	csrv := &folderSummarySvc{model: m}
	go csrv.Serve()

	go func() {
		err := srv.Serve(listener)
		if err != nil {
			panic(err)
		}
	}()
	return nil
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// getPostHandler returns a handler that dispatches GET requests to get and
// POST requests to post; any other method is answered with 405.
func getPostHandler(get, post http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			get.ServeHTTP(w, r)
			return
		}
		if r.Method == "POST" {
			post.ServeHTTP(w, r)
			return
		}
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
	})
}
|
|
|
|
|
2014-09-14 15:18:05 -07:00
|
|
|
// redirectToHTTPSMiddleware answers plain-HTTP requests with a 302 redirect
// to the same URL over HTTPS, and passes TLS requests through to h.
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Add a generous access-control-allow-origin header since we may be
		// redirecting REST requests over protocols
		w.Header().Add("Access-Control-Allow-Origin", "*")

		if r.TLS != nil {
			h.ServeHTTP(w, r)
			return
		}
		// Redirect HTTP requests to HTTPS
		r.URL.Host = r.Host
		r.URL.Scheme = "https"
		http.Redirect(w, r, r.URL.String(), http.StatusFound)
	})
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// noCacheMiddleware wraps next so every response carries a
// "Cache-Control: no-cache" header, disabling client-side caching.
func noCacheMiddleware(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Cache-Control", "no-cache")
		next.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
|
|
|
|
|
2014-08-31 03:59:20 -07:00
|
|
|
func withVersionMiddleware(h http.Handler) http.Handler {
|
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
w.Header().Set("X-Syncthing-Version", Version)
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func withModel(m *model.Model, h func(m *model.Model, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
h(m, w, r)
|
2014-03-26 12:32:35 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-18 03:55:28 -07:00
|
|
|
// restPing answers with a fixed {"ping": "pong"} JSON object; the GUI uses
// it to check that the API is alive.
func restPing(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	resp := map[string]string{"ping": "pong"}
	json.NewEncoder(w).Encode(resp)
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetVersion(w http.ResponseWriter, r *http.Request) {
|
2014-09-18 03:52:45 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(map[string]string{
|
|
|
|
"version": Version,
|
|
|
|
"longVersion": LongVersion,
|
|
|
|
"os": runtime.GOOS,
|
|
|
|
"arch": runtime.GOARCH,
|
|
|
|
})
|
2014-03-02 15:58:14 -07:00
|
|
|
}
|
|
|
|
|
2015-02-07 03:52:42 -07:00
|
|
|
func restGetTree(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
prefix := qs.Get("prefix")
|
|
|
|
dirsonly := qs.Get("dirsonly") != ""
|
|
|
|
|
|
|
|
levels, err := strconv.Atoi(qs.Get("levels"))
|
|
|
|
if err != nil {
|
|
|
|
levels = -1
|
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
|
|
|
|
tree := m.GlobalDirectoryTree(folder, prefix, levels, dirsonly)
|
|
|
|
|
|
|
|
json.NewEncoder(w).Encode(tree)
|
|
|
|
}
|
|
|
|
|
2014-07-29 02:06:52 -07:00
|
|
|
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
var qs = r.URL.Query()
|
2014-09-28 04:00:38 -07:00
|
|
|
var folder = qs.Get("folder")
|
|
|
|
var deviceStr = qs.Get("device")
|
2014-07-29 02:06:52 -07:00
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
device, err := protocol.DeviceIDFromString(deviceStr)
|
2014-07-29 02:06:52 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
res := map[string]float64{
|
2014-09-28 04:00:38 -07:00
|
|
|
"completion": m.Completion(device, folder),
|
2014-07-29 02:06:52 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(res)
|
|
|
|
}
|
|
|
|
|
2014-05-14 20:26:55 -07:00
|
|
|
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
2015-03-26 15:26:51 -07:00
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
res := folderSummary(m, folder)
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(res)
|
|
|
|
}
|
|
|
|
|
|
|
|
func folderSummary(m *model.Model, folder string) map[string]interface{} {
|
2014-03-02 15:58:14 -07:00
|
|
|
var res = make(map[string]interface{})
|
|
|
|
|
2014-10-06 00:25:45 -07:00
|
|
|
res["invalid"] = cfg.Folders()[folder].Invalid
|
2014-04-27 12:53:27 -07:00
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
|
2014-03-02 15:58:14 -07:00
|
|
|
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
|
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
localFiles, localDeleted, localBytes := m.LocalSize(folder)
|
2014-03-02 15:58:14 -07:00
|
|
|
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
|
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
needFiles, needBytes := m.NeedSize(folder)
|
2014-04-09 13:03:30 -07:00
|
|
|
res["needFiles"], res["needBytes"] = needFiles, needBytes
|
2014-03-02 15:58:14 -07:00
|
|
|
|
2014-04-09 13:03:30 -07:00
|
|
|
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
|
2014-03-02 15:58:14 -07:00
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
res["state"], res["stateChanged"] = m.State(folder)
|
|
|
|
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
|
2014-04-14 00:58:17 -07:00
|
|
|
|
2015-01-27 07:27:44 -07:00
|
|
|
ignorePatterns, _, _ := m.GetIgnores(folder)
|
|
|
|
res["ignorePatterns"] = false
|
|
|
|
for _, line := range ignorePatterns {
|
|
|
|
if len(line) > 0 && !strings.HasPrefix(line, "//") {
|
|
|
|
res["ignorePatterns"] = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-26 15:26:51 -07:00
|
|
|
return res
|
2014-03-02 15:58:14 -07:00
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
2014-06-16 01:47:02 -07:00
|
|
|
var qs = r.URL.Query()
|
2014-09-28 04:00:38 -07:00
|
|
|
var folder = qs.Get("folder")
|
|
|
|
go m.Override(folder)
|
2014-06-16 01:47:02 -07:00
|
|
|
}
|
|
|
|
|
2014-05-19 13:31:28 -07:00
|
|
|
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
var qs = r.URL.Query()
|
2014-09-28 04:00:38 -07:00
|
|
|
var folder = qs.Get("folder")
|
2014-05-19 13:31:28 -07:00
|
|
|
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
progress, queued, rest := m.NeedFolderFiles(folder, 100)
|
2014-11-22 17:52:48 -07:00
|
|
|
// Convert the struct to a more loose structure, and inject the size.
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
output := map[string][]map[string]interface{}{
|
|
|
|
"progress": toNeedSlice(progress),
|
|
|
|
"queued": toNeedSlice(queued),
|
|
|
|
"rest": toNeedSlice(rest),
|
2014-11-22 17:52:48 -07:00
|
|
|
}
|
2014-05-19 13:31:28 -07:00
|
|
|
|
2014-06-22 08:26:31 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-11-22 17:52:48 -07:00
|
|
|
json.NewEncoder(w).Encode(output)
|
2014-05-19 13:31:28 -07:00
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
2014-03-02 15:58:14 -07:00
|
|
|
var res = m.ConnectionStats()
|
2014-06-22 08:26:31 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-03-02 15:58:14 -07:00
|
|
|
json.NewEncoder(w).Encode(res)
|
|
|
|
}
|
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
var res = m.DeviceStatistics()
|
2014-08-21 15:46:34 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(res)
|
|
|
|
}
|
|
|
|
|
2014-12-07 13:21:12 -07:00
|
|
|
func restGetFolderStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
var res = m.FolderStatistics()
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(res)
|
|
|
|
}
|
|
|
|
|
2015-03-17 10:51:50 -07:00
|
|
|
func restGetFileStatus(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
file := qs.Get("file")
|
|
|
|
withBlocks := qs.Get("blocks") != ""
|
|
|
|
gf, _ := m.CurrentGlobalFile(folder, file)
|
|
|
|
lf, _ := m.CurrentFolderFile(folder, file)
|
|
|
|
|
|
|
|
if !withBlocks {
|
|
|
|
gf.Blocks = nil
|
|
|
|
lf.Blocks = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
av := m.Availability(folder, file)
|
|
|
|
json.NewEncoder(w).Encode(map[string]interface{}{
|
|
|
|
"global": gf,
|
|
|
|
"local": lf,
|
|
|
|
"availability": av,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetConfig(w http.ResponseWriter, r *http.Request) {
|
2014-06-22 08:26:31 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-10-06 00:25:45 -07:00
|
|
|
json.NewEncoder(w).Encode(cfg.Raw())
|
2014-03-02 15:58:14 -07:00
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// restPostConfig replaces the running configuration with the JSON body of
// the request. Side effects: a changed GUI password is bcrypt-hashed before
// storage, and usage reporting is started or stopped when its acceptance
// state changes. Finally the new config is activated and saved, and
// configInSync is updated to reflect whether a restart is required.
func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
	var newCfg config.Configuration
	err := json.NewDecoder(r.Body).Decode(&newCfg)
	if err != nil {
		l.Warnln("decoding posted config:", err)
		http.Error(w, err.Error(), 500)
		return
	}

	// Only hash when the password actually changed: an unchanged value is
	// the already-stored hash and must not be hashed again.
	if newCfg.GUI.Password != cfg.GUI().Password {
		if newCfg.GUI.Password != "" {
			// Cost 0 selects bcrypt's default cost.
			hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
			if err != nil {
				l.Warnln("bcrypting password:", err)
				http.Error(w, err.Error(), 500)
				return
			}

			newCfg.GUI.Password = string(hash)
		}
	}

	// Start or stop usage reporting as appropriate

	if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
		// UR was enabled
		newCfg.Options.URAccepted = usageReportVersion
		newCfg.Options.URUniqueID = randomString(8)
		err := sendUsageReport(m)
		if err != nil {
			l.Infoln("Usage report:", err)
		}
		go usageReportingLoop(m)
	} else if newCfg.Options.URAccepted < curAcc {
		// UR was disabled
		newCfg.Options.URAccepted = -1
		newCfg.Options.URUniqueID = ""
		stopUsageReporting()
	}

	// Activate and save

	configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
	cfg.Replace(newCfg)
	cfg.Save()
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetConfigInSync(w http.ResponseWriter, r *http.Request) {
|
2014-06-22 08:26:31 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-03-02 15:58:14 -07:00
|
|
|
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
|
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// restPostRestart acknowledges the request immediately (flushing so the
// response reaches the client), then restarts the process asynchronously.
func restPostRestart(w http.ResponseWriter, r *http.Request) {
	flushResponse(`{"ok": "restarting"}`, w)
	go restart()
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// restPostReset acknowledges the request, resets all folders (synchronously,
// before the restart is triggered) and then restarts the process.
func restPostReset(w http.ResponseWriter, r *http.Request) {
	flushResponse(`{"ok": "resetting folders"}`, w)
	resetFolders()
	go restart()
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// restPostShutdown acknowledges the request (flushing so the response
// reaches the client), then shuts the process down asynchronously.
func restPostShutdown(w http.ResponseWriter, r *http.Request) {
	flushResponse(`{"ok": "shutting down"}`, w)
	go shutdown()
}
|
|
|
|
|
2014-05-12 17:15:18 -07:00
|
|
|
// flushResponse writes s plus a trailing newline to w and flushes it to the
// client immediately, so short status replies (e.g. {"ok": ...}) arrive
// before a subsequent restart/shutdown tears down the connection.
//
// Fix: guard the http.Flusher type assertion — the original unchecked
// assertion panics for ResponseWriter implementations without Flush.
func flushResponse(s string, w http.ResponseWriter) {
	w.Write([]byte(s + "\n"))
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}
|
|
|
|
|
2014-04-14 03:02:40 -07:00
|
|
|
// cpuUsagePercent holds the most recent CPU usage samples, one per second
// for the last ten seconds; restGetSystem averages them. Guarded by
// cpuUsageLock.
var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
// restGetSystem reports process-level status as JSON: our device ID,
// goroutine count, memory usage, CPU usage, home-directory tilde expansion,
// path separator and (when global discovery is running) whether external
// announcement is working.
func restGetSystem(w http.ResponseWriter, r *http.Request) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	tilde, _ := osutil.ExpandTilde("~")
	res := make(map[string]interface{})
	res["myID"] = myID.String()
	res["goroutines"] = runtime.NumGoroutine()
	res["alloc"] = m.Alloc
	// Memory obtained from the OS, minus what has been returned to it.
	res["sys"] = m.Sys - m.HeapReleased
	res["tilde"] = tilde
	if cfg.Options().GlobalAnnEnabled && discoverer != nil {
		res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
	}
	cpuUsageLock.RLock()
	var cpusum float64
	for _, p := range cpuUsagePercent {
		cpusum += p
	}
	cpuUsageLock.RUnlock()
	// Average over the sample window, normalized by the CPU count.
	res["cpuPercent"] = cpusum / float64(len(cpuUsagePercent)) / float64(runtime.NumCPU())
	res["pathSeparator"] = string(filepath.Separator)

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	json.NewEncoder(w).Encode(res)
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetErrors(w http.ResponseWriter, r *http.Request) {
|
2014-06-22 08:26:31 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-03-02 15:58:14 -07:00
|
|
|
guiErrorsMut.Lock()
|
2014-09-18 03:49:59 -07:00
|
|
|
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors})
|
2014-03-02 15:58:14 -07:00
|
|
|
guiErrorsMut.Unlock()
|
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restPostError(w http.ResponseWriter, r *http.Request) {
|
|
|
|
bs, _ := ioutil.ReadAll(r.Body)
|
|
|
|
r.Body.Close()
|
2014-05-14 17:08:56 -07:00
|
|
|
showGuiError(0, string(bs))
|
2014-03-02 15:58:14 -07:00
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restClearErrors(w http.ResponseWriter, r *http.Request) {
|
2014-04-16 07:30:49 -07:00
|
|
|
guiErrorsMut.Lock()
|
2014-05-17 04:54:11 -07:00
|
|
|
guiErrors = []guiError{}
|
2014-04-16 07:30:49 -07:00
|
|
|
guiErrorsMut.Unlock()
|
|
|
|
}
|
|
|
|
|
2014-05-14 17:08:56 -07:00
|
|
|
func showGuiError(l logger.LogLevel, err string) {
|
2014-03-02 15:58:14 -07:00
|
|
|
guiErrorsMut.Lock()
|
|
|
|
guiErrors = append(guiErrors, guiError{time.Now(), err})
|
|
|
|
if len(guiErrors) > 5 {
|
|
|
|
guiErrors = guiErrors[len(guiErrors)-5:]
|
|
|
|
}
|
|
|
|
guiErrorsMut.Unlock()
|
|
|
|
}
|
2014-04-19 04:33:51 -07:00
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
|
2014-05-12 17:50:54 -07:00
|
|
|
var qs = r.URL.Query()
|
2014-09-28 04:00:38 -07:00
|
|
|
var device = qs.Get("device")
|
2014-05-12 17:50:54 -07:00
|
|
|
var addr = qs.Get("addr")
|
2014-09-28 04:00:38 -07:00
|
|
|
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
|
|
|
|
discoverer.Hint(device, []string{addr})
|
2014-05-12 17:50:54 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetDiscovery(w http.ResponseWriter, r *http.Request) {
|
2014-10-15 11:23:52 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-10-28 12:40:04 -07:00
|
|
|
devices := map[string][]discover.CacheEntry{}
|
|
|
|
|
|
|
|
if discoverer != nil {
|
|
|
|
// Device ids can't be marshalled as keys so we need to manually
|
|
|
|
// rebuild this map using strings. Discoverer may be nil if discovery
|
|
|
|
// has not started yet.
|
|
|
|
for device, entries := range discoverer.All() {
|
|
|
|
devices[device.String()] = entries
|
|
|
|
}
|
2014-10-15 11:23:28 -07:00
|
|
|
}
|
2014-10-15 13:52:06 -07:00
|
|
|
|
2014-10-15 11:23:28 -07:00
|
|
|
json.NewEncoder(w).Encode(devices)
|
2014-05-12 18:08:55 -07:00
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func restGetReport(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
2014-06-22 08:26:31 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-06-11 11:04:23 -07:00
|
|
|
json.NewEncoder(w).Encode(reportData(m))
|
|
|
|
}
|
|
|
|
|
2014-09-15 15:12:29 -07:00
|
|
|
func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
|
2014-11-08 14:12:18 -07:00
|
|
|
ignores, patterns, err := m.GetIgnores(qs.Get("folder"))
|
2014-09-15 15:12:29 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
2014-11-08 14:12:18 -07:00
|
|
|
|
2014-09-15 15:12:29 -07:00
|
|
|
json.NewEncoder(w).Encode(map[string][]string{
|
2014-11-08 14:12:18 -07:00
|
|
|
"ignore": ignores,
|
|
|
|
"patterns": patterns,
|
2014-09-15 15:12:29 -07:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
|
|
|
|
|
|
|
var data map[string][]string
|
|
|
|
err := json.NewDecoder(r.Body).Decode(&data)
|
|
|
|
r.Body.Close()
|
2014-09-19 13:02:53 -07:00
|
|
|
|
2014-09-15 15:12:29 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
err = m.SetIgnores(qs.Get("folder"), data["ignore"])
|
2014-09-15 15:12:29 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
restGetIgnores(m, w, r)
|
|
|
|
}
|
|
|
|
|
2014-07-13 12:07:24 -07:00
|
|
|
func restGetEvents(w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
2014-07-29 02:06:52 -07:00
|
|
|
sinceStr := qs.Get("since")
|
|
|
|
limitStr := qs.Get("limit")
|
|
|
|
since, _ := strconv.Atoi(sinceStr)
|
|
|
|
limit, _ := strconv.Atoi(limitStr)
|
|
|
|
|
2015-03-26 15:26:51 -07:00
|
|
|
lastEventRequestMut.Lock()
|
|
|
|
lastEventRequest = time.Now()
|
|
|
|
lastEventRequestMut.Unlock()
|
|
|
|
|
2014-08-19 15:30:32 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
|
2014-08-19 15:18:28 -07:00
|
|
|
// Flush before blocking, to indicate that we've received the request
|
|
|
|
// and that it should not be retried.
|
|
|
|
f := w.(http.Flusher)
|
|
|
|
f.Flush()
|
|
|
|
|
2014-07-29 02:06:52 -07:00
|
|
|
evs := eventSub.Since(since, nil)
|
|
|
|
if 0 < limit && limit < len(evs) {
|
|
|
|
evs = evs[len(evs)-limit:]
|
|
|
|
}
|
2014-07-13 12:07:24 -07:00
|
|
|
|
2014-07-29 02:06:52 -07:00
|
|
|
json.NewEncoder(w).Encode(evs)
|
2014-07-13 12:07:24 -07:00
|
|
|
}
|
|
|
|
|
2014-07-14 01:45:29 -07:00
|
|
|
func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
|
2015-01-06 14:40:52 -07:00
|
|
|
if noUpgrade {
|
|
|
|
http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
2015-03-28 02:21:00 -07:00
|
|
|
rel, err := upgrade.LatestGithubRelease(Version)
|
2014-07-14 01:45:29 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
res := make(map[string]interface{})
|
|
|
|
res["running"] = Version
|
|
|
|
res["latest"] = rel.Tag
|
2014-07-31 07:01:23 -07:00
|
|
|
res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == 1
|
2014-07-14 01:45:29 -07:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(res)
|
|
|
|
}
|
|
|
|
|
2014-09-28 04:00:38 -07:00
|
|
|
func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
|
2014-07-18 01:00:02 -07:00
|
|
|
qs := r.URL.Query()
|
|
|
|
idStr := qs.Get("id")
|
2014-09-28 04:00:38 -07:00
|
|
|
id, err := protocol.DeviceIDFromString(idStr)
|
2014-07-18 01:00:02 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
if err == nil {
|
|
|
|
json.NewEncoder(w).Encode(map[string]string{
|
|
|
|
"id": id.String(),
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
json.NewEncoder(w).Encode(map[string]string{
|
|
|
|
"error": err.Error(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-26 13:30:29 -07:00
|
|
|
func restGetLang(w http.ResponseWriter, r *http.Request) {
|
|
|
|
lang := r.Header.Get("Accept-Language")
|
|
|
|
var langs []string
|
|
|
|
for _, l := range strings.Split(lang, ",") {
|
2014-08-14 08:04:17 -07:00
|
|
|
parts := strings.SplitN(l, ";", 2)
|
2014-08-28 04:23:23 -07:00
|
|
|
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
|
2014-07-26 13:30:29 -07:00
|
|
|
}
|
2014-08-05 00:38:38 -07:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2014-07-26 13:30:29 -07:00
|
|
|
json.NewEncoder(w).Encode(langs)
|
|
|
|
}
|
|
|
|
|
2014-07-14 01:45:29 -07:00
|
|
|
func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
|
2015-03-28 02:21:00 -07:00
|
|
|
rel, err := upgrade.LatestGithubRelease(Version)
|
2014-07-14 01:45:29 -07:00
|
|
|
if err != nil {
|
2014-08-17 01:28:36 -07:00
|
|
|
l.Warnln("getting latest release:", err)
|
2014-07-14 01:45:29 -07:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-07-31 07:01:23 -07:00
|
|
|
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
|
2014-12-08 08:36:15 -07:00
|
|
|
err = upgrade.To(rel)
|
2014-07-31 07:01:23 -07:00
|
|
|
if err != nil {
|
2014-08-17 01:28:36 -07:00
|
|
|
l.Warnln("upgrading:", err)
|
2014-07-31 07:01:23 -07:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-09-13 07:25:39 -07:00
|
|
|
flushResponse(`{"ok": "restarting"}`, w)
|
|
|
|
l.Infoln("Upgrading")
|
|
|
|
stop <- exitUpgrading
|
2014-07-31 07:01:23 -07:00
|
|
|
}
|
2014-07-14 01:45:29 -07:00
|
|
|
}
|
|
|
|
|
2014-08-11 11:20:01 -07:00
|
|
|
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
2014-09-28 04:00:38 -07:00
|
|
|
folder := qs.Get("folder")
|
2015-02-11 11:52:59 -07:00
|
|
|
if folder != "" {
|
2015-03-27 01:51:18 -07:00
|
|
|
subs := qs["sub"]
|
|
|
|
err := m.ScanFolderSubs(folder, subs)
|
2015-02-11 11:52:59 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
errors := m.ScanFolders()
|
|
|
|
if len(errors) > 0 {
|
|
|
|
http.Error(w, "Error scanning folders", 500)
|
|
|
|
json.NewEncoder(w).Encode(errors)
|
|
|
|
}
|
2014-08-11 11:20:01 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
func restPostBump(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
file := qs.Get("file")
|
2014-12-30 01:35:21 -07:00
|
|
|
m.BringToFront(folder, file)
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
restGetNeed(m, w, r)
|
|
|
|
}
|
|
|
|
|
2014-07-05 12:40:29 -07:00
|
|
|
func getQR(w http.ResponseWriter, r *http.Request) {
|
2014-08-04 13:53:37 -07:00
|
|
|
var qs = r.URL.Query()
|
|
|
|
var text = qs.Get("text")
|
2014-07-05 12:40:29 -07:00
|
|
|
code, err := qr.Encode(text, qr.M)
|
2014-05-21 11:06:14 -07:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, "Invalid", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "image/png")
|
|
|
|
w.Write(code.PNG())
|
|
|
|
}
|
|
|
|
|
2014-07-29 04:01:27 -07:00
|
|
|
func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
|
|
|
tot := map[string]float64{}
|
|
|
|
count := map[string]float64{}
|
|
|
|
|
2014-10-06 00:25:45 -07:00
|
|
|
for _, folder := range cfg.Folders() {
|
2014-09-28 04:00:38 -07:00
|
|
|
for _, device := range folder.DeviceIDs() {
|
|
|
|
deviceStr := device.String()
|
|
|
|
if m.ConnectedTo(device) {
|
|
|
|
tot[deviceStr] += m.Completion(device, folder.ID)
|
2014-07-29 04:01:27 -07:00
|
|
|
} else {
|
2014-09-28 04:00:38 -07:00
|
|
|
tot[deviceStr] = 0
|
2014-07-29 04:01:27 -07:00
|
|
|
}
|
2014-09-28 04:00:38 -07:00
|
|
|
count[deviceStr]++
|
2014-07-29 04:01:27 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
comp := map[string]int{}
|
2014-09-28 04:00:38 -07:00
|
|
|
for device := range tot {
|
|
|
|
comp[device] = int(tot[device] / count[device])
|
2014-07-29 04:01:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
json.NewEncoder(w).Encode(comp)
|
|
|
|
}
|
|
|
|
|
2014-11-16 12:30:49 -07:00
|
|
|
func restGetAutocompleteDirectory(w http.ResponseWriter, r *http.Request) {
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
qs := r.URL.Query()
|
|
|
|
current := qs.Get("current")
|
|
|
|
search, _ := osutil.ExpandTilde(current)
|
|
|
|
pathSeparator := string(os.PathSeparator)
|
|
|
|
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
|
|
|
|
search = search + pathSeparator
|
|
|
|
}
|
|
|
|
subdirectories, _ := filepath.Glob(search + "*")
|
|
|
|
ret := make([]string, 0, 10)
|
|
|
|
for _, subdirectory := range subdirectories {
|
|
|
|
info, err := os.Stat(subdirectory)
|
|
|
|
if err == nil && info.IsDir() {
|
2014-11-22 17:52:48 -07:00
|
|
|
ret = append(ret, subdirectory+pathSeparator)
|
2014-11-16 12:30:49 -07:00
|
|
|
if len(ret) > 9 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
json.NewEncoder(w).Encode(ret)
|
|
|
|
}
|
|
|
|
|
2014-07-22 11:11:36 -07:00
|
|
|
func embeddedStatic(assetDir string) http.Handler {
|
2014-09-02 04:07:33 -07:00
|
|
|
assets := auto.Assets()
|
|
|
|
|
2014-07-22 11:11:36 -07:00
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
file := r.URL.Path
|
2014-05-22 07:12:19 -07:00
|
|
|
|
2014-07-22 11:11:36 -07:00
|
|
|
if file[0] == '/' {
|
|
|
|
file = file[1:]
|
|
|
|
}
|
2014-05-22 07:12:19 -07:00
|
|
|
|
2014-07-22 11:11:36 -07:00
|
|
|
if len(file) == 0 {
|
|
|
|
file = "index.html"
|
|
|
|
}
|
2014-05-22 07:12:19 -07:00
|
|
|
|
2014-07-22 11:11:36 -07:00
|
|
|
if assetDir != "" {
|
|
|
|
p := filepath.Join(assetDir, filepath.FromSlash(file))
|
|
|
|
_, err := os.Stat(p)
|
|
|
|
if err == nil {
|
|
|
|
http.ServeFile(w, r, p)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2014-05-22 07:12:19 -07:00
|
|
|
|
2014-09-02 04:07:33 -07:00
|
|
|
bs, ok := assets[file]
|
2014-07-22 11:11:36 -07:00
|
|
|
if !ok {
|
|
|
|
http.NotFound(w, r)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-09-03 23:24:42 -07:00
|
|
|
mtype := mimeTypeForFile(file)
|
2014-07-22 11:11:36 -07:00
|
|
|
if len(mtype) != 0 {
|
|
|
|
w.Header().Set("Content-Type", mtype)
|
|
|
|
}
|
|
|
|
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
|
|
|
w.Header().Set("Last-Modified", modt)
|
2014-07-05 12:40:29 -07:00
|
|
|
|
2014-07-22 11:11:36 -07:00
|
|
|
w.Write(bs)
|
|
|
|
})
|
2014-05-22 07:12:19 -07:00
|
|
|
}
|
2014-09-03 23:24:42 -07:00
|
|
|
|
|
|
|
// mimeTypeForFile returns the MIME type for the given file name based on
// its extension. We use a built in table of the common types since the
// system TypeByExtension might be unreliable. But if we don't know, we
// delegate to the system.
func mimeTypeForFile(file string) string {
	ext := filepath.Ext(file)
	known := map[string]string{
		".htm":  "text/html",
		".html": "text/html",
		".css":  "text/css",
		".js":   "application/javascript",
		".json": "application/json",
		".png":  "image/png",
		".ttf":  "application/x-font-ttf",
		".woff": "application/x-font-woff",
		".svg":  "image/svg+xml",
	}
	if mtype, ok := known[ext]; ok {
		return mtype
	}
	return mime.TypeByExtension(ext)
}
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
|
2015-01-12 06:50:30 -07:00
|
|
|
func toNeedSlice(fs []db.FileInfoTruncated) []map[string]interface{} {
|
2015-01-09 00:19:32 -07:00
|
|
|
output := make([]map[string]interface{}, len(fs))
|
|
|
|
for i, file := range fs {
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
output[i] = map[string]interface{}{
|
2015-03-10 15:45:43 -07:00
|
|
|
"name": file.Name,
|
|
|
|
"flags": file.Flags,
|
|
|
|
"modified": file.Modified,
|
|
|
|
"version": file.Version,
|
|
|
|
"localVersion": file.LocalVersion,
|
|
|
|
"size": file.Size(),
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 12:23:06 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return output
|
|
|
|
}
|