Merge branch 'v0.8'

* v0.8:
  Don't leak writer and index goroutines on close
  Clean up protocol locking and closing
  Send initial index in batches
  Always send initial index, even if empty (ref #344)
  Simplify locking in protocol.Index
  Protocol state machine on receiving side
  Log client version on connect
  Handle query parameters in UPnP control URL (fixes #211)
  Avoid deadlock during initial scan (fixes #389)
  Add temporary debug logging for #344 (revert later)
  Tone down UPnP not found message (fixes #406)
This commit is contained in:
Jakob Borg 2014-07-04 15:16:41 +02:00
commit 3c4002e149
6 changed files with 194 additions and 103 deletions

View File

@ -483,7 +483,10 @@ func setupUPnP(r rand.Source) int {
l.Warnln("Failed to create UPnP port mapping") l.Warnln("Failed to create UPnP port mapping")
} }
} else { } else {
l.Infof("No UPnP IGD device found, no port mapping created (%v)", err) l.Infof("No UPnP gateway detected")
if debugNet {
l.Debugf("UPnP: %v", err)
}
} }
} }
} else { } else {

View File

@ -49,6 +49,7 @@ func (m *Set) Replace(id uint, fs []scanner.File) {
} }
m.Lock() m.Lock()
log("Replace", id, len(fs))
if len(fs) == 0 || !m.equals(id, fs) { if len(fs) == 0 || !m.equals(id, fs) {
m.changes[id]++ m.changes[id]++
m.replace(id, fs) m.replace(id, fs)
@ -65,6 +66,7 @@ func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
} }
m.Lock() m.Lock()
log("ReplaceWithDelete", id, len(fs))
if len(fs) == 0 || !m.equals(id, fs) { if len(fs) == 0 || !m.equals(id, fs) {
m.changes[id]++ m.changes[id]++
@ -102,7 +104,9 @@ func (m *Set) Update(id uint, fs []scanner.File) {
if debug { if debug {
l.Debugf("Update(%d, [%d])", id, len(fs)) l.Debugf("Update(%d, [%d])", id, len(fs))
} }
m.Lock() m.Lock()
log("Update", id, len(fs))
m.update(id, fs) m.update(id, fs)
m.changes[id]++ m.changes[id]++
m.Unlock() m.Unlock()
@ -220,6 +224,7 @@ func (m *Set) equals(id uint, fs []scanner.File) bool {
func (m *Set) update(cid uint, fs []scanner.File) { func (m *Set) update(cid uint, fs []scanner.File) {
remFiles := m.remoteKey[cid] remFiles := m.remoteKey[cid]
if remFiles == nil { if remFiles == nil {
printLog()
l.Fatalln("update before replace for cid", cid) l.Fatalln("update before replace for cid", cid)
} }
for _, f := range fs { for _, f := range fs {

51
files/set_debug.go Normal file
View File

@ -0,0 +1,51 @@
package files
import (
"fmt"
"time"
"github.com/calmh/syncthing/cid"
)
type logEntry struct {
time time.Time
method string
cid uint
node string
nfiles int
}
func (l logEntry) String() string {
return fmt.Sprintf("%v: %s cid:%d node:%s nfiles:%d", l.time, l.method, l.cid, l.node, l.nfiles)
}
var (
	// debugLog is a fixed-size ring buffer holding the most recent Set
	// operations, dumped by printLog on a consistency error.
	debugLog [10]logEntry
	// debugNext is the ring buffer index where the next entry is stored.
	debugNext int
	// cm resolves connection IDs to node names for log entries; set via
	// SetCM and may be nil, in which case entries carry no node name.
	cm *cid.Map
)
// SetCM registers the cid.Map used to resolve connection IDs to node
// names in debug log entries (temporary instrumentation for #344).
func SetCM(m *cid.Map) {
	cm = m
}
// log records one Set operation in the debugLog ring buffer, resolving
// the node name for the given connection ID when a cid.Map has been
// registered via SetCM.
func log(method string, id uint, nfiles int) {
	entry := logEntry{
		time:   time.Now(),
		method: method,
		cid:    id,
		nfiles: nfiles,
	}
	if cm != nil {
		entry.node = cm.Name(id)
	}
	debugLog[debugNext] = entry
	// Advance the ring buffer index, wrapping around at the end.
	debugNext = (debugNext + 1) % len(debugLog)
}
// printLog dumps the ring buffer of recent Set operations to the debug
// logger. It is called just before reporting a fatal consistency error,
// to aid debugging of #344. Entries are printed in buffer order, not
// chronological order; the timestamps disambiguate.
func printLog() {
	l.Debugln("--- Consistency error ---")
	for _, e := range debugLog {
		l.Debugln(e)
	}
}

View File

@ -98,6 +98,9 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
sup: suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}, sup: suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
} }
// TEMP: #344
files.SetCM(m.cm)
var timeout = 20 * 60 // seconds var timeout = 20 * 60 // seconds
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 { if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
it, err := strconv.Atoi(t) it, err := strconv.Atoi(t)
@ -369,6 +372,8 @@ func (m *Model) ClusterConfig(nodeID protocol.NodeID, config protocol.ClusterCon
m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion
} }
m.pmut.Unlock() m.pmut.Unlock()
l.Infof(`Node %s client is "%s %s"`, nodeID, config.ClientName, config.ClientVersion)
} }
// Close removes the peer from the model and closes the underlying connection if possible. // Close removes the peer from the model and closes the underlying connection if possible.
@ -451,19 +456,6 @@ func (m *Model) ReplaceLocal(repo string, fs []scanner.File) {
m.rmut.RUnlock() m.rmut.RUnlock()
} }
func (m *Model) SeedLocal(repo string, fs []protocol.FileInfo) {
var sfs = make([]scanner.File, len(fs))
for i := 0; i < len(fs); i++ {
lamport.Default.Tick(fs[i].Version)
sfs[i] = fileFromFileInfo(fs[i])
sfs[i].Suppressed = false // we might have saved an index with files that were suppressed; they should not be on startup
}
m.rmut.RLock()
m.repoFiles[repo].Replace(cid.LocalID, sfs)
m.rmut.RUnlock()
}
func (m *Model) CurrentRepoFile(repo string, file string) scanner.File { func (m *Model) CurrentRepoFile(repo string, file string) scanner.File {
m.rmut.RLock() m.rmut.RLock()
f := m.repoFiles[repo].Get(cid.LocalID, file) f := m.repoFiles[repo].Get(cid.LocalID, file)
@ -528,7 +520,14 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
if debug { if debug {
l.Debugf("IDX(out/initial): %s: %q: %d files", nodeID, repo, len(idx)) l.Debugf("IDX(out/initial): %s: %q: %d files", nodeID, repo, len(idx))
} }
protoConn.Index(repo, idx) const batchSize = 1000
for i := 0; i < len(idx); i += batchSize {
if len(idx[i:]) < batchSize {
protoConn.Index(repo, idx[i:])
} else {
protoConn.Index(repo, idx[i:i+batchSize])
}
}
} }
}() }()
} }
@ -733,7 +732,15 @@ func (m *Model) LoadIndexes(dir string) {
m.rmut.RLock() m.rmut.RLock()
for repo := range m.repoCfgs { for repo := range m.repoCfgs {
fs := m.loadIndex(repo, dir) fs := m.loadIndex(repo, dir)
m.SeedLocal(repo, fs)
var sfs = make([]scanner.File, len(fs))
for i := 0; i < len(fs); i++ {
lamport.Default.Tick(fs[i].Version)
sfs[i] = fileFromFileInfo(fs[i])
sfs[i].Suppressed = false // we might have saved an index with files that were suppressed; they should not be on startup
}
m.repoFiles[repo].Replace(cid.LocalID, sfs)
} }
m.rmut.RUnlock() m.rmut.RUnlock()
} }

View File

@ -28,6 +28,12 @@ const (
messageTypeIndexUpdate = 6 messageTypeIndexUpdate = 6
) )
const (
stateInitial = iota
stateCCRcvd
stateIdxRcvd
)
const ( const (
FlagDeleted uint32 = 1 << 12 FlagDeleted uint32 = 1 << 12
FlagInvalid = 1 << 13 FlagInvalid = 1 << 13
@ -70,26 +76,27 @@ type Connection interface {
type rawConnection struct { type rawConnection struct {
id NodeID id NodeID
receiver Model receiver Model
state int
reader io.ReadCloser reader io.ReadCloser
cr *countingReader cr *countingReader
xr *xdr.Reader xr *xdr.Reader
writer io.WriteCloser
writer io.WriteCloser
cw *countingWriter cw *countingWriter
wb *bufio.Writer wb *bufio.Writer
xw *xdr.Writer xw *xdr.Writer
wmut sync.Mutex
indexSent map[string]map[string]uint64
awaiting []chan asyncResult awaiting []chan asyncResult
imut sync.Mutex awaitingMut sync.Mutex
idxSent map[string]map[string]uint64
idxMut sync.Mutex // ensures serialization of Index calls idxMut sync.Mutex // ensures serialization of Index calls
nextID chan int nextID chan int
outbox chan []encodable outbox chan []encodable
closed chan struct{} closed chan struct{}
once sync.Once
} }
type asyncResult struct { type asyncResult struct {
@ -116,6 +123,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
c := rawConnection{ c := rawConnection{
id: nodeID, id: nodeID,
receiver: nativeModel{receiver}, receiver: nativeModel{receiver},
state: stateInitial,
reader: flrd, reader: flrd,
cr: cr, cr: cr,
xr: xdr.NewReader(flrd), xr: xdr.NewReader(flrd),
@ -124,7 +132,7 @@ func NewConnection(nodeID NodeID, reader io.Reader, writer io.Writer, receiver M
wb: wb, wb: wb,
xw: xdr.NewWriter(wb), xw: xdr.NewWriter(wb),
awaiting: make([]chan asyncResult, 0x1000), awaiting: make([]chan asyncResult, 0x1000),
indexSent: make(map[string]map[string]uint64), idxSent: make(map[string]map[string]uint64),
outbox: make(chan []encodable), outbox: make(chan []encodable),
nextID: make(chan int), nextID: make(chan int),
closed: make(chan struct{}), closed: make(chan struct{}),
@ -148,31 +156,29 @@ func (c *rawConnection) Index(repo string, idx []FileInfo) {
c.idxMut.Lock() c.idxMut.Lock()
defer c.idxMut.Unlock() defer c.idxMut.Unlock()
c.imut.Lock()
var msgType int var msgType int
if c.indexSent[repo] == nil { if c.idxSent[repo] == nil {
// This is the first time we send an index. // This is the first time we send an index.
msgType = messageTypeIndex msgType = messageTypeIndex
c.indexSent[repo] = make(map[string]uint64) c.idxSent[repo] = make(map[string]uint64)
for _, f := range idx { for _, f := range idx {
c.indexSent[repo][f.Name] = f.Version c.idxSent[repo][f.Name] = f.Version
} }
} else { } else {
// We have sent one full index. Only send updates now. // We have sent one full index. Only send updates now.
msgType = messageTypeIndexUpdate msgType = messageTypeIndexUpdate
var diff []FileInfo var diff []FileInfo
for _, f := range idx { for _, f := range idx {
if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Version != vs { if vs, ok := c.idxSent[repo][f.Name]; !ok || f.Version != vs {
diff = append(diff, f) diff = append(diff, f)
c.indexSent[repo][f.Name] = f.Version c.idxSent[repo][f.Name] = f.Version
} }
} }
idx = diff idx = diff
} }
c.imut.Unlock()
if len(idx) > 0 { if msgType == messageTypeIndex || len(idx) > 0 {
c.send(header{0, -1, msgType}, IndexMessage{repo, idx}) c.send(header{0, -1, msgType}, IndexMessage{repo, idx})
} }
} }
@ -186,13 +192,13 @@ func (c *rawConnection) Request(repo string, name string, offset int64, size int
return nil, ErrClosed return nil, ErrClosed
} }
c.imut.Lock() c.awaitingMut.Lock()
if ch := c.awaiting[id]; ch != nil { if ch := c.awaiting[id]; ch != nil {
panic("id taken") panic("id taken")
} }
rc := make(chan asyncResult) rc := make(chan asyncResult, 1)
c.awaiting[id] = rc c.awaiting[id] = rc
c.imut.Unlock() c.awaitingMut.Unlock()
ok := c.send(header{0, id, messageTypeRequest}, ok := c.send(header{0, id, messageTypeRequest},
RequestMessage{repo, name, uint64(offset), uint32(size)}) RequestMessage{repo, name, uint64(offset), uint32(size)})
@ -221,9 +227,9 @@ func (c *rawConnection) ping() bool {
} }
rc := make(chan asyncResult, 1) rc := make(chan asyncResult, 1)
c.imut.Lock() c.awaitingMut.Lock()
c.awaiting[id] = rc c.awaiting[id] = rc
c.imut.Unlock() c.awaitingMut.Unlock()
ok := c.send(header{0, id, messageTypePing}) ok := c.send(header{0, id, messageTypePing})
if !ok { if !ok {
@ -257,21 +263,34 @@ func (c *rawConnection) readerLoop() (err error) {
switch hdr.msgType { switch hdr.msgType {
case messageTypeIndex: case messageTypeIndex:
if c.state < stateCCRcvd {
return fmt.Errorf("protocol error: index message in state %d", c.state)
}
if err := c.handleIndex(); err != nil { if err := c.handleIndex(); err != nil {
return err return err
} }
c.state = stateIdxRcvd
case messageTypeIndexUpdate: case messageTypeIndexUpdate:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: index update message in state %d", c.state)
}
if err := c.handleIndexUpdate(); err != nil { if err := c.handleIndexUpdate(); err != nil {
return err return err
} }
case messageTypeRequest: case messageTypeRequest:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: request message in state %d", c.state)
}
if err := c.handleRequest(hdr); err != nil { if err := c.handleRequest(hdr); err != nil {
return err return err
} }
case messageTypeResponse: case messageTypeResponse:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: response message in state %d", c.state)
}
if err := c.handleResponse(hdr); err != nil { if err := c.handleResponse(hdr); err != nil {
return err return err
} }
@ -283,9 +302,13 @@ func (c *rawConnection) readerLoop() (err error) {
c.handlePong(hdr) c.handlePong(hdr)
case messageTypeClusterConfig: case messageTypeClusterConfig:
if c.state != stateInitial {
return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
}
if err := c.handleClusterConfig(); err != nil { if err := c.handleClusterConfig(); err != nil {
return err return err
} }
c.state = stateCCRcvd
default: default:
return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType) return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
@ -309,12 +332,17 @@ func (c *rawConnection) indexSerializerLoop() {
// large index update from the other side. But we must also ensure to // large index update from the other side. But we must also ensure to
// process the indexes in the order they are received, hence the separate // process the indexes in the order they are received, hence the separate
// routine and buffered channel. // routine and buffered channel.
for ii := range incomingIndexes { for {
select {
case ii := <-incomingIndexes:
if ii.update { if ii.update {
c.receiver.IndexUpdate(ii.id, ii.repo, ii.files) c.receiver.IndexUpdate(ii.id, ii.repo, ii.files)
} else { } else {
c.receiver.Index(ii.id, ii.repo, ii.files) c.receiver.Index(ii.id, ii.repo, ii.files)
} }
case <-c.closed:
return
}
} }
} }
@ -365,32 +393,25 @@ func (c *rawConnection) handleResponse(hdr header) error {
return err return err
} }
go func(hdr header, err error) { c.awaitingMut.Lock()
c.imut.Lock() if rc := c.awaiting[hdr.msgID]; rc != nil {
rc := c.awaiting[hdr.msgID]
c.awaiting[hdr.msgID] = nil c.awaiting[hdr.msgID] = nil
c.imut.Unlock() rc <- asyncResult{data, nil}
if rc != nil {
rc <- asyncResult{data, err}
close(rc) close(rc)
} }
}(hdr, c.xr.Error()) c.awaitingMut.Unlock()
return nil return nil
} }
func (c *rawConnection) handlePong(hdr header) { func (c *rawConnection) handlePong(hdr header) {
c.imut.Lock() c.awaitingMut.Lock()
if rc := c.awaiting[hdr.msgID]; rc != nil { if rc := c.awaiting[hdr.msgID]; rc != nil {
go func() { c.awaiting[hdr.msgID] = nil
rc <- asyncResult{} rc <- asyncResult{}
close(rc) close(rc)
}()
c.awaiting[hdr.msgID] = nil
} }
c.imut.Unlock() c.awaitingMut.Unlock()
} }
func (c *rawConnection) handleClusterConfig() error { func (c *rawConnection) handleClusterConfig() error {
@ -434,18 +455,20 @@ func (c *rawConnection) send(h header, es ...encodable) bool {
func (c *rawConnection) writerLoop() { func (c *rawConnection) writerLoop() {
var err error var err error
for es := range c.outbox { for {
c.wmut.Lock() select {
case es := <-c.outbox:
for _, e := range es { for _, e := range es {
e.encodeXDR(c.xw) e.encodeXDR(c.xw)
} }
if err = c.flush(); err != nil { if err = c.flush(); err != nil {
c.wmut.Unlock()
c.close(err) c.close(err)
return return
} }
c.wmut.Unlock() case <-c.closed:
return
}
} }
} }
@ -470,29 +493,20 @@ func (c *rawConnection) flush() error {
} }
func (c *rawConnection) close(err error) { func (c *rawConnection) close(err error) {
c.imut.Lock() c.once.Do(func() {
c.wmut.Lock()
defer c.imut.Unlock()
defer c.wmut.Unlock()
select {
case <-c.closed:
return
default:
close(c.closed) close(c.closed)
c.awaitingMut.Lock()
for i, ch := range c.awaiting { for i, ch := range c.awaiting {
if ch != nil { if ch != nil {
close(ch) close(ch)
c.awaiting[i] = nil c.awaiting[i] = nil
} }
} }
c.awaitingMut.Unlock()
c.writer.Close()
c.reader.Close()
go c.receiver.Close(c.id, err) go c.receiver.Close(c.id, err)
} })
} }
func (c *rawConnection) idGenerator() { func (c *rawConnection) idGenerator() {
@ -554,8 +568,7 @@ func (c *rawConnection) pingerLoop() {
func (c *rawConnection) processRequest(msgID int, req RequestMessage) { func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size)) data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
c.send(header{0, msgID, messageTypeResponse}, c.send(header{0, msgID, messageTypeResponse}, encodableBytes(data))
encodableBytes(data))
} }
type Statistics struct { type Statistics struct {

View File

@ -203,14 +203,26 @@ func getServiceURL(rootURL string) (string, error) {
} }
u, _ := url.Parse(rootURL) u, _ := url.Parse(rootURL)
if svc.ControlURL[0] == '/' { replaceRawPath(u, svc.ControlURL)
u.Path = svc.ControlURL
} else {
u.Path += svc.ControlURL
}
return u.String(), nil return u.String(), nil
} }
func replaceRawPath(u *url.URL, rp string) {
var p, q string
fs := strings.Split(rp, "?")
p = fs[0]
if len(fs) > 1 {
q = fs[1]
}
if p[0] == '/' {
u.Path = p
} else {
u.Path += p
}
u.RawQuery = q
}
func soapRequest(url, function, message string) error { func soapRequest(url, function, message string) error {
tpl := `<?xml version="1.0" ?> tpl := `<?xml version="1.0" ?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">