Merge branch 'master' into pr/511

* master: (21 commits)
  Mechanism to stop external announcement routine
  Update goleveldb
  Perfstats are not supported on Windows
  Build should fail if a platform does not build
  Include perfstats and heap profiles in standard build
  Actually no, lets not do uploads at all from the build script.
  ./build.sh upload build server artifacts
  Sign checksums, not files.
  Badges, add build server
  Remove Solaris build again, for now
  Travis should build with 1.3 + tip
  Translation update
  Indicate aproximativeness of repo sizes...
  Slightly more conservative guess on file size
  Fix set tests
  Small goleveldb hack to reduce allocations somewhat
  Don't load block lists from db unless necessary
  Rip out the Suppressor (maybe to be reintroduced)
  Reduce allocations while hash scanning
  Add heap profiling support
  ...

Conflicts:
	discover/discover.go
This commit is contained in:
Jakob Borg 2014-08-14 12:48:33 +02:00
commit 5f1bf9d9d6
43 changed files with 1416 additions and 609 deletions

View File

@ -1,6 +1,7 @@
language: go language: go
go: go:
- 1.3
- tip - tip
install: install:

4
Godeps/Godeps.json generated
View File

@ -1,6 +1,6 @@
{ {
"ImportPath": "github.com/syncthing/syncthing", "ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.3", "GoVersion": "go1.3.1",
"Packages": [ "Packages": [
"./cmd/..." "./cmd/..."
], ],
@ -49,7 +49,7 @@
}, },
{ {
"ImportPath": "github.com/syndtr/goleveldb/leveldb", "ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "c5955912e3287376475731c5bc59c79a5a799105" "Rev": "6f6f5d93f7499d2c505c2839c1d6b28b25a2ce21"
}, },
{ {
"ImportPath": "github.com/vitrun/qart/coding", "ImportPath": "github.com/vitrun/qart/coding",

View File

@ -13,6 +13,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
"sync/atomic"
"testing" "testing"
"github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/iterator"
@ -170,7 +171,7 @@ func (p *dbBench) writes(perBatch int) {
b.SetBytes(116) b.SetBytes(116)
} }
func (p *dbBench) drop() { func (p *dbBench) gc() {
p.keys, p.values = nil, nil p.keys, p.values = nil, nil
runtime.GC() runtime.GC()
} }
@ -249,6 +250,7 @@ func (p *dbBench) newIter() iterator.Iterator {
} }
func (p *dbBench) close() { func (p *dbBench) close() {
p.b.Log(p.db.s.tops.bpool)
p.db.Close() p.db.Close()
p.stor.Close() p.stor.Close()
os.RemoveAll(benchDB) os.RemoveAll(benchDB)
@ -331,7 +333,7 @@ func BenchmarkDBRead(b *testing.B) {
p := openDBBench(b, false) p := openDBBench(b, false)
p.populate(b.N) p.populate(b.N)
p.fill() p.fill()
p.drop() p.gc()
iter := p.newIter() iter := p.newIter()
b.ResetTimer() b.ResetTimer()
@ -343,6 +345,50 @@ func BenchmarkDBRead(b *testing.B) {
p.close() p.close()
} }
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}
func BenchmarkDBReadGC(b *testing.B) { func BenchmarkDBReadGC(b *testing.B) {
p := openDBBench(b, false) p := openDBBench(b, false)
p.populate(b.N) p.populate(b.N)
@ -362,7 +408,7 @@ func BenchmarkDBReadUncompressed(b *testing.B) {
p := openDBBench(b, true) p := openDBBench(b, true)
p.populate(b.N) p.populate(b.N)
p.fill() p.fill()
p.drop() p.gc()
iter := p.newIter() iter := p.newIter()
b.ResetTimer() b.ResetTimer()
@ -379,7 +425,7 @@ func BenchmarkDBReadTable(b *testing.B) {
p.populate(b.N) p.populate(b.N)
p.fill() p.fill()
p.reopen() p.reopen()
p.drop() p.gc()
iter := p.newIter() iter := p.newIter()
b.ResetTimer() b.ResetTimer()
@ -395,7 +441,7 @@ func BenchmarkDBReadReverse(b *testing.B) {
p := openDBBench(b, false) p := openDBBench(b, false)
p.populate(b.N) p.populate(b.N)
p.fill() p.fill()
p.drop() p.gc()
iter := p.newIter() iter := p.newIter()
b.ResetTimer() b.ResetTimer()
@ -413,7 +459,7 @@ func BenchmarkDBReadReverseTable(b *testing.B) {
p.populate(b.N) p.populate(b.N)
p.fill() p.fill()
p.reopen() p.reopen()
p.drop() p.gc()
iter := p.newIter() iter := p.newIter()
b.ResetTimer() b.ResetTimer()

View File

@ -257,6 +257,7 @@ func recoverTable(s *session, o *opt.Options) error {
var mSeq uint64 var mSeq uint64
var good, corrupted int var good, corrupted int
rec := new(sessionRecord) rec := new(sessionRecord)
bpool := util.NewBufferPool(o.GetBlockSize() + 5)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
tmp = s.newTemp() tmp = s.newTemp()
writer, err := tmp.Create() writer, err := tmp.Create()
@ -314,7 +315,7 @@ func recoverTable(s *session, o *opt.Options) error {
var tSeq uint64 var tSeq uint64
var tgood, tcorrupted, blockerr int var tgood, tcorrupted, blockerr int
var imin, imax []byte var imin, imax []byte
tr := table.NewReader(reader, size, nil, o) tr := table.NewReader(reader, size, nil, bpool, o)
iter := tr.NewIterator(nil, nil) iter := tr.NewIterator(nil, nil)
iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) { iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
s.logf("table@recovery found error @%d %q", file.Num(), err) s.logf("table@recovery found error @%d %q", file.Num(), err)
@ -481,10 +482,11 @@ func (db *DB) recoverJournal() error {
buf.Reset() buf.Reset()
if _, err := buf.ReadFrom(r); err != nil { if _, err := buf.ReadFrom(r); err != nil {
if strict { if err == io.ErrUnexpectedEOF {
continue
} else {
return err return err
} }
continue
} }
if err := batch.decode(buf.Bytes()); err != nil { if err := batch.decode(buf.Bytes()); err != nil {
return err return err

View File

@ -103,18 +103,18 @@ type flusher interface {
Flush() error Flush() error
} }
// DroppedError is the error type that passed to Dropper.Drop method. // ErrCorrupted is the error type that generated by corrupted block or chunk.
type DroppedError struct { type ErrCorrupted struct {
Size int Size int
Reason string Reason string
} }
func (e DroppedError) Error() string { func (e ErrCorrupted) Error() string {
return fmt.Sprintf("leveldb/journal: dropped %d bytes: %s", e.Size, e.Reason) return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
} }
// Dropper is the interface that wrap simple Drop method. The Drop // Dropper is the interface that wrap simple Drop method. The Drop
// method will be called when the journal reader dropping a chunk. // method will be called when the journal reader dropping a block or chunk.
type Dropper interface { type Dropper interface {
Drop(err error) Drop(err error)
} }
@ -158,76 +158,78 @@ func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
} }
} }
var errSkip = errors.New("leveldb/journal: skipped")
func (r *Reader) corrupt(n int, reason string, skip bool) error {
if r.dropper != nil {
r.dropper.Drop(ErrCorrupted{n, reason})
}
if r.strict && !skip {
r.err = ErrCorrupted{n, reason}
return r.err
}
return errSkip
}
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the // nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary. // next block into the buffer if necessary.
func (r *Reader) nextChunk(wantFirst, skip bool) error { func (r *Reader) nextChunk(first bool) error {
for { for {
if r.j+headerSize <= r.n { if r.j+headerSize <= r.n {
checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
chunkType := r.buf[r.j+6] chunkType := r.buf[r.j+6]
var err error
if checksum == 0 && length == 0 && chunkType == 0 { if checksum == 0 && length == 0 && chunkType == 0 {
// Drop entire block. // Drop entire block.
err = DroppedError{r.n - r.j, "zero header"} m := r.n - r.j
r.i = r.n r.i = r.n
r.j = r.n r.j = r.n
return r.corrupt(m, "zero header", false)
} else { } else {
m := r.n - r.j m := r.n - r.j
r.i = r.j + headerSize r.i = r.j + headerSize
r.j = r.j + headerSize + int(length) r.j = r.j + headerSize + int(length)
if r.j > r.n { if r.j > r.n {
// Drop entire block. // Drop entire block.
err = DroppedError{m, "chunk length overflows block"}
r.i = r.n r.i = r.n
r.j = r.n r.j = r.n
return r.corrupt(m, "chunk length overflows block", false)
} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
// Drop entire block. // Drop entire block.
err = DroppedError{m, "checksum mismatch"}
r.i = r.n r.i = r.n
r.j = r.n r.j = r.n
return r.corrupt(m, "checksum mismatch", false)
} }
} }
if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType { if first && chunkType != fullChunkType && chunkType != firstChunkType {
if skip { m := r.j - r.i
// The chunk are intentionally skipped. r.i = r.j
if chunkType == lastChunkType { // Report the error, but skip it.
skip = false return r.corrupt(m+headerSize, "orphan chunk", true)
} }
continue
} else {
// Drop the chunk.
err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
}
}
if err == nil {
r.last = chunkType == fullChunkType || chunkType == lastChunkType r.last = chunkType == fullChunkType || chunkType == lastChunkType
} else { return nil
if r.dropper != nil {
r.dropper.Drop(err)
} }
if r.strict {
r.err = err // The last block.
if r.n < blockSize && r.n > 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
} }
r.err = io.EOF
return r.err
} }
// Read block.
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err return err
} }
if r.n < blockSize && r.n > 0 {
// This is the last block.
if r.j != r.n {
r.err = io.ErrUnexpectedEOF
} else {
r.err = io.EOF
}
return r.err
}
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.ErrUnexpectedEOF {
r.err = err
return r.err
}
if n == 0 { if n == 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF r.err = io.EOF
return r.err return r.err
} }
@ -237,29 +239,26 @@ func (r *Reader) nextChunk(wantFirst, skip bool) error {
// Next returns a reader for the next journal. It returns io.EOF if there are no // Next returns a reader for the next journal. It returns io.EOF if there are no
// more journals. The reader returned becomes stale after the next Next call, // more journals. The reader returned becomes stale after the next Next call,
// and should no longer be used. // and should no longer be used. If strict is false, the reader will returns
// io.ErrUnexpectedEOF error when found corrupted journal.
func (r *Reader) Next() (io.Reader, error) { func (r *Reader) Next() (io.Reader, error) {
r.seq++ r.seq++
if r.err != nil { if r.err != nil {
return nil, r.err return nil, r.err
} }
skip := !r.last
for {
r.i = r.j r.i = r.j
if r.nextChunk(true, skip) != nil { for {
// So that 'orphan chunk' drop will be reported. if err := r.nextChunk(true); err == nil {
skip = false
} else {
break break
} } else if err != errSkip {
if r.err != nil { return nil, err
return nil, r.err
} }
} }
return &singleReader{r, r.seq, nil}, nil return &singleReader{r, r.seq, nil}, nil
} }
// Reset resets the journal reader, allows reuse of the journal reader. // Reset resets the journal reader, allows reuse of the journal reader. Reset returns
// last accumulated error.
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
r.seq++ r.seq++
err := r.err err := r.err
@ -296,7 +295,11 @@ func (x *singleReader) Read(p []byte) (int, error) {
if r.last { if r.last {
return 0, io.EOF return 0, io.EOF
} }
if x.err = r.nextChunk(false, false); x.err != nil { x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err return 0, x.err
} }
} }
@ -320,7 +323,11 @@ func (x *singleReader) ReadByte() (byte, error) {
if r.last { if r.last {
return 0, io.EOF return 0, io.EOF
} }
if x.err = r.nextChunk(false, false); x.err != nil { x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err return 0, x.err
} }
} }

View File

@ -12,6 +12,7 @@ package journal
import ( import (
"bytes" "bytes"
"encoding/binary"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -326,3 +327,492 @@ func TestStaleWriter(t *testing.T) {
t.Fatalf("stale write #1: unexpected error: %v", err) t.Fatalf("stale write #1: unexpected error: %v", err)
} }
} }
func TestCorrupt_MissingLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
// Cut the last block.
b := buf.Bytes()[:blockSize]
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read.
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if n != blockSize-1024 {
t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
}
// Second read.
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #0.
for i := 0; i < 1024; i++ {
b[i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (third record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #1.
for i := 0; i < 1024; i++ {
b[blockSize+i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
// Third read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #3.
for i := len(b) - 1; i > len(b)-1024; i-- {
b[i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize - headerSize); n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
// Third read (third record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
// Fourth read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #3: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting record #1.
x := blockSize
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting record #1.
x := blockSize/2 + headerSize
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (third record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}

View File

@ -22,7 +22,7 @@ type dropper struct {
} }
func (d dropper) Drop(err error) { func (d dropper) Drop(err error) {
if e, ok := err.(journal.DroppedError); ok { if e, ok := err.(journal.ErrCorrupted); ok {
d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
} else { } else {
d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)

View File

@ -275,6 +275,7 @@ type tOps struct {
s *session s *session
cache cache.Cache cache cache.Cache
cacheNS cache.Namespace cacheNS cache.Namespace
bpool *util.BufferPool
} }
// Creates an empty table and returns table writer. // Creates an empty table and returns table writer.
@ -340,7 +341,7 @@ func (t *tOps) open(f *tFile) (c cache.Object, err error) {
} }
ok = true ok = true
value = table.NewReader(r, int64(f.size), cacheNS, o) value = table.NewReader(r, int64(f.size), cacheNS, t.bpool, o)
charge = 1 charge = 1
fin = func() { fin = func() {
r.Close() r.Close()
@ -412,8 +413,12 @@ func (t *tOps) close() {
// Creates new initialized table ops instance. // Creates new initialized table ops instance.
func newTableOps(s *session, cacheCap int) *tOps { func newTableOps(s *session, cacheCap int) *tOps {
c := cache.NewLRUCache(cacheCap) c := cache.NewLRUCache(cacheCap)
ns := c.GetNamespace(0) return &tOps{
return &tOps{s, c, ns} s: s,
cache: c,
cacheNS: c.GetNamespace(0),
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
}
} }
// tWriter wraps the table writer. It keep track of file descriptor // tWriter wraps the table writer. It keep track of file descriptor

View File

@ -437,6 +437,7 @@ func (i *blockIter) Value() []byte {
} }
func (i *blockIter) Release() { func (i *blockIter) Release() {
if i.dir > dirReleased {
i.prevNode = nil i.prevNode = nil
i.prevKeys = nil i.prevKeys = nil
i.key = nil i.key = nil
@ -450,6 +451,7 @@ func (i *blockIter) Release() {
i.releaser.Release() i.releaser.Release()
i.releaser = nil i.releaser = nil
} }
}
} }
func (i *blockIter) SetReleaser(releaser util.Releaser) { func (i *blockIter) SetReleaser(releaser util.Releaser) {
@ -519,6 +521,7 @@ type Reader struct {
reader io.ReaderAt reader io.ReaderAt
cache cache.Namespace cache cache.Namespace
err error err error
bpool *util.BufferPool
// Options // Options
cmp comparer.Comparer cmp comparer.Comparer
filter filter.Filter filter filter.Filter
@ -538,7 +541,7 @@ func verifyChecksum(data []byte) bool {
} }
func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) { func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
data := make([]byte, bh.length+blockTrailerLen) data := r.bpool.Get(int(bh.length + blockTrailerLen))
if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
return nil, err return nil, err
} }
@ -551,8 +554,13 @@ func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
case blockTypeNoCompression: case blockTypeNoCompression:
data = data[:bh.length] data = data[:bh.length]
case blockTypeSnappyCompression: case blockTypeSnappyCompression:
var err error decLen, err := snappy.DecodedLen(data[:bh.length])
data, err = snappy.Decode(nil, data[:bh.length]) if err != nil {
return nil, err
}
tmp := data
data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length])
r.bpool.Put(tmp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -602,6 +610,18 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
return b, nil return b, nil
} }
type releaseBlock struct {
r *Reader
b *block
}
func (r releaseBlock) Release() {
if r.b.data != nil {
r.r.bpool.Put(r.b.data)
r.b.data = nil
}
}
func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator { func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
if r.cache != nil { if r.cache != nil {
// Get/set block cache. // Get/set block cache.
@ -616,6 +636,10 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
ok = true ok = true
value = dataBlock value = dataBlock
charge = int(dataBH.length) charge = int(dataBH.length)
fin = func() {
r.bpool.Put(dataBlock.data)
dataBlock.data = nil
}
} }
return return
}) })
@ -638,7 +662,7 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
if err != nil { if err != nil {
return iterator.NewEmptyIterator(err) return iterator.NewEmptyIterator(err)
} }
iter := dataBlock.newIterator(slice, false, nil) iter := dataBlock.newIterator(slice, false, releaseBlock{r, dataBlock})
return iter return iter
} }
@ -708,8 +732,11 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
} }
return return
} }
// Don't use block buffer, no need to copy the buffer.
rkey = data.Key() rkey = data.Key()
value = data.Value() // Use block buffer, and since the buffer will be recycled, the buffer
// need to be copied.
value = append([]byte{}, data.Value()...)
return return
} }
@ -760,13 +787,17 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
} }
// NewReader creates a new initialized table reader for the file. // NewReader creates a new initialized table reader for the file.
// The cache is optional and can be nil. // The cache and bpool is optional and can be nil.
// //
// The returned table reader instance is goroutine-safe. // The returned table reader instance is goroutine-safe.
func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options) *Reader { func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader {
if bpool == nil {
bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen)
}
r := &Reader{ r := &Reader{
reader: f, reader: f,
cache: cache, cache: cache,
bpool: bpool,
cmp: o.GetComparer(), cmp: o.GetComparer(),
checksum: o.GetStrict(opt.StrictBlockChecksum), checksum: o.GetStrict(opt.StrictBlockChecksum),
strictIter: o.GetStrict(opt.StrictIterator), strictIter: o.GetStrict(opt.StrictIterator),

View File

@ -59,7 +59,7 @@ var _ = testutil.Defer(func() {
It("Should be able to approximate offset of a key correctly", func() { It("Should be able to approximate offset of a key correctly", func() {
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o) tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o)
CheckOffset := func(key string, expect, threshold int) { CheckOffset := func(key string, expect, threshold int) {
offset, err := tr.OffsetOf([]byte(key)) offset, err := tr.OffsetOf([]byte(key))
Expect(err).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred())
@ -95,7 +95,7 @@ var _ = testutil.Defer(func() {
tw.Close() tw.Close()
// Opening the table. // Opening the table.
tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o) tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o)
return tableWrapper{tr} return tableWrapper{tr}
} }
Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {

View File

@ -0,0 +1,126 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package util
import (
"fmt"
"sync"
"sync/atomic"
)
type buffer struct {
b []byte
miss int
}
// BufferPool is a 'buffer pool'.
type BufferPool struct {
pool [4]sync.Pool
size [3]uint32
sizeMiss [3]uint32
baseline0 int
baseline1 int
baseline2 int
less uint32
equal uint32
greater uint32
miss uint32
}
func (p *BufferPool) poolNum(n int) int {
switch {
case n <= p.baseline0:
return 0
case n <= p.baseline1:
return 1
case n <= p.baseline2:
return 2
default:
return 3
}
}
// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
if poolNum := p.poolNum(n); poolNum == 0 {
// Fast path.
if b, ok := p.pool[0].Get().([]byte); ok {
switch {
case cap(b) > n:
atomic.AddUint32(&p.less, 1)
return b[:n]
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
panic("not reached")
}
} else {
atomic.AddUint32(&p.miss, 1)
}
return make([]byte, n, p.baseline0)
} else {
sizePtr := &p.size[poolNum-1]
if b, ok := p.pool[poolNum].Get().([]byte); ok {
switch {
case cap(b) > n:
atomic.AddUint32(&p.less, 1)
return b[:n]
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
atomic.AddUint32(&p.greater, 1)
if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
p.pool[poolNum].Put(b)
}
}
} else {
atomic.AddUint32(&p.miss, 1)
}
if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
if size == 0 {
atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
} else {
sizeMissPtr := &p.sizeMiss[poolNum-1]
if atomic.AddUint32(sizeMissPtr, 1) == 20 {
atomic.StoreUint32(sizePtr, uint32(n))
atomic.StoreUint32(sizeMissPtr, 0)
}
}
return make([]byte, n)
} else {
return make([]byte, n, size)
}
}
}
// Put adds given buffer to the pool.
func (p *BufferPool) Put(b []byte) {
p.pool[p.poolNum(cap(b))].Put(b)
}
func (p *BufferPool) String() string {
return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v L·%d E·%d G·%d M·%d}",
p.baseline0, p.size, p.sizeMiss, p.less, p.equal, p.greater, p.miss)
}
// NewBufferPool creates a new initialized 'buffer pool'.
func NewBufferPool(baseline int) *BufferPool {
if baseline <= 0 {
panic("baseline can't be <= 0")
}
return &BufferPool{
baseline0: baseline,
baseline1: baseline * 2,
baseline2: baseline * 4,
}
}

View File

@ -1,10 +1,11 @@
syncthing syncthing
========= =========
[![Build Status](https://img.shields.io/travis/syncthing/syncthing.svg?style=flat)](https://travis-ci.org/syncthing/syncthing) [![Latest Build](http://img.shields.io/jenkins/s/http/build.syncthing.net/syncthing.svg?style=flat-square)](http://build.syncthing.net/job/syncthing/lastSuccessfulBuild/artifact/)
[![Coverage Status](https://img.shields.io/coveralls/syncthing/syncthing.svg?style=flat)](https://coveralls.io/r/syncthing/syncthing?branch=master) [![Build Status](https://img.shields.io/travis/syncthing/syncthing.svg?style=flat-square)](https://travis-ci.org/syncthing/syncthing)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat)](http://godoc.org/github.com/syncthing/syncthing) [![Coverage Status](https://img.shields.io/coveralls/syncthing/syncthing.svg?style=flat-square)](https://coveralls.io/r/syncthing/syncthing?branch=master)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://opensource.org/licenses/MIT) [![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](http://godoc.org/github.com/syncthing/syncthing)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)](http://opensource.org/licenses/MIT)
This is the `syncthing` project. The following are the project goals: This is the `syncthing` project. The following are the project goals:
@ -32,8 +33,9 @@ Signed Releases
--------------- ---------------
As of v0.7.0 and onwards, git tags and release binaries are GPG signed with As of v0.7.0 and onwards, git tags and release binaries are GPG signed with
the key BCE524C7 (http://nym.se/gpg.txt). The signature is included in the the key BCE524C7 (http://nym.se/gpg.txt). For release binaries, MD5 and
normal release bundle as `syncthing.asc` or `syncthing.exe.asc`. SHA1 checksums are calculated and signed, available in the
md5sum.txt.asc and sha1sum.txt.asc files.
Documentation Documentation
============= =============

File diff suppressed because one or more lines are too long

View File

@ -1,4 +1,6 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
export COPYFILE_DISABLE=true export COPYFILE_DISABLE=true
export GO386=387 # Don't use SSE on 32 bit builds export GO386=387 # Don't use SSE on 32 bit builds
@ -54,22 +56,11 @@ test() {
godep go test -cpu=1,2,4 $* ./... godep go test -cpu=1,2,4 $* ./...
} }
sign() {
if git describe --exact-match 2>/dev/null >/dev/null ; then
# HEAD is a tag
id=BCE524C7
if gpg --list-keys "$id" >/dev/null 2>&1 ; then
gpg -ab -u "$id" "$1"
fi
fi
}
tarDist() { tarDist() {
name="$1" name="$1"
rm -rf "$name" rm -rf "$name"
mkdir -p "$name" mkdir -p "$name"
cp syncthing "${distFiles[@]}" "$name" cp syncthing "${distFiles[@]}" "$name"
sign "$name/syncthing"
tar zcvf "$name.tar.gz" "$name" tar zcvf "$name.tar.gz" "$name"
rm -rf "$name" rm -rf "$name"
} }
@ -82,7 +73,6 @@ zipDist() {
GOARCH="" GOOS="" go run cmd/todos/main.go < "$f" > "$name/$f.txt" GOARCH="" GOOS="" go run cmd/todos/main.go < "$f" > "$name/$f.txt"
done done
cp syncthing.exe "$name" cp syncthing.exe "$name"
sign "$name/syncthing.exe"
zip -r "$name.zip" "$name" zip -r "$name.zip" "$name"
rm -rf "$name" rm -rf "$name"
} }
@ -121,11 +111,11 @@ transifex() {
build-all() { build-all() {
rm -f *.tar.gz *.zip rm -f *.tar.gz *.zip
test -short || exit 1 test -short
assets assets
rm -rf bin Godeps/_workspace/pkg $GOPATH/pkg/*/github.com/syncthing rm -rf bin Godeps/_workspace/pkg $GOPATH/pkg/*/github.com/syncthing
for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 solaris-amd64 ; do for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 ; do
export GOOS=${os%-*} export GOOS=${os%-*}
export GOARCH=${os#*-} export GOARCH=${os#*-}
@ -165,9 +155,11 @@ build-all() {
tarDist "syncthing-linux-armv5-$version" tarDist "syncthing-linux-armv5-$version"
} }
case "$1" in case "${1:-default}" in
"") default)
if [[ $# -gt 1 ]] ; then
shift shift
fi
export GOBIN=$(pwd)/bin export GOBIN=$(pwd)/bin
godep go install $* -ldflags "$ldflags" ./cmd/... godep go install $* -ldflags "$ldflags" ./cmd/...
;; ;;
@ -200,7 +192,7 @@ case "$1" in
tar) tar)
rm -f *.tar.gz *.zip rm -f *.tar.gz *.zip
test -short || exit 1 test -short
assets assets
build build
@ -220,14 +212,6 @@ case "$1" in
build-all -tags noupgrade build-all -tags noupgrade
;; ;;
upload)
tag=$(git describe)
shopt -s nullglob
for f in *.tar.gz *.zip *.asc ; do
relup syncthing/syncthing "$tag" "$f"
done
;;
deps) deps)
deps deps
;; ;;
@ -253,6 +237,6 @@ case "$1" in
;; ;;
*) *)
echo "Unknown build parameter $1" echo "Unknown build command $1"
;; ;;
esac esac

View File

@ -32,7 +32,8 @@ func main() {
if *node == "" { if *node == "" {
log.Printf("*** Global index for repo %q", *repo) log.Printf("*** Global index for repo %q", *repo)
fs.WithGlobal(func(f protocol.FileInfo) bool { fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f) fmt.Println(f)
fmt.Println("\t", fs.Availability(f.Name)) fmt.Println("\t", fs.Availability(f.Name))
return true return true
@ -43,7 +44,8 @@ func main() {
log.Fatal(err) log.Fatal(err)
} }
log.Printf("*** Have index for repo %q node %q", *repo, n) log.Printf("*** Have index for repo %q node %q", *repo, n)
fs.WithHave(n, func(f protocol.FileInfo) bool { fs.WithHaveTruncated(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f) fmt.Println(f)
return true return true
}) })

46
cmd/syncthing/heapprof.go Normal file
View File

@ -0,0 +1,46 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"os"
"runtime"
"runtime/pprof"
"syscall"
"time"
)
func init() {
if os.Getenv("STHEAPPROFILE") != "" {
go saveHeapProfiles()
}
}
func saveHeapProfiles() {
runtime.MemProfileRate = 1
var memstats, prevMemstats runtime.MemStats
t0 := time.Now()
for t := range time.NewTicker(250 * time.Millisecond).C {
startms := int(t.Sub(t0).Seconds() * 1000)
runtime.ReadMemStats(&memstats)
if memstats.HeapInuse > prevMemstats.HeapInuse {
fd, err := os.Create(fmt.Sprintf("heap-%05d-%07d.pprof", syscall.Getpid(), startms))
if err != nil {
panic(err)
}
err = pprof.WriteHeapProfile(fd)
if err != nil {
panic(err)
}
err = fd.Close()
if err != nil {
panic(err)
}
prevMemstats = memstats
}
}
}

View File

@ -106,9 +106,6 @@ The following enviroment variables are interpreted by syncthing:
Set this variable when running under a service manager such as Set this variable when running under a service manager such as
runit, launchd, etc. runit, launchd, etc.
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.
STTRACE A comma separated string of facilities to trace. The valid STTRACE A comma separated string of facilities to trace. The valid
facility strings: facility strings:
- "beacon" (the beacon package) - "beacon" (the beacon package)
@ -122,10 +119,19 @@ The following enviroment variables are interpreted by syncthing:
- "xdr" (the xdr package) - "xdr" (the xdr package)
- "all" (all of the above) - "all" (all of the above)
STCPUPROFILE Write CPU profile to the specified file.
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets. STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
STHEAPPROFILE Write heap profiles to heap-$pid-$timestamp.pprof each time
heap usage increases.
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
supported on Windows.
STDEADLOCKTIMEOUT Alter deadlock detection timeout (seconds; default 1200).` STDEADLOCKTIMEOUT Alter deadlock detection timeout (seconds; default 1200).`
) )
@ -497,7 +503,7 @@ nextRepo:
} }
if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 { if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
f, err := os.Create(cpuprof) f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }

View File

@ -1,4 +1,8 @@
// +build perfstats // Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows
package main package main
@ -11,7 +15,9 @@ import (
) )
func init() { func init() {
if os.Getenv("STPERFSTATS") != "" {
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid())) go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
}
} }
func savePerfStats(file string) { func savePerfStats(file string) {

View File

@ -110,7 +110,6 @@ type OptionsConfiguration struct {
MaxSendKbps int `xml:"maxSendKbps"` MaxSendKbps int `xml:"maxSendKbps"`
RescanIntervalS int `xml:"rescanIntervalS" default:"60"` RescanIntervalS int `xml:"rescanIntervalS" default:"60"`
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"` ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"`
MaxChangeKbps int `xml:"maxChangeKbps" default:"10000"`
StartBrowser bool `xml:"startBrowser" default:"true"` StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"` UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
UPnPLease int `xml:"upnpLeaseMinutes" default:"0"` UPnPLease int `xml:"upnpLeaseMinutes" default:"0"`

View File

@ -34,7 +34,6 @@ func TestDefaultValues(t *testing.T) {
MaxSendKbps: 0, MaxSendKbps: 0,
RescanIntervalS: 60, RescanIntervalS: 60,
ReconnectIntervalS: 60, ReconnectIntervalS: 60,
MaxChangeKbps: 10000,
StartBrowser: true, StartBrowser: true,
UPnPEnabled: true, UPnPEnabled: true,
} }
@ -189,7 +188,6 @@ func TestOverriddenValues(t *testing.T) {
<maxSendKbps>1234</maxSendKbps> <maxSendKbps>1234</maxSendKbps>
<rescanIntervalS>600</rescanIntervalS> <rescanIntervalS>600</rescanIntervalS>
<reconnectionIntervalS>6000</reconnectionIntervalS> <reconnectionIntervalS>6000</reconnectionIntervalS>
<maxChangeKbps>2345</maxChangeKbps>
<startBrowser>false</startBrowser> <startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled> <upnpEnabled>false</upnpEnabled>
</options> </options>
@ -206,7 +204,6 @@ func TestOverriddenValues(t *testing.T) {
MaxSendKbps: 1234, MaxSendKbps: 1234,
RescanIntervalS: 600, RescanIntervalS: 600,
ReconnectIntervalS: 6000, ReconnectIntervalS: 6000,
MaxChangeKbps: 2345,
StartBrowser: false, StartBrowser: false,
UPnPEnabled: false, UPnPEnabled: false,
} }

View File

@ -24,12 +24,15 @@ type Discoverer struct {
listenAddrs []string listenAddrs []string
localBcastIntv time.Duration localBcastIntv time.Duration
globalBcastIntv time.Duration globalBcastIntv time.Duration
errorRetryIntv time.Duration
beacon *beacon.Beacon beacon *beacon.Beacon
registry map[protocol.NodeID][]string registry map[protocol.NodeID][]string
registryLock sync.RWMutex registryLock sync.RWMutex
extServer string extServer string
extPort uint16 extPort uint16
localBcastTick <-chan time.Time localBcastTick <-chan time.Time
stopGlobal chan struct{}
globalWG sync.WaitGroup
forcedBcastTick chan time.Time forcedBcastTick chan time.Time
extAnnounceOK bool extAnnounceOK bool
extAnnounceOKmut sync.Mutex extAnnounceOKmut sync.Mutex
@ -55,6 +58,7 @@ func NewDiscoverer(id protocol.NodeID, addresses []string, localPort int) (*Disc
listenAddrs: addresses, listenAddrs: addresses,
localBcastIntv: 30 * time.Second, localBcastIntv: 30 * time.Second,
globalBcastIntv: 1800 * time.Second, globalBcastIntv: 1800 * time.Second,
errorRetryIntv: 60 * time.Second,
beacon: b, beacon: b,
registry: make(map[protocol.NodeID][]string), registry: make(map[protocol.NodeID][]string),
} }
@ -71,16 +75,20 @@ func (d *Discoverer) StartLocal() {
} }
func (d *Discoverer) StartGlobal(server string, extPort uint16) { func (d *Discoverer) StartGlobal(server string, extPort uint16) {
if d.globalBcastStop != nil { // Wait for any previous announcer to stop before starting a new one.
d.globalBcastStop <- true d.globalWG.Wait()
} else {
d.globalBcastStop = make(chan bool)
}
d.extServer = server d.extServer = server
d.extPort = extPort d.extPort = extPort
d.stopGlobal = make(chan struct{})
d.globalWG.Add(1)
go d.sendExternalAnnouncements() go d.sendExternalAnnouncements()
} }
func (d *Discoverer) StopGlobal() {
close(d.stopGlobal)
d.globalWG.Wait()
}
func (d *Discoverer) ExtAnnounceOK() bool { func (d *Discoverer) ExtAnnounceOK() bool {
d.extAnnounceOKmut.Lock() d.extAnnounceOKmut.Lock()
defer d.extAnnounceOKmut.Unlock() defer d.extAnnounceOKmut.Unlock()
@ -179,20 +187,19 @@ func (d *Discoverer) sendLocalAnnouncements() {
} }
func (d *Discoverer) sendExternalAnnouncements() { func (d *Discoverer) sendExternalAnnouncements() {
// this should go in the Discoverer struct defer d.globalWG.Done()
errorRetryIntv := 60 * time.Second
remote, err := net.ResolveUDPAddr("udp", d.extServer) remote, err := net.ResolveUDPAddr("udp", d.extServer)
for err != nil { for err != nil {
l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv) l.Warnf("Global discovery: %v; trying again in %v", err, d.errorRetryIntv)
time.Sleep(errorRetryIntv) time.Sleep(d.errorRetryIntv)
remote, err = net.ResolveUDPAddr("udp", d.extServer) remote, err = net.ResolveUDPAddr("udp", d.extServer)
} }
conn, err := net.ListenUDP("udp", nil) conn, err := net.ListenUDP("udp", nil)
for err != nil { for err != nil {
l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv) l.Warnf("Global discovery: %v; trying again in %v", err, d.errorRetryIntv)
time.Sleep(errorRetryIntv) time.Sleep(d.errorRetryIntv)
conn, err = net.ListenUDP("udp", nil) conn, err = net.ListenUDP("udp", nil)
} }
@ -207,7 +214,10 @@ func (d *Discoverer) sendExternalAnnouncements() {
buf = d.announcementPkt() buf = d.announcementPkt()
} }
for { var bcastTick = time.Tick(d.globalBcastIntv)
var errTick <-chan time.Time
sendOneAnnouncement := func() {
var ok bool var ok bool
if debug { if debug {
@ -236,21 +246,32 @@ func (d *Discoverer) sendExternalAnnouncements() {
d.extAnnounceOKmut.Unlock() d.extAnnounceOKmut.Unlock()
if ok { if ok {
// Don't do a long sleep, listen for a stop signal, just incase errTick = nil
// the UPnP mapping has changed, and a new routine should be started. } else if errTick != nil {
for i := time.Duration(0); i < d.globalBcastIntv; i += time.Duration(1) { errTick = time.Tick(d.errorRetryIntv)
select {
case <-d.globalBcastStop:
return
default:
time.Sleep(1 * time.Second)
} }
} }
} else { // Announce once, immediately
time.Sleep(errorRetryIntv) sendOneAnnouncement()
loop:
for {
select {
case <-d.stopGlobal:
break loop
case <-errTick:
sendOneAnnouncement()
case <-bcastTick:
sendOneAnnouncement()
} }
} }
if debug {
l.Debugln("discover: stopping global")
}
} }
func (d *Discoverer) recvAnnouncements() { func (d *Discoverer) recvAnnouncements() {
@ -311,7 +332,7 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
} }
} }
if debug { if debug {
l.Debugf("discover: register: %s -> %#v", node.ID, addrs) l.Debugf("discover: register: %v -> %#v", node.ID, addrs)
} }
var id protocol.NodeID var id protocol.NodeID
copy(id[:], node.ID) copy(id[:], node.ID)

View File

@ -119,7 +119,7 @@ func globalKeyName(key []byte) []byte {
type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
type fileIterator func(f protocol.FileInfo) bool type fileIterator func(f protocol.FileIntf) bool
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 { func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
defer runtime.GC() defer runtime.GC()
@ -181,7 +181,7 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
case moreFs && moreDb && cmp == 0: case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions. // File exists on both sides - compare versions.
var ef protocol.FileInfo var ef protocol.FileInfoTruncated
ef.UnmarshalXDR(dbi.Value()) ef.UnmarshalXDR(dbi.Value())
if fs[fsi].Version > ef.Version { if fs[fsi].Version > ef.Version {
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer { if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
@ -226,20 +226,23 @@ func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint6
func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 { func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 { return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
var f protocol.FileInfo var tf protocol.FileInfoTruncated
err := f.UnmarshalXDR(dbi.Value()) err := tf.UnmarshalXDR(dbi.Value())
if err != nil { if err != nil {
panic(err) panic(err)
} }
if !protocol.IsDeleted(f.Flags) { if !tf.IsDeleted() {
if debug { if debug {
l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name) l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
} }
ts := clock(f.LocalVersion) ts := clock(tf.LocalVersion)
f.Blocks = nil f := protocol.FileInfo{
f.Version = lamport.Default.Tick(f.Version) Name: tf.Name,
f.Flags |= protocol.FlagDeleted Version: lamport.Default.Tick(tf.Version),
f.LocalVersion = ts LocalVersion: ts,
Flags: tf.Flags | protocol.FlagDeleted,
Modified: tf.Modified,
}
batch.Put(dbi.Key(), f.MarshalXDR()) batch.Put(dbi.Key(), f.MarshalXDR())
ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version) ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
return ts return ts
@ -271,7 +274,7 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
continue continue
} }
var ef protocol.FileInfo var ef protocol.FileInfoTruncated
err = ef.UnmarshalXDR(bs) err = ef.UnmarshalXDR(bs)
if err != nil { if err != nil {
panic(err) panic(err)
@ -395,7 +398,7 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
} }
} }
func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) { func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
start := nodeKey(repo, node, nil) // before all repo/node files start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot() snap, err := db.GetSnapshot()
@ -407,8 +410,7 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
defer dbi.Release() defer dbi.Release()
for dbi.Next() { for dbi.Next() {
var f protocol.FileInfo f, err := unmarshalTrunc(dbi.Value(), truncate)
err := f.UnmarshalXDR(dbi.Value())
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -418,7 +420,7 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
} }
} }
func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfo) bool) { func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) {
defer runtime.GC() defer runtime.GC()
start := nodeKey(repo, nil, nil) // before all repo/node files start := nodeKey(repo, nil, nil) // before all repo/node files
@ -433,7 +435,7 @@ func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol
for dbi.Next() { for dbi.Next() {
node := nodeKeyNode(dbi.Key()) node := nodeKeyNode(dbi.Key())
var f protocol.FileInfo var f protocol.FileInfoTruncated
err := f.UnmarshalXDR(dbi.Value()) err := f.UnmarshalXDR(dbi.Value())
if err != nil { if err != nil {
panic(err) panic(err)
@ -444,40 +446,6 @@ func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol
} }
} }
/*
func ldbCheckGlobalConsistency(db *leveldb.DB, repo []byte) {
l.Debugf("Checking global consistency for %q", repo)
start := nodeKey(repo, nil, nil) // before all repo/node files
limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
}
defer snap.Release()
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
batch := new(leveldb.Batch)
i := 0
for dbi.Next() {
repo := nodeKeyRepo(dbi.Key())
node := nodeKeyNode(dbi.Key())
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if ldbUpdateGlobal(snap, batch, repo, node, []byte(f.Name), f.Version) {
var nodeID protocol.NodeID
copy(nodeID[:], node)
l.Debugf("fixed global for %q %s %q", repo, nodeID, f.Name)
}
i++
}
l.Debugln("Done", i)
}
*/
func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo { func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
nk := nodeKey(repo, node, file) nk := nodeKey(repo, node, file)
bs, err := db.Get(nk, nil) bs, err := db.Get(nk, nil)
@ -536,7 +504,7 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
return f return f
} }
func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) { func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator) {
defer runtime.GC() defer runtime.GC()
start := globalKey(repo, nil) start := globalKey(repo, nil)
@ -565,8 +533,7 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) {
panic(err) panic(err)
} }
var f protocol.FileInfo f, err := unmarshalTrunc(bs, truncate)
err = f.UnmarshalXDR(bs)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -605,7 +572,7 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
return nodes return nodes
} }
func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) { func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
defer runtime.GC() defer runtime.GC()
start := globalKey(repo, nil) start := globalKey(repo, nil)
@ -649,13 +616,12 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
panic(err) panic(err)
} }
var gf protocol.FileInfo gf, err := unmarshalTrunc(bs, truncate)
err = gf.UnmarshalXDR(bs)
if err != nil { if err != nil {
panic(err) panic(err)
} }
if protocol.IsDeleted(gf.Flags) && !have { if gf.IsDeleted() && !have {
// We don't need deleted files that we don't have // We don't need deleted files that we don't have
continue continue
} }
@ -670,3 +636,15 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
} }
} }
} }
func unmarshalTrunc(bs []byte, truncate bool) (protocol.FileIntf, error) {
if truncate {
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(bs)
return tf, err
} else {
var tf protocol.FileInfo
err := tf.UnmarshalXDR(bs)
return tf, err
}
}

View File

@ -36,7 +36,7 @@ func NewSet(repo string, db *leveldb.DB) *Set {
} }
var nodeID protocol.NodeID var nodeID protocol.NodeID
ldbWithAllRepo(db, []byte(repo), func(node []byte, f protocol.FileInfo) bool { ldbWithAllRepoTruncated(db, []byte(repo), func(node []byte, f protocol.FileInfoTruncated) bool {
copy(nodeID[:], node) copy(nodeID[:], node)
if f.LocalVersion > s.localVersion[nodeID] { if f.LocalVersion > s.localVersion[nodeID] {
s.localVersion[nodeID] = f.LocalVersion s.localVersion[nodeID] = f.LocalVersion
@ -87,21 +87,42 @@ func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
if debug { if debug {
l.Debugf("%s WithNeed(%v)", s.repo, node) l.Debugf("%s WithNeed(%v)", s.repo, node)
} }
ldbWithNeed(s.db, []byte(s.repo), node[:], fn) ldbWithNeed(s.db, []byte(s.repo), node[:], false, fn)
}
func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], true, fn)
} }
func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) { func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
if debug { if debug {
l.Debugf("%s WithHave(%v)", s.repo, node) l.Debugf("%s WithHave(%v)", s.repo, node)
} }
ldbWithHave(s.db, []byte(s.repo), node[:], fn) ldbWithHave(s.db, []byte(s.repo), node[:], false, fn)
}
func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
}
ldbWithHave(s.db, []byte(s.repo), node[:], true, fn)
} }
func (s *Set) WithGlobal(fn fileIterator) { func (s *Set) WithGlobal(fn fileIterator) {
if debug { if debug {
l.Debugf("%s WithGlobal()", s.repo) l.Debugf("%s WithGlobal()", s.repo)
} }
ldbWithGlobal(s.db, []byte(s.repo), fn) ldbWithGlobal(s.db, []byte(s.repo), false, fn)
}
func (s *Set) WithGlobalTruncated(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.repo)
}
ldbWithGlobal(s.db, []byte(s.repo), true, fn)
} }
func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo { func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {

View File

@ -37,7 +37,8 @@ func genBlocks(n int) []protocol.BlockInfo {
func globalList(s *files.Set) []protocol.FileInfo { func globalList(s *files.Set) []protocol.FileInfo {
var fs []protocol.FileInfo var fs []protocol.FileInfo
s.WithGlobal(func(f protocol.FileInfo) bool { s.WithGlobal(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f) fs = append(fs, f)
return true return true
}) })
@ -46,7 +47,8 @@ func globalList(s *files.Set) []protocol.FileInfo {
func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo { func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
var fs []protocol.FileInfo var fs []protocol.FileInfo
s.WithHave(n, func(f protocol.FileInfo) bool { s.WithHave(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f) fs = append(fs, f)
return true return true
}) })
@ -55,7 +57,8 @@ func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo { func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
var fs []protocol.FileInfo var fs []protocol.FileInfo
s.WithNeed(n, func(f protocol.FileInfo) bool { s.WithNeed(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f) fs = append(fs, f)
return true return true
}) })

View File

@ -195,16 +195,16 @@
</tr> </tr>
<tr> <tr>
<th><span class="glyphicon glyphicon-globe"></span>&emsp;<span translate>Global Repository</span></th> <th><span class="glyphicon glyphicon-globe"></span>&emsp;<span translate>Global Repository</span></th>
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} <span translate>items</span>, {{model[repo.ID].globalBytes | binary}}B</td> <td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].globalBytes | binary}}B</td>
</tr> </tr>
<tr> <tr>
<th><span class="glyphicon glyphicon-home"></span>&emsp;<span translate>Local Repository</span></th> <th><span class="glyphicon glyphicon-home"></span>&emsp;<span translate>Local Repository</span></th>
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} <span translate>items</span>, {{model[repo.ID].localBytes | binary}}B</td> <td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].localBytes | binary}}B</td>
</tr> </tr>
<tr> <tr>
<th><span class="glyphicon glyphicon-cloud-download"></span>&emsp;<span translate>Out Of Sync</span></th> <th><span class="glyphicon glyphicon-cloud-download"></span>&emsp;<span translate>Out Of Sync</span></th>
<td class="text-right"> <td class="text-right">
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} <span translate>items</span>, {{model[repo.ID].needBytes | binary}}B</a> <a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].needBytes | binary}}B</a>
<span ng-if="model[repo.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span> <span ng-if="model[repo.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span>
</td> </td>
</tr> </tr>

View File

@ -1,6 +1,6 @@
{ {
"API Key": "API-Schlüssel", "API Key": "API-Key",
"About": "Über", "About": "Über Syncthing",
"Add Node": "Knoten hinzufügen", "Add Node": "Knoten hinzufügen",
"Add Repository": "Verzeichnis hinzufügen", "Add Repository": "Verzeichnis hinzufügen",
"Address": "Adresse", "Address": "Adresse",
@ -20,18 +20,18 @@
"Edit": "Bearbeiten", "Edit": "Bearbeiten",
"Edit Node": "Knoten bearbeiten", "Edit Node": "Knoten bearbeiten",
"Edit Repository": "Verzeichnis ändern", "Edit Repository": "Verzeichnis ändern",
"Enable UPnP": "Aktiviere UPnP", "Enable UPnP": "UPnP aktivieren",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Trage durch ein Komma getrennte \"IP:Port\" Adressen oder \"dynamic\" ein um automatische Adresserkennung durchzuführen.", "Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Trage durch ein Komma getrennte \"IP:Port\" Adressen oder \"dynamic\" ein um automatische Adresserkennung durchzuführen.",
"Error": "Fehler", "Error": "Fehler",
"File Versioning": "Dateiversionierung", "File Versioning": "Dateiversionierung",
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen verwenden.", "File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen verwenden.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Dateien werden beim Löschen oder Ersetzen als datierte Versionen in einen .stversions -Ordner verschoben.", "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Dateien werden, bevor syncthing sie löscht oder ersetzt, als datierte Versionen in einen Ordner names .stversions verschoben.",
"Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Dateien sind vor Veränderung durch andere Knoten geschützt, auf diesem Knoten durchgeführte Veränderungen werden aber auf den Rest des Netzwerks übertragen.", "Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Dateien sind vor Veränderung durch andere Knoten geschützt, auf diesem Knoten durchgeführte Veränderungen werden aber auf den Rest des Netzwerks übertragen.",
"Folder": "Ordner", "Folder": "Ordner",
"GUI Authentication Password": "Passwort für Zugang zur Benutzeroberfläche", "GUI Authentication Password": "Passwort für Zugang zur Benutzeroberfläche",
"GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche.", "GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche.",
"GUI Listen Addresses": "Adresse(n) für die Benutzeroberfläche", "GUI Listen Addresses": "Adresse(n) für die Benutzeroberfläche",
"Generate": "Generiere", "Generate": "Generieren",
"Global Discovery": "Globale Auffindung", "Global Discovery": "Globale Auffindung",
"Global Discovery Server": "Globaler Auffindungsserver", "Global Discovery Server": "Globaler Auffindungsserver",
"Global Repository": "Globales Verzeichnis", "Global Repository": "Globales Verzeichnis",
@ -42,7 +42,7 @@
"Local Discovery": "Lokale Auffindung", "Local Discovery": "Lokale Auffindung",
"Local Discovery Port": "Lokaler Aufindungsport", "Local Discovery Port": "Lokaler Aufindungsport",
"Local Repository": "Lokales Verzeichnis", "Local Repository": "Lokales Verzeichnis",
"Master Repo": "Keine Veränderungen zugelassen", "Master Repo": "Originalverzeichnis",
"Max File Change Rate (KiB/s)": "Maximale Datenänderungsrate (KiB/s)", "Max File Change Rate (KiB/s)": "Maximale Datenänderungsrate (KiB/s)",
"Max Outstanding Requests": "Max. ausstehende Anfragen", "Max Outstanding Requests": "Max. ausstehende Anfragen",
"No": "Nein", "No": "Nein",
@ -62,14 +62,14 @@
"RAM Utilization": "Verwendeter Arbeitsspeicher", "RAM Utilization": "Verwendeter Arbeitsspeicher",
"Reconnect Interval (s)": "Wiederverbindungsintervall (s)", "Reconnect Interval (s)": "Wiederverbindungsintervall (s)",
"Repository ID": "Verzeichnis-ID", "Repository ID": "Verzeichnis-ID",
"Repository Master": "Keine Veränderungen zulassen", "Repository Master": "Originalverzeichnis",
"Repository Path": "Pfad zum Verzeichnis", "Repository Path": "Pfad zum Verzeichnis",
"Rescan Interval (s)": "Suchintervall (s)", "Rescan Interval (s)": "Suchintervall (s)",
"Restart": "Neustart", "Restart": "Neustart",
"Restart Needed": "Neustart notwendig", "Restart Needed": "Neustart notwendig",
"Restarting": "Wird neu gestartet", "Restarting": "Wird neu gestartet",
"Save": "Speichern", "Save": "Speichern",
"Scanning": "Überprüfe", "Scanning": "Sucht",
"Select the nodes to share this repository with.": "Wähle die Knoten aus, mit denen du dieses Verzeichnis teilen willst.", "Select the nodes to share this repository with.": "Wähle die Knoten aus, mit denen du dieses Verzeichnis teilen willst.",
"Settings": "Einstellungen", "Settings": "Einstellungen",
"Share With Nodes": "Teile mit diesen Knoten", "Share With Nodes": "Teile mit diesen Knoten",
@ -78,7 +78,7 @@
"Show ID": "ID anzeigen", "Show ID": "ID anzeigen",
"Shown instead of Node ID in the cluster status.": "Wird anstatt der Knoten-ID im Verbunds-Status angezeigt.", "Shown instead of Node ID in the cluster status.": "Wird anstatt der Knoten-ID im Verbunds-Status angezeigt.",
"Shutdown": "Herunterfahren", "Shutdown": "Herunterfahren",
"Source Code": "Quellcode", "Source Code": "Sourcecode",
"Start Browser": "Starte Browser", "Start Browser": "Starte Browser",
"Stopped": "Gestoppt", "Stopped": "Gestoppt",
"Support / Forum": "Support / Forum", "Support / Forum": "Support / Forum",
@ -90,7 +90,7 @@
"Syncthing is restarting.": "Syncthing wird neu gestartet", "Syncthing is restarting.": "Syncthing wird neu gestartet",
"Syncthing is upgrading.": "Syncthing wird aktualisiert", "Syncthing is upgrading.": "Syncthing wird aktualisiert",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing scheint nicht erreichbar zu sein oder es gibt ein Problem mit Ihrer Internetverbindung. Versuche erneut...", "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing scheint nicht erreichbar zu sein oder es gibt ein Problem mit Ihrer Internetverbindung. Versuche erneut...",
"The aggregated statistics are publicly available at {%url%}.": "Die aggregierten Statistiken sind öffentlich verfügbar unter {{url}}.", "The aggregated statistics are publicly available at {%url%}.": "Die gesammelten Statistiken sind öffentlich verfügbar unter {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber nicht aktiviert. Syncthing muss neugestartet werden um die neue Konfiguration zu aktivieren.", "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber nicht aktiviert. Syncthing muss neugestartet werden um die neue Konfiguration zu aktivieren.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Benutzungsbericht wird täglich gesendet. Er wird benutzt um Statistiken über verwendete Betriebssysteme, Verzeichnis-Größen und Programm-Versionen zu erstellen. Sobald der Bericht in Zukunft weitere Daten erfasst, wird dir dieses Fenster erneut angezeigt.", "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Benutzungsbericht wird täglich gesendet. Er wird benutzt um Statistiken über verwendete Betriebssysteme, Verzeichnis-Größen und Programm-Versionen zu erstellen. Sobald der Bericht in Zukunft weitere Daten erfasst, wird dir dieses Fenster erneut angezeigt.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Knoten-ID scheint nicht gültig zu sein. Sie sollte eine 52 Stellen lange Zeichenkette aus Buchstaben und Zahlen sein. Leerzeichen und Striche sind optional (werden ignoriert).", "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Knoten-ID scheint nicht gültig zu sein. Sie sollte eine 52 Stellen lange Zeichenkette aus Buchstaben und Zahlen sein. Leerzeichen und Striche sind optional (werden ignoriert).",
@ -98,7 +98,7 @@
"The node ID cannot be blank.": "Die Knoten-ID darf nicht leer sein.", "The node ID cannot be blank.": "Die Knoten-ID darf nicht leer sein.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Die hier einzutragende Knoten-ID kann im \"Bearbeiten > Zeige ID\"-Dialog auf dem anderen Knoten gefunden werden. Leerzeichen und Striche sind optional (werden ignoriert).", "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Die hier einzutragende Knoten-ID kann im \"Bearbeiten > Zeige ID\"-Dialog auf dem anderen Knoten gefunden werden. Leerzeichen und Striche sind optional (werden ignoriert).",
"The number of old versions to keep, per file.": "Anzahl der alten Versionen, die von jeder Datei gespeichert werden sollen.", "The number of old versions to keep, per file.": "Anzahl der alten Versionen, die von jeder Datei gespeichert werden sollen.",
"The number of versions must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl sein und darf nicht leer sein.", "The number of versions must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl und darf nicht leer sein.",
"The repository ID cannot be blank.": "Die Verzeichnis-ID darf nicht leer sein.", "The repository ID cannot be blank.": "Die Verzeichnis-ID darf nicht leer sein.",
"The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "Die Verzeichnis-ID muss eine kurze Kennung (64 Zeichen oder weniger) sein. Sie kann aus Buchstaben, Zahlen und den Punkt- (.), Strich- (-), und Unterstrich- (_) Zeichen bestehen.", "The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "Die Verzeichnis-ID muss eine kurze Kennung (64 Zeichen oder weniger) sein. Sie kann aus Buchstaben, Zahlen und den Punkt- (.), Strich- (-), und Unterstrich- (_) Zeichen bestehen.",
"The repository ID must be unique.": "Die Verzeichnis-ID muss eindeutig sein.", "The repository ID must be unique.": "Die Verzeichnis-ID muss eindeutig sein.",

View File

@ -1,8 +1,8 @@
{ {
"API Key": "Κλειδί API", "API Key": "Κλειδί API",
"About": "Σχετικά", "About": "Σχετικά",
"Add Node": "Πρόσθεσε Κόμβο", "Add Node": "Προσθήκη Κόμβου",
"Add Repository": "Πρόσθεσε Αποθετήριο", "Add Repository": "Προσθήκη Αποθετηρίου",
"Address": "Διεύθυνση", "Address": "Διεύθυνση",
"Addresses": "Διευθύνσεις", "Addresses": "Διευθύνσεις",
"Allow Anonymous Usage Reporting?": "Να επιτρέπεται Ανώνυμη Αποστολή Αναφοράς Χρήσης?", "Allow Anonymous Usage Reporting?": "Να επιτρέπεται Ανώνυμη Αποστολή Αναφοράς Χρήσης?",
@ -28,19 +28,19 @@
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Όταν τα αρχεία αντικατασταθούν ή διαγραφούν από το syncthing, μεταφέρονται σε φάκελο .stversions με χρονική σήμανση.", "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Όταν τα αρχεία αντικατασταθούν ή διαγραφούν από το syncthing, μεταφέρονται σε φάκελο .stversions με χρονική σήμανση.",
"Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Τα αρχεία προστατεύονται από αλλαγές που γίνονται σε άλλους κόμβους, αλλά όποιες αλλαγές γίνουν εδώ θα αποσταλούν στο όλο το cluster.", "Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Τα αρχεία προστατεύονται από αλλαγές που γίνονται σε άλλους κόμβους, αλλά όποιες αλλαγές γίνουν εδώ θα αποσταλούν στο όλο το cluster.",
"Folder": "Κατάλογος", "Folder": "Κατάλογος",
"GUI Authentication Password": "GUI Authentication Password", "GUI Authentication Password": "Κωδικός πιστοποίησης στο GUI",
"GUI Authentication User": "GUI Authentication User", "GUI Authentication User": "Χρήστης πιστοποίησης στο GUI",
"GUI Listen Addresses": "GUI Listen Addresses", "GUI Listen Addresses": "GUI Listen διευθύνσεις",
"Generate": "Δημιουργία", "Generate": "Δημιουργία",
"Global Discovery": "Global Discovery", "Global Discovery": "Global Discovery",
"Global Discovery Server": "Διακομιστής Ανεύρεσης Κόμβου", "Global Discovery Server": "Διακομιστής Ανεύρεσης Κόμβου",
"Global Repository": "Global Repository", "Global Repository": "Global Repository",
"Idle": "Ανενεργός", "Idle": "Ανενεργό",
"Ignore Permissions": "Ignore Permissions", "Ignore Permissions": "Αγνόησε Δικαιώματα",
"Keep Versions": "Keep Versions", "Keep Versions": "Διατήρησε Εκδόσεις",
"Latest Release": "Τελευταία Έκδοση", "Latest Release": "Τελευταία Έκδοση",
"Local Discovery": "Local Discovery", "Local Discovery": "Τοπική Ανεύρεση",
"Local Discovery Port": "Local Discovery Port", "Local Discovery Port": "Port Τοπικής Ανεύρεσης",
"Local Repository": "Τοπικό Αποθετήριο", "Local Repository": "Τοπικό Αποθετήριο",
"Master Repo": "Master Repo", "Master Repo": "Master Repo",
"Max File Change Rate (KiB/s)": "Max File Change Rate (KiB/s)", "Max File Change Rate (KiB/s)": "Max File Change Rate (KiB/s)",
@ -51,20 +51,20 @@
"Node Name": "Όνομα Κόμβου", "Node Name": "Όνομα Κόμβου",
"Notice": "Notice", "Notice": "Notice",
"OK": "OK", "OK": "OK",
"Offline": "Ανεργός", "Offline": "Ανεργό",
"Online": "Ενεργός", "Online": "Ενεργός",
"Out Of Sync": "Εκτός Συγχρονισμού", "Out Of Sync": "Μη Συγχρονισμένα",
"Outgoing Rate Limit (KiB/s)": "Outgoing Rate Limit (KiB/s)", "Outgoing Rate Limit (KiB/s)": "Outgoing Rate Limit (KiB/s)",
"Override Changes": "Override Changes", "Override Changes": "Override Changes",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Μονοπάτι του αποθετηρίου στον τοπικό υπολογιστή. Σε περίπτωση που δεν υπάρχει, θα δημιουργηθεί. Ο χαρακτήρας tilde (~) μπορεί να χρησιμοποιηθεί σαν συντόμευση για", "Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Μονοπάτι του αποθετηρίου στον τοπικό υπολογιστή. Σε περίπτωση που δεν υπάρχει, θα δημιουργηθεί. Ο χαρακτήρας tilde (~) μπορεί να χρησιμοποιηθεί σαν συντόμευση για",
"Please wait": "Παρακαλώ περιμένετε", "Please wait": "Παρακαλώ περιμένετε",
"Preview Usage Report": "Preview Usage Report", "Preview Usage Report": "Προεπισκόπηση αναφοράς χρήσης",
"RAM Utilization": "Χρήση RAM", "RAM Utilization": "Χρήση RAM",
"Reconnect Interval (s)": "Reconnect Interval (s)", "Reconnect Interval (s)": "Χρονικό διάστημα επανασύνδεσης (s)",
"Repository ID": "ID Αποθετηρίου", "Repository ID": "ID Αποθετηρίου",
"Repository Master": "Repository Master", "Repository Master": "Repository Master",
"Repository Path": "Μονοπάτι Αποθετηρίου", "Repository Path": "Μονοπάτι Αποθετηρίου",
"Rescan Interval (s)": "Rescan Interval (s)", "Rescan Interval (s)": "Χρονικό διάστημα Επανασάρρωσης (s)",
"Restart": "Επανεκκίνηση", "Restart": "Επανεκκίνηση",
"Restart Needed": "Απαιτείται Επανεκκίνηση", "Restart Needed": "Απαιτείται Επανεκκίνηση",
"Restarting": "Επανεκκίνηση", "Restarting": "Επανεκκίνηση",
@ -79,40 +79,40 @@
"Shown instead of Node ID in the cluster status.": "Εμφάνιση στη θέση του ID Αποθετηρίου, στην κατάσταση του cluster.", "Shown instead of Node ID in the cluster status.": "Εμφάνιση στη θέση του ID Αποθετηρίου, στην κατάσταση του cluster.",
"Shutdown": "Απενεργοποίηση", "Shutdown": "Απενεργοποίηση",
"Source Code": "Πηγαίος Κώδικας", "Source Code": "Πηγαίος Κώδικας",
"Start Browser": "Start Browser", "Start Browser": "Έναρξη Φυλλομετρητή",
"Stopped": "Απενεργοποιημένο", "Stopped": "Απενεργοποιημένο",
"Support / Forum": "Υποστήριξη / Forum", "Support / Forum": "Υποστήριξη / Forum",
"Sync Protocol Listen Addresses": "Sync Protocol Listen Addresses", "Sync Protocol Listen Addresses": "Sync Protocol Listen Addresses",
"Synchronization": "Συγχρονισμός", "Synchronization": "Συγχρονισμός",
"Syncing": "Συγχρονισμός", "Syncing": "Συγχρονισμός",
"Syncthing has been shut down.": "Syncthing έχει απενεργοποιηθεί.", "Syncthing has been shut down.": "Το Syncthing έχει απενεργοποιηθεί.",
"Syncthing includes the following software or portions thereof:": "Syncthing includes the following software or portions thereof:", "Syncthing includes the following software or portions thereof:": "Το Syncthing συμπεριλαμβάνει τα παρακάτω λογισμικά ή μέρη αυτών:",
"Syncthing is restarting.": "Το Syncthing επανεκκινεί.", "Syncthing is restarting.": "Το Syncthing επανεκκινεί.",
"Syncthing is upgrading.": "Το Syncthing αναβαθμίζεται.", "Syncthing is upgrading.": "Το Syncthing αναβαθμίζεται.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…", "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Το Syncthing φαίνεται πως είναι απενεργοποιημένο ή υπάρχει πρόβλημα στη σύνδεσή σας στο Internet. Προσπάθεια ξανά…",
"The aggregated statistics are publicly available at {%url%}.": "Τα στατιστικά που έχουν συλλεγεί είναι διαθέσιμα στο κοινό, στο {{url}}.", "The aggregated statistics are publicly available at {%url%}.": "Τα στατιστικά που έχουν συλλεγεί είναι διαθέσιμα στο κοινό, στο {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.", "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Οι ρυθμίσεις έχουν αποθηκευτεί αλλά δεν έχουν ενεργοποιηθεί. Πρέπει να επανεκκινήσετε το Syncthing για να ισχύσουν οι νέες ρυθμίσεις.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.", "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Η κρυπτογραφημένη αναφοράς χρήσης στέλνεται καθημερινά. Χρησιμοποιείται ανίχνευση πληροφοριών πλατφόρμας, μεγέθους αποθετηρίων και εκδόσεων της εφαρμογής. Αν τα δεδομένα που αποστέλονται αλλάξουν, θα πληροφορηθείτε ξανά με αυτό το διάλογο.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.", "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Το ID Κόμβου που έχει εισαχθεί δεν είναι σωστό. Θα πρέπει να είναι αλφαριθμητικό 52 χαρακτήρων που να αποτελείται από γράμματα και αριθμούς, όπου τα κενά και οι παύλες είναι προαιρετικά.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Το ID Κόμβου δεν είναι έγκυρο. Θα πρέπει να είναι αλφαριθμιτικό με 52 ή 56 χαρακτήρες και να αποτελείται από γράμματα και αριθμούς, που προαιρετικά χωρίζονται με κενά και παύλες.", "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Το ID Κόμβου δεν είναι έγκυρο. Θα πρέπει να είναι αλφαριθμιτικό με 52 ή 56 χαρακτήρες και να αποτελείται από γράμματα και αριθμούς, που προαιρετικά χωρίζονται με κενά και παύλες.",
"The node ID cannot be blank.": "The node ID cannot be blank.", "The node ID cannot be blank.": "Το ID Κόμβου δε μπορεί να είναι κενό.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Το ID Κόμβου μπορείτε να βρείτε στο μενού \"Επεξεργασία > Εμφάνιση ID\" του άλλου κόμβου. Κενά και παύλες είναι προαιρετικά (αγνοούνται).", "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Το ID Κόμβου μπορείτε να βρείτε στο μενού \"Επεξεργασία > Εμφάνιση ID\" του άλλου κόμβου. Κενά και παύλες είναι προαιρετικά (αγνοούνται).",
"The number of old versions to keep, per file.": "The number of old versions to keep, per file.", "The number of old versions to keep, per file.": "The number of old versions to keep, per file.",
"The number of versions must be a number and cannot be blank.": "The number of versions must be a number and cannot be blank.", "The number of versions must be a number and cannot be blank.": "Ο αριθμός εκδόσεων πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The repository ID cannot be blank.": "The repository ID cannot be blank.", "The repository ID cannot be blank.": "Το ID Αποθετηρίου δε μπορεί να είναι κενό.",
"The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.", "The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.",
"The repository ID must be unique.": "The repository ID must be unique.", "The repository ID must be unique.": "Το ID Αποθετηρίου πρέπει να είναι μοναδικό.",
"The repository path cannot be blank.": "The repository path cannot be blank.", "The repository path cannot be blank.": "Το μονοπάτι του αποθετηρίου δε μπορεί να είναι κενό.",
"Unknown": "Άγνωστο", "Unknown": "Άγνωστο",
"Up to Date": "Ενημερώμενο", "Up to Date": "Ενημερωμένος",
"Upgrade To {%version%}": "Αναβάθμιση στην έκδοση {{version}}", "Upgrade To {%version%}": "Αναβάθμιση στην έκδοση {{version}}",
"Upgrading": "Αναβάθμιση", "Upgrading": "Αναβάθμιση",
"Upload Rate": "Upload Rate", "Upload Rate": "Upload Rate",
"Usage": "Usage", "Usage": "Usage",
"Use Compression": "Χρήση συμπίεσης", "Use Compression": "Χρήση συμπίεσης",
"Use HTTPS for GUI": "Use HTTPS for GUI", "Use HTTPS for GUI": "Χρήση HTTPS για το GUI",
"Version": "Έκδοση", "Version": "Έκδοση",
"When adding a new node, keep in mind that this node must be added on the other side too.": "When adding a new node, keep in mind that this node must be added on the other side too.", "When adding a new node, keep in mind that this node must be added on the other side too.": "Προσθέτοντας έναν καινούργιο κόμβο, θυμηθείται πως θα πρέπει να προσθέσετε και τον παρόν κόμβο στην άλλη πλευρά.",
"When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Κατά την πρόσθεση νέου αποθετηρίου, να γνωρίζεται πως το ID Αποθετηρίου χρησιμοποιείται για να συνδέει Αποθετήρια μεταξύ κόμβων. Τα ID είναι case sensitive και θα πρέπει να είναι ταυτόσημα μεταξύ όλων των κόμβων.", "When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Κατά την πρόσθεση νέου αποθετηρίου, να γνωρίζεται πως το ID Αποθετηρίου χρησιμοποιείται για να συνδέει Αποθετήρια μεταξύ κόμβων. Τα ID είναι case sensitive και θα πρέπει να είναι ταυτόσημα μεταξύ όλων των κόμβων.",
"Yes": "Ναι", "Yes": "Ναι",
"You must keep at least one version.": "You must keep at least one version.", "You must keep at least one version.": "You must keep at least one version.",

View File

@ -70,7 +70,7 @@
"Restarting": "Redémarrage", "Restarting": "Redémarrage",
"Save": "Sauver", "Save": "Sauver",
"Scanning": "En cours de scan", "Scanning": "En cours de scan",
"Select the nodes to share this repository with.": "Sélectionner les nœuds qui partageront ce répertoire.", "Select the nodes to share this repository with.": "Sélectionner les nœuds qui partagent ce répertoire.",
"Settings": "Configuration", "Settings": "Configuration",
"Share With Nodes": "Partager avec les nœuds", "Share With Nodes": "Partager avec les nœuds",
"Shared With": "Partagé avec", "Shared With": "Partagé avec",

View File

@ -9,9 +9,9 @@
"Announce Server": "Server di Presenza Globale dei Nodi", "Announce Server": "Server di Presenza Globale dei Nodi",
"Anonymous Usage Reporting": "Statistiche Anonime di Utilizzo", "Anonymous Usage Reporting": "Statistiche Anonime di Utilizzo",
"Bugs": "Bug", "Bugs": "Bug",
"CPU Utilization": "Utilizzo della CPU", "CPU Utilization": "Utilizzo CPU",
"Close": "Chiudi", "Close": "Chiudi",
"Connection Error": "Connection Error", "Connection Error": "Errore di Connessione",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg e i seguenti Collaboratori:", "Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg e i seguenti Collaboratori:",
"Delete": "Elimina", "Delete": "Elimina",
"Disconnected": "Disconnesso", "Disconnected": "Disconnesso",
@ -38,7 +38,7 @@
"Idle": "Inattivo", "Idle": "Inattivo",
"Ignore Permissions": "Ignora Permessi", "Ignore Permissions": "Ignora Permessi",
"Keep Versions": "Mantieni le Versioni", "Keep Versions": "Mantieni le Versioni",
"Latest Release": "Ultimo Rilascio", "Latest Release": "Ultima Versione",
"Local Discovery": "Individuazione Locale", "Local Discovery": "Individuazione Locale",
"Local Discovery Port": "Porta di Individuazione Locale", "Local Discovery Port": "Porta di Individuazione Locale",
"Local Repository": "Deposito Locale", "Local Repository": "Deposito Locale",
@ -47,7 +47,7 @@
"Max Outstanding Requests": "Numero Massimo di Richieste Simultanee per i Blocchi di File", "Max Outstanding Requests": "Numero Massimo di Richieste Simultanee per i Blocchi di File",
"No": "No", "No": "No",
"Node ID": "ID Nodo", "Node ID": "ID Nodo",
"Node Identification": "Node Identification", "Node Identification": "Identificazione Nodo",
"Node Name": "Nome Nodo", "Node Name": "Nome Nodo",
"Notice": "Avviso", "Notice": "Avviso",
"OK": "OK", "OK": "OK",
@ -59,7 +59,7 @@
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Percorso del deposito nel computer locale. Verrà creato se non esiste già. Il carattere tilde (~) può essere utilizzato come scorciatoia per", "Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Percorso del deposito nel computer locale. Verrà creato se non esiste già. Il carattere tilde (~) può essere utilizzato come scorciatoia per",
"Please wait": "Attendere prego", "Please wait": "Attendere prego",
"Preview Usage Report": "Anteprima Statistiche di Utilizzo", "Preview Usage Report": "Anteprima Statistiche di Utilizzo",
"RAM Utilization": "Utilizzo della RAM", "RAM Utilization": "Utilizzo RAM",
"Reconnect Interval (s)": "Intervallo di Riconnessione (s)", "Reconnect Interval (s)": "Intervallo di Riconnessione (s)",
"Repository ID": "ID Deposito", "Repository ID": "ID Deposito",
"Repository Master": "Deposito Principale", "Repository Master": "Deposito Principale",
@ -67,7 +67,7 @@
"Rescan Interval (s)": "Intervallo di Scansione dei File (s)", "Rescan Interval (s)": "Intervallo di Scansione dei File (s)",
"Restart": "Riavvia", "Restart": "Riavvia",
"Restart Needed": "Riavvio Necessario", "Restart Needed": "Riavvio Necessario",
"Restarting": "Restarting", "Restarting": "Riavvio",
"Save": "Salva", "Save": "Salva",
"Scanning": "Scansione in corso", "Scanning": "Scansione in corso",
"Select the nodes to share this repository with.": "Seleziona i nodi con i quali vuoi condividere questo deposito.", "Select the nodes to share this repository with.": "Seleziona i nodi con i quali vuoi condividere questo deposito.",
@ -87,8 +87,8 @@
"Syncing": "Sincronizzazione in corso", "Syncing": "Sincronizzazione in corso",
"Syncthing has been shut down.": "Syncthing è stato arrestato.", "Syncthing has been shut down.": "Syncthing è stato arrestato.",
"Syncthing includes the following software or portions thereof:": "Syncthing include i seguenti software o porzioni di questi:", "Syncthing includes the following software or portions thereof:": "Syncthing include i seguenti software o porzioni di questi:",
"Syncthing is restarting.": "Syncthing is restarting.", "Syncthing is restarting.": "Riavvio di Syncthing in corso.",
"Syncthing is upgrading.": "Syncthing is upgrading.", "Syncthing is upgrading.": "Aggiornamento di Syncthing in corso.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing sembra inattivo, oppure c'è un problema con la tua connessione a Internet. Nuovo tentativo…", "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing sembra inattivo, oppure c'è un problema con la tua connessione a Internet. Nuovo tentativo…",
"The aggregated statistics are publicly available at {%url%}.": "Le statistiche aggregate sono disponibili pubblicamente su {{url}}.", "The aggregated statistics are publicly available at {%url%}.": "Le statistiche aggregate sono disponibili pubblicamente su {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configurazione è stata salvata ma non attivata. Devi riavviare Syncthing per attivare la nuova configurazione.", "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configurazione è stata salvata ma non attivata. Devi riavviare Syncthing per attivare la nuova configurazione.",
@ -96,7 +96,7 @@
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID del nodo inserito non sembra valido. Dovrebbe essere una stringa di 52 caratteri costituita da lettere e numeri, con spazi e trattini opzionali.", "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID del nodo inserito non sembra valido. Dovrebbe essere una stringa di 52 caratteri costituita da lettere e numeri, con spazi e trattini opzionali.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID del nodo inserito non sembra valido. Dovrebbe essere una stringa di 52 o 56 caratteri costituita da lettere e numeri, con spazi e trattini opzionali.", "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID del nodo inserito non sembra valido. Dovrebbe essere una stringa di 52 o 56 caratteri costituita da lettere e numeri, con spazi e trattini opzionali.",
"The node ID cannot be blank.": "L'ID del nodo non può essere vuoto.", "The node ID cannot be blank.": "L'ID del nodo non può essere vuoto.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Trova l'ID del nodo nella finestra di dialogo \"Modifica > Mostra ID\" dell'altro nodo e poi inseriscilo qui. Gli spazi e i trattini sono opzionali (ignorati).", "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Trova l'ID nella finestra di dialogo \"Modifica > Mostra ID\" dell'altro nodo, poi inseriscilo qui. Gli spazi e i trattini sono opzionali (ignorati).",
"The number of old versions to keep, per file.": "Il numero di vecchie versioni da mantenere, per file.", "The number of old versions to keep, per file.": "Il numero di vecchie versioni da mantenere, per file.",
"The number of versions must be a number and cannot be blank.": "Il numero di versioni dev'essere un numero e non può essere vuoto.", "The number of versions must be a number and cannot be blank.": "Il numero di versioni dev'essere un numero e non può essere vuoto.",
"The repository ID cannot be blank.": "L'ID del deposito non può essere vuoto.", "The repository ID cannot be blank.": "L'ID del deposito non può essere vuoto.",
@ -106,13 +106,13 @@
"Unknown": "Sconosciuto", "Unknown": "Sconosciuto",
"Up to Date": "Sincronizzato", "Up to Date": "Sincronizzato",
"Upgrade To {%version%}": "Aggiorna Alla {{version}}", "Upgrade To {%version%}": "Aggiorna Alla {{version}}",
"Upgrading": "Upgrading", "Upgrading": "Aggiornamento",
"Upload Rate": "Velocità Upload", "Upload Rate": "Velocità Upload",
"Usage": "Utilizzo", "Usage": "Utilizzo",
"Use Compression": "Utilizza Compressione", "Use Compression": "Utilizza Compressione",
"Use HTTPS for GUI": "Utilizza HTTPS per l'interfaccia grafica", "Use HTTPS for GUI": "Utilizza HTTPS per l'interfaccia grafica",
"Version": "Versione", "Version": "Versione",
"When adding a new node, keep in mind that this node must be added on the other side too.": "Quando aggiungi un nuovo nodo, ricordati di aggiungerlo anche dall'altro lato.", "When adding a new node, keep in mind that this node must be added on the other side too.": "Ora la stessa operazione deve essere eseguita anche nel nuovo nodo inserendo l'ID di questo nodo.",
"When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Quando aggiungi un nuovo deposito ricordati che gli ID vengono utilizzati per collegare i depositi nei nodi. Distinguono maiuscole e minuscole e devono corrispondere esattamente su tutti i nodi.", "When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Quando aggiungi un nuovo deposito ricordati che gli ID vengono utilizzati per collegare i depositi nei nodi. Distinguono maiuscole e minuscole e devono corrispondere esattamente su tutti i nodi.",
"Yes": "Sì", "Yes": "Sì",
"You must keep at least one version.": "È necessario mantenere almeno una versione.", "You must keep at least one version.": "È necessario mantenere almeno una versione.",

View File

@ -11,7 +11,7 @@
"Bugs": "Ошибки", "Bugs": "Ошибки",
"CPU Utilization": "Загрузка ЦПУ", "CPU Utilization": "Загрузка ЦПУ",
"Close": "Закрыть", "Close": "Закрыть",
"Connection Error": "Connection Error", "Connection Error": "Ошибка подключения",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Все права защищены © 2014 Jakob Borg и следующие участники:", "Copyright © 2014 Jakob Borg and the following Contributors:": "Все права защищены © 2014 Jakob Borg и следующие участники:",
"Delete": "Удалить", "Delete": "Удалить",
"Disconnected": "Нет соединения", "Disconnected": "Нет соединения",
@ -47,7 +47,7 @@
"Max Outstanding Requests": "Максимальное количество исходящих запросов", "Max Outstanding Requests": "Максимальное количество исходящих запросов",
"No": "Нет", "No": "Нет",
"Node ID": "ID Узла", "Node ID": "ID Узла",
"Node Identification": "Node Identification", "Node Identification": "Идентификация узла",
"Node Name": "Имя Узла", "Node Name": "Имя Узла",
"Notice": "Внимание", "Notice": "Внимание",
"OK": "ОК", "OK": "ОК",
@ -67,7 +67,7 @@
"Rescan Interval (s)": "Интервал между сканированием (сек)", "Rescan Interval (s)": "Интервал между сканированием (сек)",
"Restart": "Перезапуск", "Restart": "Перезапуск",
"Restart Needed": "Требуется перезапуск", "Restart Needed": "Требуется перезапуск",
"Restarting": "Restarting", "Restarting": "Перезапуск",
"Save": "Сохранить", "Save": "Сохранить",
"Scanning": "Сканирование", "Scanning": "Сканирование",
"Select the nodes to share this repository with.": "Выберите узлы для которых будет доступен данный репозиторий.", "Select the nodes to share this repository with.": "Выберите узлы для которых будет доступен данный репозиторий.",
@ -87,14 +87,14 @@
"Syncing": "Синхронизация", "Syncing": "Синхронизация",
"Syncthing has been shut down.": "Syncthing выключен.", "Syncthing has been shut down.": "Syncthing выключен.",
"Syncthing includes the following software or portions thereof:": "Syncthing включает в себя следующее ПО или его части:", "Syncthing includes the following software or portions thereof:": "Syncthing включает в себя следующее ПО или его части:",
"Syncthing is restarting.": "Syncthing is restarting.", "Syncthing is restarting.": "Перезапуск Syncthing",
"Syncthing is upgrading.": "Syncthing is upgrading.", "Syncthing is upgrading.": "Обновление Syncthing ",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Кажется, Syncthing не запущен или есть проблемы с подключением к Интернету. Переподключаюсь...", "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Кажется, Syncthing не запущен или есть проблемы с подключением к Интернету. Переподключаюсь...",
"The aggregated statistics are publicly available at {%url%}.": "Суммарная статистика общедоступна на {{url}}.", "The aggregated statistics are publicly available at {%url%}.": "Суммарная статистика общедоступна на {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Конфигурация была сохранена но не активирована. Для активации новой конфигурации необходимо рестартовать Syncthing.", "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Конфигурация была сохранена но не активирована. Для активации новой конфигурации необходимо рестартовать Syncthing.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Зашифрованные отчёты об использовании отправляются ежедневно. Они используются для отслеживания проблем, размеров репозиториев и версий Syncthing. Если набор данных в отчёте будет изменён, то вы получите уведомление об этом в этом диалоге.", "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Зашифрованные отчёты об использовании отправляются ежедневно. Они используются для отслеживания проблем, размеров репозиториев и версий Syncthing. Если набор данных в отчёте будет изменён, то вы получите уведомление об этом в этом диалоге.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Введённый ID узла выглядит неправильно: ID должен быть строкой, длинной 52 символа, обязательно содержащей группы букв и цифр которые могут быть разделены пробелами или тире.", "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Введённый ID узла выглядит неправильно: ID должен быть строкой, длинной 52 символа, обязательно содержащей группы букв и цифр которые могут быть разделены пробелами или тире.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.", "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Введённый ID узла выглядит неправильно: ID должен быть строкой, длинной 52 или 56 символов, обязательно содержащей группы букв и цифр которые могут быть разделены пробелами или тире.",
"The node ID cannot be blank.": "ID узла не может быть пустым.", "The node ID cannot be blank.": "ID узла не может быть пустым.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "ID узла можно узнать в окне \"Редактировать > Показать ID\" на требуемом узле. Пробелы и тире используются для удобства и не обязательны (игнорируются).", "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "ID узла можно узнать в окне \"Редактировать > Показать ID\" на требуемом узле. Пробелы и тире используются для удобства и не обязательны (игнорируются).",
"The number of old versions to keep, per file.": "Количество хранимых версий файла.", "The number of old versions to keep, per file.": "Количество хранимых версий файла.",
@ -106,7 +106,7 @@
"Unknown": "Неизвестно", "Unknown": "Неизвестно",
"Up to Date": "Обновлено", "Up to Date": "Обновлено",
"Upgrade To {%version%}": "Обновить до {{version}}", "Upgrade To {%version%}": "Обновить до {{version}}",
"Upgrading": "Upgrading", "Upgrading": "Обновление",
"Upload Rate": "Скорость отдачи", "Upload Rate": "Скорость отдачи",
"Usage": "Справка", "Usage": "Справка",
"Use Compression": "Использовать сжатие", "Use Compression": "Использовать сжатие",

View File

@ -11,20 +11,20 @@
"Bugs": "Hatalar", "Bugs": "Hatalar",
"CPU Utilization": "İşlemci Kullanımı", "CPU Utilization": "İşlemci Kullanımı",
"Close": "Kapat", "Close": "Kapat",
"Connection Error": "Connection Error", "Connection Error": "Bağlantı hatası",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Telif Hakkı © 2014 Jakob Borg ve Katkıda Bulunanlar", "Copyright © 2014 Jakob Borg and the following Contributors:": "Telif hakkı © 2014 Jakob Borg ve aşağıdaki katkıda bulunanlar",
"Delete": "Sil", "Delete": "Sil",
"Disconnected": "Bağlantı Kesildi", "Disconnected": "Bağlantı Kesik",
"Documentation": "Dökümanlar", "Documentation": "Dokümantasyon",
"Download Rate": "İndirme Hızı", "Download Rate": "İndirme Hızı",
"Edit": "Düzenle", "Edit": "Seçenekler",
"Edit Node": "Düğümü Düzenle", "Edit Node": "Düğümü Düzenle",
"Edit Repository": "Depoyu düzenle", "Edit Repository": "Depoyu düzenle",
"Enable UPnP": "UPnP Etkinleştir", "Enable UPnP": "UPnP Etkinleştir",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "IP adresleri eklemek için virgül ile ayırarak \"ip:port\" yazın, ya da \"dynamic\" yazarak otomatik bulma işlemini seçin.", "Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "IP adresleri eklemek için virgül ile ayırarak \"ip:port\" yazın, ya da \"dynamic\" yazarak otomatik bulma işlemini seçin.",
"Error": "Hata", "Error": "Hata",
"File Versioning": "Dosya Sürümlendirme", "File Versioning": "Dosya Sürümlendirme",
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Değişimleri yoklarken dosya izin bilgilerini ihmal et. FAT dosya sistemlerinde kullanın.", "File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Değişim yoklarken dosya izin bilgilerini ihmal et. FAT dosya sisteminde kullanın.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Dosyalar syncthing tarafından değiştirildiğinde ya da silindiğinde, tarih damgalı sürümleri .stversions dizinine taşınır.", "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Dosyalar syncthing tarafından değiştirildiğinde ya da silindiğinde, tarih damgalı sürümleri .stversions dizinine taşınır.",
"Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Dosyalar diğer düğümlerde yapılan değişikliklerden korunur, ancak bu düğümdeki değişiklikler kümedeki diğer düğümlere gönderilir.", "Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Dosyalar diğer düğümlerde yapılan değişikliklerden korunur, ancak bu düğümdeki değişiklikler kümedeki diğer düğümlere gönderilir.",
"Folder": "Dizin", "Folder": "Dizin",
@ -38,23 +38,23 @@
"Idle": "Boşta", "Idle": "Boşta",
"Ignore Permissions": "İzinleri yoksay", "Ignore Permissions": "İzinleri yoksay",
"Keep Versions": "Sürüm tut", "Keep Versions": "Sürüm tut",
"Latest Release": "En son sürüm", "Latest Release": "Son sürüm",
"Local Discovery": "Yerel bulma", "Local Discovery": "Yerel bulma",
"Local Discovery Port": "Yerel bulma portları", "Local Discovery Port": "Yerel bulma portları",
"Local Repository": "Yerel Depo", "Local Repository": "Yerel Depo",
"Master Repo": "Master Depo", "Master Repo": "Ana depo",
"Max File Change Rate (KiB/s)": "Mak. Dosya değiştirme oranı (KB/sn)", "Max File Change Rate (KiB/s)": "Mak. Dosya değiştirme oranı (KB/sn)",
"Max Outstanding Requests": "Maks Öncellikli İstekler", "Max Outstanding Requests": "Maks Öncellikli İstekler",
"No": "Hayır", "No": "Hayır",
"Node ID": "Düğüm ID", "Node ID": "Düğüm ID",
"Node Identification": "Node Identification", "Node Identification": "Düğüm Kimliği",
"Node Name": "Düğüm İsmi", "Node Name": "Düğüm İsmi",
"Notice": "Uyarı", "Notice": "Uyarı",
"OK": "Tamam", "OK": "Tamam",
"Offline": "Çevrim dışı", "Offline": "Çevrim dışı",
"Online": "Çevrim içi", "Online": "Çevrim içi",
"Out Of Sync": "Senkronize değil", "Out Of Sync": "Senkronize edilmemiş",
"Outgoing Rate Limit (KiB/s)": "Yükleme Oranı Limiti (KB/sn)", "Outgoing Rate Limit (KiB/s)": "Yükleme hız sınırı (KB/sn)",
"Override Changes": "Değişiklikleri Geçersiz kıl", "Override Changes": "Değişiklikleri Geçersiz kıl",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Yerel bilgisayardaki depoya ulaşım yolu. Dizin yoksa yaratılacak. (~) karakterinin kısayol olarak kullanılabileceği yol", "Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Yerel bilgisayardaki depoya ulaşım yolu. Dizin yoksa yaratılacak. (~) karakterinin kısayol olarak kullanılabileceği yol",
"Please wait": "Lütfen Bekleyin", "Please wait": "Lütfen Bekleyin",
@ -62,21 +62,21 @@
"RAM Utilization": "RAM Kullanımı", "RAM Utilization": "RAM Kullanımı",
"Reconnect Interval (s)": "Yeniden bağlanma süresi (sn)", "Reconnect Interval (s)": "Yeniden bağlanma süresi (sn)",
"Repository ID": "Depo ID", "Repository ID": "Depo ID",
"Repository Master": "Master Depo", "Repository Master": "Ana depo",
"Repository Path": "Depo Yolu", "Repository Path": "Depo Yolu",
"Rescan Interval (s)": "Yeni tarama süresi (sn)", "Rescan Interval (s)": "Yeni tarama süresi (sn)",
"Restart": "Yeniden Başlat", "Restart": "Yeniden Başlat",
"Restart Needed": "Yeniden başlatma gereklidir", "Restart Needed": "Yeniden başlatma gereklidir",
"Restarting": "Restarting", "Restarting": "Yeniden başlatılıyor",
"Save": "Kaydet", "Save": "Kaydet",
"Scanning": "Taranıyor", "Scanning": "Taranıyor",
"Select the nodes to share this repository with.": "Bu depo ile paylaşılacak olan düğümü seçin.", "Select the nodes to share this repository with.": "Bu deponun paylaşılacağı düğümleri seçin.",
"Settings": "Ayarlar", "Settings": "Ayarlar",
"Share With Nodes": "Düğüm ile paylaş", "Share With Nodes": "Paylaşılan düğümler:",
"Shared With": "ile paylaş", "Shared With": "Paylaşılan düğümler",
"Short identifier for the repository. Must be the same on all cluster nodes.": "Depo için kısa tanımlayıcı. Tüm küme düğümlerinde aynı olmalı.", "Short identifier for the repository. Must be the same on all cluster nodes.": "Depo için kısa tanımlayıcı. Kümedeki tüm düğümlerde aynı olmalı.",
"Show ID": "ID Göster", "Show ID": "ID Göster",
"Shown instead of Node ID in the cluster status.": "Küme durumu yerine Düğüm ID göster.", "Shown instead of Node ID in the cluster status.": "Ana ekranda Düğüm ID yerine bunu göster.",
"Shutdown": "Kapat", "Shutdown": "Kapat",
"Source Code": "Kaynak Kodu", "Source Code": "Kaynak Kodu",
"Start Browser": "Tarayıcıyı Başlat", "Start Browser": "Tarayıcıyı Başlat",
@ -87,14 +87,14 @@
"Syncing": "Senkronize ediliyor", "Syncing": "Senkronize ediliyor",
"Syncthing has been shut down.": "Syncthing durduruldu", "Syncthing has been shut down.": "Syncthing durduruldu",
"Syncthing includes the following software or portions thereof:": "Syncthing aşağıdaki yazılımları veya bunların bölümlerini içermektedir:", "Syncthing includes the following software or portions thereof:": "Syncthing aşağıdaki yazılımları veya bunların bölümlerini içermektedir:",
"Syncthing is restarting.": "Syncthing is restarting.", "Syncthing is restarting.": "Syncthing yeniden başlatılıyor.",
"Syncthing is upgrading.": "Syncthing is upgrading.", "Syncthing is upgrading.": "Syncthing yükseltiliyor.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing görünüşe durdu veya internetin bağlantınızda problem var. Tekrar deniyor....", "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing görünüşe durdu veya internetin bağlantınızda problem var. Tekrar deniyor....",
"The aggregated statistics are publicly available at {%url%}.": "The aggregated statistics are publicly available at {{url}}.", "The aggregated statistics are publicly available at {%url%}.": "Toplanan halka açık istatistiklere ulaşabileceğiniz adres {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Ayarlar kaydedildi ancak aktifleştirilmedi. Aktifleştirmek için Syncthing yeniden başlatılmalı.", "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Ayarlar kaydedildi ancak aktifleştirilmedi. Aktifleştirmek için Syncthing yeniden başlatılmalı.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Şifrelenmiş kullanım bilgisi günlük olarak gönderilir. Platform, depo büyüklüğü ve uygulama sürümü hakkında bilgi toplanır. Toplanan bilgi çeşidi değişecek olursa, sizden tekrar onay istenecek.", "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Şifrelenmiş kullanım bilgisi günlük olarak gönderilir. Platform, depo büyüklüğü ve uygulama sürümü hakkında bilgi toplanır. Toplanan bilgi çeşidi değişecek olursa, sizden tekrar onay istenecek.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Girilen düğüm ID'si geçerli gibi gözükmüyor. 52 karakter uzunluğunda, harf ve rakamlardan oluşmalı. Boşlukların ve kısa çizgilerin olup olmaması önemli değildir.", "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Girilen düğüm ID'si geçerli gibi gözükmüyor. 52 karakter uzunluğunda, harf ve rakamlardan oluşmalı. Boşlukların ve kısa çizgilerin olup olmaması önemli değildir.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.", "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Girilen düğüm ID'si geçerli gibi gözükmüyor. 52 ya da 56 karakter uzunluğunda, harf ve rakamlardan oluşmalı. Boşlukların ve kısa çizgilerin olup olmaması önemli değildir.",
"The node ID cannot be blank.": "Düğüm ID'si boş olamaz.", "The node ID cannot be blank.": "Düğüm ID'si boş olamaz.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Buraya girilecek düğüm ID'si diğer düğümde \"Düzenle > ID Göster\" menüsünden bulunabilir. Boşluk ve kısa çizginin olup olmaması önemli değildir. (İhmal edilir)", "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "Buraya girilecek düğüm ID'si diğer düğümde \"Düzenle > ID Göster\" menüsünden bulunabilir. Boşluk ve kısa çizginin olup olmaması önemli değildir. (İhmal edilir)",
"The number of old versions to keep, per file.": "Dosya başına saklanacak eski sürüm.", "The number of old versions to keep, per file.": "Dosya başına saklanacak eski sürüm.",
@ -106,8 +106,8 @@
"Unknown": "Bilinmiyor", "Unknown": "Bilinmiyor",
"Up to Date": "Güncel", "Up to Date": "Güncel",
"Upgrade To {%version%}": "{{version}} sürümüne yükselt", "Upgrade To {%version%}": "{{version}} sürümüne yükselt",
"Upgrading": "Upgrading", "Upgrading": "Yükseltiliyor",
"Upload Rate": "Yükleme Oranı", "Upload Rate": "Yükleme hızı",
"Usage": "Kullanım", "Usage": "Kullanım",
"Use Compression": "Sıkıştırma kullan", "Use Compression": "Sıkıştırma kullan",
"Use HTTPS for GUI": "GUI için HTTPS kullan", "Use HTTPS for GUI": "GUI için HTTPS kullan",
@ -116,5 +116,5 @@
"When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Unutmayın ki; Depo ID, depoları düğümler arasında bağlamak için kullanılıyor. Büyük - küçük harf duyarlı, ve bütün düğümlerde aynı olmalı.", "When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Unutmayın ki; Depo ID, depoları düğümler arasında bağlamak için kullanılıyor. Büyük - küçük harf duyarlı, ve bütün düğümlerde aynı olmalı.",
"Yes": "Evet", "Yes": "Evet",
"You must keep at least one version.": "En az bir sürümü tutmalısınız.", "You must keep at least one version.": "En az bir sürümü tutmalısınız.",
"items": "öğeler" "items": "öğe"
} }

120
gui/lang-uk.json Normal file
View File

@ -0,0 +1,120 @@
{
"API Key": "API ключ",
"About": "Про програму",
"Add Node": "Додати вузол",
"Add Repository": "Додати репозиторій",
"Address": "Адреса",
"Addresses": "Адреси",
"Allow Anonymous Usage Reporting?": "Дозволити програмі збирати анонімну статистику викроистання?",
"Announce Server": "Сервер анонсування",
"Anonymous Usage Reporting": "Анонімна статистика використання",
"Bugs": "Помилки",
"CPU Utilization": "Навантаження CPU",
"Close": "Закрити",
"Connection Error": "Помилка з’єднання",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg та наступні контриб’ютори:",
"Delete": "Видалити",
"Disconnected": "З’єднання відсутнє",
"Documentation": "Документація",
"Download Rate": "Швидкість завантаження",
"Edit": "Редагувати",
"Edit Node": "Редагувати вузол",
"Edit Repository": "Редагувати репозиторій",
"Enable UPnP": "Увімкнути UPnP",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Уведіть адреси \"ip:port\" розділені комою, або слово \"dynamic\" для здійснення автоматичного виявлення адреси.",
"Error": "Помилка",
"File Versioning": "Керування версіями",
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Біти прав доступу до файлів будуть проігноровані під час визначення змін. Використовуйте на файлових системах FAT.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Файли будуть поміщатися у директорію .stversions із відповідною позначкою часу, коли вони будуть замінятися або видалятися програмою.",
"Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Файли захищені від змін на інших вузлах, але зміни на цьому вузлі будуть розіслані на інші вузли кластера.",
"Folder": "Директорія",
"GUI Authentication Password": "Пароль для доступу до панелі управління",
"GUI Authentication User": "Логін користувача для доступу до панелі управління",
"GUI Listen Addresses": "Адреса доступу до панелі управління",
"Generate": "Згенерувати",
"Global Discovery": "Глобальне виявлення",
"Global Discovery Server": "Сервер для глобального виявлення",
"Global Repository": "Глобальний репозиторій",
"Idle": "Очікування",
"Ignore Permissions": "Ігнорувати права доступу до файлів",
"Keep Versions": "Зберігати версії",
"Latest Release": "Останній реліз",
"Local Discovery": "Локальне виявлення",
"Local Discovery Port": "Локальний порт для виявлення",
"Local Repository": "Локальний репозиторій",
"Master Repo": "Центральний репозиторій",
"Max File Change Rate (KiB/s)": "Максимальна швидкість змінення файлів (КіБ/с)",
"Max Outstanding Requests": "Максимальна кількість вихідних запитів",
"No": "Ні",
"Node ID": "ID вузла",
"Node Identification": "Ідентифікатор вузла",
"Node Name": "Назва вузла",
"Notice": "Повідомлення",
"OK": "OK",
"Offline": "Офлайн",
"Online": "Онлайн",
"Out Of Sync": "Не синхронізовано",
"Outgoing Rate Limit (KiB/s)": "Ліміт швидкості віддачі (КіБ/с)",
"Override Changes": "Перезаписати зміни",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Шлях до репозиторія на локальному комп’ютері. Буде створений, якщо такий не існує. Символ тильди (~) може бути використаний як ярлик для",
"Please wait": "Будь ласка, зачекайте",
"Preview Usage Report": "Попередній перегляд статистичного звіту",
"RAM Utilization": "Використання RAM",
"Reconnect Interval (s)": "Інтервал повторного з’єднання (с)",
"Repository ID": "ID репозиторія",
"Repository Master": "Центральний репозиторій",
"Repository Path": "Шлях до репозиторія",
"Rescan Interval (s)": "Інтервал для повторного сканування (с)",
"Restart": "Перезапуск",
"Restart Needed": "Необхідний перезапуск",
"Restarting": "Відбувається перезапуск",
"Save": "Зберегти",
"Scanning": "Сканування",
"Select the nodes to share this repository with.": "Оберіть вузли із якими обміняти даний репозиторій.",
"Settings": "Налаштування",
"Share With Nodes": "Обмінювати із вузлами",
"Shared With": "Доступно для",
"Short identifier for the repository. Must be the same on all cluster nodes.": "Короткий ідентифікатор репозиторія. Повинен бути однаковим на всіх вузлах кластера.",
"Show ID": "Показати ID",
"Shown instead of Node ID in the cluster status.": "Показано замість ID вузла в статусі кластера.",
"Shutdown": "Вимкнути",
"Source Code": "Сирцевий код",
"Start Browser": "Запустити браузер",
"Stopped": "Зупинено",
"Support / Forum": "Підтримка / Форум",
"Sync Protocol Listen Addresses": "Адреса панелі управління",
"Synchronization": "Синхронізація",
"Syncing": "Синхронізація",
"Syncthing has been shut down.": "Syncthing вимкнено (закрито).",
"Syncthing includes the following software or portions thereof:": "Syncthing містить наступне програмне забезпечення (або його частини):",
"Syncthing is restarting.": "Syncthing перезавантажується.",
"Syncthing is upgrading.": "Syncthing оновлюється.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Схоже на те, що Syncthing закритий, або виникла проблема із Інтернет-з’єднанням. Проводиться повторна спроба з’єднання…",
"The aggregated statistics are publicly available at {%url%}.": "Зібрана статистика публічно доступна за посиланням {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Конфігурацію збережено, але не активовано. Необхідно перезапустити Syncthing для того, щоби активувати нову конфігурацію.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Зашифрована статистика використання відсилається щоденно. Вона використовується для того, щоб розробники розуміли, на яких платформах працює програма, розміри репозиторіїв та версії програми. Якщо набір даних, що збирається зазнає змін, ви обов’язково будете повідомлені через це діалогове вікно.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "Введений ID вузла невалідний. Ідентифікатор має вигляд строки довжиною 52 символи, що містить цифри та літери, із опціональними пробілами та тире.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Введений ID вузла невалідний. Ідентифікатор має вигляд строки довжиною 52 або 56 символів, що містить цифри та літери, із опціональними пробілами та тире.",
"The node ID cannot be blank.": "ID вузла не може бути порожнім.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "ID вузла, який необхідно додати. Може бути знайдений у вікні \"Редагувати > Показати ID\" на іншому вузлі. Пробіли та тире опціональні (вони ігноруються програмою).",
"The number of old versions to keep, per file.": "Кількість старих версій, яку необхідно зберігати для кожного файлу.",
"The number of versions must be a number and cannot be blank.": "Кількість версій повинна бути цифрою та не може бути порожньою.",
"The repository ID cannot be blank.": "ID репозиторія не може бути порожнім.",
"The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "ID репозиторія повинен бути коротким ідентифікатором (64 символи або менше), що містить лише цифри та літери, знак крапки (.), тире (-) та нижнього підкреслення (_).",
"The repository ID must be unique.": "ID репозиторія повинен бути унікальним.",
"The repository path cannot be blank.": "Шлях до репозиторія не може бути порожнім.",
"Unknown": "Невідомо",
"Up to Date": "Актуальа версія",
"Upgrade To {%version%}": "Оновити до {{version}}",
"Upgrading": "Оновлення",
"Upload Rate": "Швидкість віддачі",
"Usage": "Допомога",
"Use Compression": "Використовувати компресію",
"Use HTTPS for GUI": "Використовувати HTTPS для доступу до панелі управління",
"Version": "Версія",
"When adding a new node, keep in mind that this node must be added on the other side too.": "Коли додаєте новий вузол, пам’ятайте, що цей вузол повинен бути доданий і на іншій стороні.",
"When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Коли додаєте новий репозиторій, пам’ятайте, що ID репозиторія використовується для того, щоб зв’язувати репозиторії разом між вузлами. Назви є чутливими до регістра та повинні співпадати точно між усіма вузлами.",
"Yes": "Так",
"You must keep at least one version.": "Ви повинні зберігати щонайменше одну версію.",
"items": "елементи"
}

View File

@ -1 +1 @@
var validLangs = ["da","de","el","en","es","fr","it","nl","pt","ru","sv","tr"] var validLangs = ["da","de","el","en","es","fr","it","nl","pt","ru","sv","tr","uk"]

View File

@ -75,7 +75,6 @@ type Model struct {
repoFiles map[string]*files.Set // repo -> files repoFiles map[string]*files.Set // repo -> files
repoNodes map[string][]protocol.NodeID // repo -> nodeIDs repoNodes map[string][]protocol.NodeID // repo -> nodeIDs
nodeRepos map[protocol.NodeID][]string // nodeID -> repos nodeRepos map[protocol.NodeID][]string // nodeID -> repos
suppressor map[string]*suppressor // repo -> suppressor
rmut sync.RWMutex // protects the above rmut sync.RWMutex // protects the above
repoState map[string]repoState // repo -> state repoState map[string]repoState // repo -> state
@ -90,8 +89,6 @@ type Model struct {
sentLocalVer map[protocol.NodeID]map[string]uint64 sentLocalVer map[protocol.NodeID]map[string]uint64
slMut sync.Mutex slMut sync.Mutex
sup suppressor
addedRepo bool addedRepo bool
started bool started bool
} }
@ -117,12 +114,10 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
nodeRepos: make(map[protocol.NodeID][]string), nodeRepos: make(map[protocol.NodeID][]string),
repoState: make(map[string]repoState), repoState: make(map[string]repoState),
repoStateChanged: make(map[string]time.Time), repoStateChanged: make(map[string]time.Time),
suppressor: make(map[string]*suppressor),
protoConn: make(map[protocol.NodeID]protocol.Connection), protoConn: make(map[protocol.NodeID]protocol.Connection),
rawConn: make(map[protocol.NodeID]io.Closer), rawConn: make(map[protocol.NodeID]io.Closer),
nodeVer: make(map[protocol.NodeID]string), nodeVer: make(map[protocol.NodeID]string),
sentLocalVer: make(map[protocol.NodeID]map[string]uint64), sentLocalVer: make(map[protocol.NodeID]map[string]uint64),
sup: suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
} }
var timeout = 20 * 60 // seconds var timeout = 20 * 60 // seconds
@ -213,15 +208,9 @@ func (m *Model) Completion(node protocol.NodeID, repo string) float64 {
return 0 // Repo doesn't exist, so we hardly have any of it return 0 // Repo doesn't exist, so we hardly have any of it
} }
rf.WithGlobal(func(f protocol.FileInfo) bool { rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
if !protocol.IsDeleted(f.Flags) { if !f.IsDeleted() {
var size int64 tot += f.Size()
if protocol.IsDirectory(f.Flags) {
size = zeroEntrySize
} else {
size = f.Size()
}
tot += size
} }
return true return true
}) })
@ -231,20 +220,19 @@ func (m *Model) Completion(node protocol.NodeID, repo string) float64 {
} }
var need int64 var need int64
rf.WithNeed(node, func(f protocol.FileInfo) bool { rf.WithNeedTruncated(node, func(f protocol.FileIntf) bool {
if !protocol.IsDeleted(f.Flags) { if !f.IsDeleted() {
var size int64 need += f.Size()
if protocol.IsDirectory(f.Flags) {
size = zeroEntrySize
} else {
size = f.Size()
}
need += size
} }
return true return true
}) })
return 100 * (1 - float64(need)/float64(tot)) res := 100 * (1 - float64(need)/float64(tot))
if debug {
l.Debugf("Completion(%s, %q): %f (%d / %d)", node, repo, res, need, tot)
}
return res
} }
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) { func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
@ -257,18 +245,13 @@ func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
return return
} }
func sizeOfFile(f protocol.FileInfo) (files, deleted int, bytes int64) { func sizeOfFile(f protocol.FileIntf) (files, deleted int, bytes int64) {
if !protocol.IsDeleted(f.Flags) { if !f.IsDeleted() {
files++ files++
if !protocol.IsDirectory(f.Flags) {
bytes += f.Size()
} else {
bytes += zeroEntrySize
}
} else { } else {
deleted++ deleted++
bytes += zeroEntrySize
} }
bytes += f.Size()
return return
} }
@ -278,7 +261,7 @@ func (m *Model) GlobalSize(repo string) (files, deleted int, bytes int64) {
m.rmut.RLock() m.rmut.RLock()
defer m.rmut.RUnlock() defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok { if rf, ok := m.repoFiles[repo]; ok {
rf.WithGlobal(func(f protocol.FileInfo) bool { rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f) fs, de, by := sizeOfFile(f)
files += fs files += fs
deleted += de deleted += de
@ -295,7 +278,7 @@ func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
m.rmut.RLock() m.rmut.RLock()
defer m.rmut.RUnlock() defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok { if rf, ok := m.repoFiles[repo]; ok {
rf.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool { rf.WithHaveTruncated(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f) fs, de, by := sizeOfFile(f)
files += fs files += fs
deleted += de deleted += de
@ -311,13 +294,16 @@ func (m *Model) NeedSize(repo string) (files int, bytes int64) {
m.rmut.RLock() m.rmut.RLock()
defer m.rmut.RUnlock() defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok { if rf, ok := m.repoFiles[repo]; ok {
rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool { rf.WithNeedTruncated(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f) fs, de, by := sizeOfFile(f)
files += fs + de files += fs + de
bytes += by bytes += by
return true return true
}) })
} }
if debug {
l.Debugf("NeedSize(%q): %d %d", repo, files, bytes)
}
return return
} }
@ -327,8 +313,8 @@ func (m *Model) NeedFilesRepo(repo string) []protocol.FileInfo {
defer m.rmut.RUnlock() defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok { if rf, ok := m.repoFiles[repo]; ok {
fs := make([]protocol.FileInfo, 0, indexBatchSize) fs := make([]protocol.FileInfo, 0, indexBatchSize)
rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool { rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
fs = append(fs, f) fs = append(fs, f.(protocol.FileInfo))
return len(fs) < indexBatchSize return len(fs) < indexBatchSize
}) })
return fs return fs
@ -602,7 +588,8 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, rep
maxLocalVer := uint64(0) maxLocalVer := uint64(0)
var err error var err error
fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool { fs.WithHave(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
if f.LocalVersion <= minLocalVer { if f.LocalVersion <= minLocalVer {
return true return true
} }
@ -694,7 +681,6 @@ func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
m.rmut.Lock() m.rmut.Lock()
m.repoCfgs[cfg.ID] = cfg m.repoCfgs[cfg.ID] = cfg
m.repoFiles[cfg.ID] = files.NewSet(cfg.ID, m.db) m.repoFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}
m.repoNodes[cfg.ID] = make([]protocol.NodeID, len(cfg.Nodes)) m.repoNodes[cfg.ID] = make([]protocol.NodeID, len(cfg.Nodes))
for i, node := range cfg.Nodes { for i, node := range cfg.Nodes {
@ -771,7 +757,6 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
IgnoreFile: ".stignore", IgnoreFile: ".stignore",
BlockSize: scanner.StandardBlockSize, BlockSize: scanner.StandardBlockSize,
TempNamer: defTempNamer, TempNamer: defTempNamer,
Suppressor: m.suppressor[repo],
CurrentFiler: cFiler{m, repo}, CurrentFiler: cFiler{m, repo},
IgnorePerms: m.repoCfgs[repo].IgnorePerms, IgnorePerms: m.repoCfgs[repo].IgnorePerms,
} }
@ -809,7 +794,8 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
batch = batch[:0] batch = batch[:0]
// TODO: We should limit the Have scanning to start at sub // TODO: We should limit the Have scanning to start at sub
seenPrefix := false seenPrefix := false
fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool { fs.WithHaveTruncated(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
if !strings.HasPrefix(f.Name, sub) { if !strings.HasPrefix(f.Name, sub) {
return !seenPrefix return !seenPrefix
} }
@ -821,10 +807,12 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
} }
if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) { if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
// File has been deleted // File has been deleted
f.Blocks = nil nf := protocol.FileInfo{
f.Flags |= protocol.FlagDeleted Name: f.Name,
f.Version = lamport.Default.Tick(f.Version) Flags: f.Flags | protocol.FlagDeleted,
f.LocalVersion = 0 Modified: f.Modified,
Version: lamport.Default.Tick(f.Version),
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{ events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"repo": repo, "repo": repo,
"name": f.Name, "name": f.Name,
@ -832,7 +820,7 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
"flags": fmt.Sprintf("0%o", f.Flags), "flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(), "size": f.Size(),
}) })
batch = append(batch, f) batch = append(batch, nf)
} }
} }
return true return true
@ -905,7 +893,8 @@ func (m *Model) Override(repo string) {
m.rmut.RUnlock() m.rmut.RUnlock()
batch := make([]protocol.FileInfo, 0, indexBatchSize) batch := make([]protocol.FileInfo, 0, indexBatchSize)
fs.WithNeed(protocol.LocalNodeID, func(need protocol.FileInfo) bool { fs.WithNeed(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
need := fi.(protocol.FileInfo)
if len(batch) == indexBatchSize { if len(batch) == indexBatchSize {
fs.Update(protocol.LocalNodeID, batch) fs.Update(protocol.LocalNodeID, batch)
batch = batch[:0] batch = batch[:0]

View File

@ -728,7 +728,7 @@ func (p *puller) closeFile(f protocol.FileInfo) {
l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, err) l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
return return
} }
hb, _ := scanner.Blocks(fd, scanner.StandardBlockSize) hb, _ := scanner.Blocks(fd, scanner.StandardBlockSize, f.Size())
fd.Close() fd.Close()
if l0, l1 := len(hb), len(f.Blocks); l0 != l1 { if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {

View File

@ -1,81 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"os"
"sync"
"time"
)
const (
	// MaxChangeHistory is the number of most recent changes tracked per
	// file when estimating its rate of change.
	MaxChangeHistory = 4
)

// change records a single observed modification to a file: the file's
// size at that point and the time the change was seen.
type change struct {
	size int64
	when time.Time
}

// changeHistory is a sliding window of the most recent changes to one
// file, from which an average change bandwidth can be computed.
type changeHistory struct {
	changes []change
	next    int64 // NOTE(review): not referenced in the visible code — purpose unconfirmed
	prevSup bool  // whether the previous suppress check flagged this file
}

// suppressor keeps per-file change histories and decides when a file is
// changing faster (in bytes/s) than the configured threshold.
type suppressor struct {
	sync.Mutex
	changes   map[string]changeHistory
	threshold int64 // bytes/s
}
// bandwidth returns the average rate of change in bytes per second,
// measured from the oldest recorded change up to the time t. It returns
// zero when there is no history, or when no time has elapsed since the
// first recorded change (avoiding a division by zero).
func (h changeHistory) bandwidth(t time.Time) int64 {
	if len(h.changes) == 0 {
		return 0
	}

	first := h.changes[0].when
	if t == first {
		return 0
	}

	total := float64(0)
	for i := range h.changes {
		total += float64(h.changes[i].size)
	}
	return int64(total / t.Sub(first).Seconds())
}
// append records a change of the given size at time t. When the window
// already holds MaxChangeHistory entries, the oldest one is evicted so
// the history length stays bounded.
func (h *changeHistory) append(size int64, t time.Time) {
	if len(h.changes) == MaxChangeHistory {
		h.changes = h.changes[1:MaxChangeHistory]
	}
	h.changes = append(h.changes, change{size: size, when: t})
}
// Suppress reports whether updates to the named file should currently
// be suppressed (first return) and whether they were suppressed at the
// previous check (second return), feeding the file's current size and
// the present wall-clock time into its change history.
func (s *suppressor) Suppress(name string, fi os.FileInfo) (cur, prev bool) {
	return s.suppress(name, fi.Size(), time.Now())
}
// suppress records a change of the given size at time t for the named
// file. It returns whether the file's estimated change bandwidth now
// exceeds the threshold (first return), and the suppression state from
// the previous call (second return). Suppressed changes are not added
// to the history, so a suppressed file's bandwidth estimate can decay
// back below the threshold over time.
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
	s.Lock()
	// Released via defer rather than an explicit Unlock before return,
	// so the mutex cannot be left held on a panic or a future early return.
	defer s.Unlock()

	if s.changes == nil {
		// Lazy init keeps the zero value of suppressor usable.
		s.changes = make(map[string]changeHistory)
	}

	h := s.changes[name]
	sup := h.bandwidth(t) > s.threshold
	prevSup := h.prevSup
	h.prevSup = sup
	if !sup {
		h.append(size, t)
	}
	s.changes[name] = h

	return sup, prevSup
}

View File

@ -1,117 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"testing"
"time"
)
// TestSuppressor walks a fixed sequence of change events through a
// suppressor with a 10000 bytes/s threshold, checking the computed
// bandwidth and the (current, previous) suppression status after each
// step.
func TestSuppressor(t *testing.T) {
	s := suppressor{threshold: 10000}
	t0 := time.Now()

	// The very first change has no history to compute bandwidth from.
	t1 := t0
	sup, prev := s.suppress("foo", 10000, t1)
	if sup {
		t.Fatal("Never suppress first change")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is 10000 / 10 = 1000
	t1 = t0.Add(10 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 1000 {
		t.Errorf("Incorrect bw %d", bw)
	}
	sup, prev = s.suppress("foo", 10000, t1)
	if sup {
		t.Fatal("Should still be fine")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is (10000 + 10000) / 11 = 1818
	t1 = t0.Add(11 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 1818 {
		t.Errorf("Incorrect bw %d", bw)
	}
	sup, prev = s.suppress("foo", 100500, t1)
	if sup {
		t.Fatal("Should still be fine")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is (10000 + 10000 + 100500) / 12 = 10041
	t1 = t0.Add(12 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 10041 {
		t.Errorf("Incorrect bw %d", bw)
	}
	// Over threshold now: suppressed, and the suppressed change is not
	// added to the history.
	sup, prev = s.suppress("foo", 10000000, t1) // value will be ignored
	if !sup {
		t.Fatal("Should be over threshold")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is (10000 + 10000 + 100500) / 15 = 8033
	t1 = t0.Add(15 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 8033 {
		t.Errorf("Incorrect bw %d", bw)
	}
	// Bandwidth decayed below threshold; prev still reflects the
	// suppression from the previous call.
	sup, prev = s.suppress("foo", 10000000, t1)
	if sup {
		t.Fatal("Should be Ok")
	}
	if !prev {
		t.Fatal("Incorrect prev status")
	}
}
// TestHistory verifies the sliding-window behavior of changeHistory:
// entries accumulate up to MaxChangeHistory, after which each append
// evicts the oldest entry.
func TestHistory(t *testing.T) {
	var h changeHistory
	start := time.Now()

	h.append(40, start)
	if l := len(h.changes); l != 1 {
		t.Errorf("Incorrect history length %d", l)
	}
	if s := h.changes[0].size; s != 40 {
		t.Errorf("Incorrect first record size %d", s)
	}

	// Fill the window to exactly MaxChangeHistory entries.
	for i := 1; i < MaxChangeHistory; i++ {
		h.append(int64(40+i), start.Add(time.Duration(i)*time.Second))
	}
	if l := len(h.changes); l != MaxChangeHistory {
		t.Errorf("Incorrect history length %d", l)
	}
	if s := h.changes[0].size; s != 40 {
		t.Errorf("Incorrect first record size %d", s)
	}
	if s := h.changes[MaxChangeHistory-1].size; s != 40+MaxChangeHistory-1 {
		t.Errorf("Incorrect last record size %d", s)
	}

	// One more append must evict the oldest entry (size 40).
	h.append(999, start.Add(time.Duration(999)*time.Second))
	if l := len(h.changes); l != MaxChangeHistory {
		t.Errorf("Incorrect history length %d", l)
	}
	if s := h.changes[0].size; s != 41 {
		t.Errorf("Incorrect first record size %d", s)
	}
	if s := h.changes[MaxChangeHistory-1].size; s != 999 {
		t.Errorf("Incorrect last record size %d", s)
	}
}

View File

@ -26,12 +26,50 @@ func (f FileInfo) String() string {
} }
func (f FileInfo) Size() (bytes int64) { func (f FileInfo) Size() (bytes int64) {
if IsDeleted(f.Flags) || IsDirectory(f.Flags) {
return 128
}
for _, b := range f.Blocks { for _, b := range f.Blocks {
bytes += int64(b.Size) bytes += int64(b.Size)
} }
return return
} }
// IsDeleted reports whether the deleted flag is set on this file.
func (f FileInfo) IsDeleted() bool {
	return IsDeleted(f.Flags)
}
// FileInfoTruncated is used for unmarshalling a FileInfo structure but
// skipping the actual block list; only the block count is retained, from
// which an approximate size can be derived.
type FileInfoTruncated struct {
	Name         string // max:1024
	Flags        uint32
	Modified     int64
	Version      uint64
	LocalVersion uint64
	NumBlocks    uint32
}
// Size returns a statistical guess on the size, not the exact figure:
// deleted entries and directories count as a nominal 128 bytes, and
// regular files are estimated from the block count assuming full-size
// blocks with a half-full final block.
func (f FileInfoTruncated) Size() int64 {
	if IsDeleted(f.Flags) || IsDirectory(f.Flags) {
		return 128
	}
	// Early return instead of if/else: keeps the happy path flat
	// (Effective Go / staticcheck indent-error-flow).
	if f.NumBlocks < 2 {
		return BlockSize / 2
	}
	return int64(f.NumBlocks-1)*BlockSize + BlockSize/2
}
// IsDeleted reports whether the deleted flag is set on this file.
func (f FileInfoTruncated) IsDeleted() bool {
	return IsDeleted(f.Flags)
}
// FileIntf is the interface implemented by both FileInfo and
// FileInfoTruncated, covering what can be answered without access to
// the block list.
type FileIntf interface {
	Size() int64
	IsDeleted() bool
}
type BlockInfo struct { type BlockInfo struct {
Offset int64 // noencode (cache only) Offset int64 // noencode (cache only)
Size uint32 Size uint32

View File

@ -199,6 +199,98 @@ func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
/* /*
FileInfoTruncated Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Name |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Name (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Modified (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Local Version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Num Blocks |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct FileInfoTruncated {
string Name<1024>;
unsigned int Flags;
hyper Modified;
unsigned hyper Version;
unsigned hyper LocalVersion;
unsigned int NumBlocks;
}
*/
// EncodeXDR writes the XDR encoding of o to w, returning the number of
// bytes written and any error encountered.
func (o FileInfoTruncated) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}
// MarshalXDR returns the XDR encoding of o as a freshly allocated byte
// slice (128 bytes of initial capacity).
func (o FileInfoTruncated) MarshalXDR() []byte {
	return o.AppendXDR(make([]byte, 0, 128))
}
// AppendXDR appends the XDR encoding of o to bs and returns the
// extended slice. Encoding errors are not surfaced here.
func (o FileInfoTruncated) AppendXDR(bs []byte) []byte {
	var aw = xdr.AppendWriter(bs)
	var xw = xdr.NewWriter(&aw)
	o.encodeXDR(xw)
	return []byte(aw)
}
// encodeXDR writes all fields in wire order, returning the total bytes
// written and the writer's latched error. Encoding fails up front with
// xdr.ErrElementSizeExceeded when Name exceeds the 1024-byte protocol
// limit.
func (o FileInfoTruncated) encodeXDR(xw *xdr.Writer) (int, error) {
	if len(o.Name) > 1024 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteString(o.Name)
	xw.WriteUint32(o.Flags)
	xw.WriteUint64(uint64(o.Modified))
	xw.WriteUint64(o.Version)
	xw.WriteUint64(o.LocalVersion)
	xw.WriteUint32(o.NumBlocks)
	return xw.Tot(), xw.Error()
}
// DecodeXDR reads an XDR-encoded FileInfoTruncated from r into o.
func (o *FileInfoTruncated) DecodeXDR(r io.Reader) error {
	return o.decodeXDR(xdr.NewReader(r))
}
// UnmarshalXDR decodes an XDR-encoded FileInfoTruncated from bs into o.
func (o *FileInfoTruncated) UnmarshalXDR(bs []byte) error {
	return o.decodeXDR(xdr.NewReader(bytes.NewReader(bs)))
}
// decodeXDR reads all fields in wire order; read errors are latched in
// the reader and reported once at the end.
func (o *FileInfoTruncated) decodeXDR(xr *xdr.Reader) error {
	o.Name = xr.ReadStringMax(1024)
	o.Flags = xr.ReadUint32()
	o.Modified = int64(xr.ReadUint64())
	o.Version = xr.ReadUint64()
	o.LocalVersion = xr.ReadUint64()
	o.NumBlocks = xr.ReadUint32()
	return xr.Error()
}
/*
BlockInfo Structure: BlockInfo Structure:
0 1 2 3 0 1 2 3

View File

@ -49,7 +49,15 @@ func hashFile(dir string, blockSize int, outbox, inbox chan protocol.FileInfo) {
continue continue
} }
blocks, err := Blocks(fd, blockSize) fi, err := fd.Stat()
if err != nil {
fd.Close()
if debug {
l.Debugln("stat:", err)
}
continue
}
blocks, err := Blocks(fd, blockSize, fi.Size())
fd.Close() fd.Close()
if err != nil { if err != nil {

View File

@ -17,12 +17,15 @@ const StandardBlockSize = 128 * 1024
var sha256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55} var sha256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
// Blocks returns the blockwise hash of the reader. // Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) ([]protocol.BlockInfo, error) { func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
var blocks []protocol.BlockInfo var blocks []protocol.BlockInfo
if sizehint > 0 {
blocks = make([]protocol.BlockInfo, 0, int(sizehint/int64(blocksize)))
}
var offset int64 var offset int64
hf := sha256.New()
for { for {
lr := &io.LimitedReader{R: r, N: int64(blocksize)} lr := &io.LimitedReader{R: r, N: int64(blocksize)}
hf := sha256.New()
n, err := io.Copy(hf, lr) n, err := io.Copy(hf, lr)
if err != nil { if err != nil {
return nil, err return nil, err
@ -39,6 +42,8 @@ func Blocks(r io.Reader, blocksize int) ([]protocol.BlockInfo, error) {
} }
blocks = append(blocks, b) blocks = append(blocks, b)
offset += int64(n) offset += int64(n)
hf.Reset()
} }
if len(blocks) == 0 { if len(blocks) == 0 {

View File

@ -49,7 +49,7 @@ var blocksTestData = []struct {
func TestBlocks(t *testing.T) { func TestBlocks(t *testing.T) {
for _, test := range blocksTestData { for _, test := range blocksTestData {
buf := bytes.NewBuffer(test.data) buf := bytes.NewBuffer(test.data)
blocks, err := Blocks(buf, test.blocksize) blocks, err := Blocks(buf, test.blocksize, 0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -103,8 +103,8 @@ var diffTestData = []struct {
func TestDiff(t *testing.T) { func TestDiff(t *testing.T) {
for i, test := range diffTestData { for i, test := range diffTestData {
a, _ := Blocks(bytes.NewBufferString(test.a), test.s) a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s) b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0)
_, d := BlockDiff(a, b) _, d := BlockDiff(a, b)
if len(d) != len(test.d) { if len(d) != len(test.d) {
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d)) t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))

View File

@ -32,10 +32,6 @@ type Walker struct {
TempNamer TempNamer TempNamer TempNamer
// If CurrentFiler is not nil, it is queried for the current file before rescanning. // If CurrentFiler is not nil, it is queried for the current file before rescanning.
CurrentFiler CurrentFiler CurrentFiler CurrentFiler
// If Suppressor is not nil, it is queried for supression of modified files.
// Suppressed files will be returned with empty metadata and the Suppressed flag set.
// Requires CurrentFiler to be set.
Suppressor Suppressor
// If IgnorePerms is true, changes to permission bits will not be // If IgnorePerms is true, changes to permission bits will not be
// detected. Scanned files will get zero permission bits and the // detected. Scanned files will get zero permission bits and the
// NoPermissionBits flag set. // NoPermissionBits flag set.
@ -49,11 +45,6 @@ type TempNamer interface {
IsTemporary(path string) bool IsTemporary(path string) bool
} }
type Suppressor interface {
// Supress returns true if the update to the named file should be ignored.
Suppress(name string, fi os.FileInfo) (bool, bool)
}
type CurrentFiler interface { type CurrentFiler interface {
// CurrentFile returns the file as seen at last scan. // CurrentFile returns the file as seen at last scan.
CurrentFile(name string) protocol.FileInfo CurrentFile(name string) protocol.FileInfo
@ -201,22 +192,6 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo, ign map[string][
return nil return nil
} }
if w.Suppressor != nil {
if cur, prev := w.Suppressor.Suppress(rn, info); cur && !prev {
l.Infof("Changes to %q are being temporarily suppressed because it changes too frequently.", p)
cf.Flags |= protocol.FlagInvalid
cf.Version = lamport.Default.Tick(cf.Version)
cf.LocalVersion = 0
if debug {
l.Debugln("suppressed:", cf)
}
fchan <- cf
return nil
} else if prev && !cur {
l.Infof("Changes to %q are no longer suppressed.", p)
}
}
if debug { if debug {
l.Debugln("rescan:", cf, info.ModTime().Unix(), info.Mode()&os.ModePerm) l.Debugln("rescan:", cf, info.ModTime().Unix(), info.Mode()&os.ModePerm)
} }