lib/db: Filter unchanged files when updating and polish

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4426
Simon Frei 2017-10-24 20:05:29 +00:00 committed by Jakob Borg
parent dc42db444b
commit 7ba9e7c322
3 changed files with 90 additions and 70 deletions


@@ -206,33 +206,23 @@ func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, l
             err = ef.Unmarshal(bs)
         }
-        if err != nil {
-            if isLocalDevice {
-                localSize.addFile(f)
-            }
-            t.insertFile(folder, device, f)
-            if f.IsInvalid() {
-                t.removeFromGlobal(folder, device, name, globalSize)
-            } else {
-                t.updateGlobal(folder, device, f, globalSize)
-            }
+        // The Invalid flag might change without the version being bumped.
+        if err == nil && ef.Version.Equal(f.Version) && ef.Invalid == f.Invalid {
+            continue
+        }
-        // The Invalid flag might change without the version being bumped.
-        if !ef.Version.Equal(f.Version) || ef.Invalid != f.Invalid {
-            if isLocalDevice {
+        if isLocalDevice {
+            if err == nil {
                 localSize.removeFile(ef)
-                localSize.addFile(f)
             }
+            localSize.addFile(f)
+        }
-            t.insertFile(folder, device, f)
-            if f.IsInvalid() {
-                t.removeFromGlobal(folder, device, name, globalSize)
-            } else {
-                t.updateGlobal(folder, device, f, globalSize)
-            }
+        t.insertFile(folder, device, f)
+        if f.IsInvalid() {
+            t.removeFromGlobal(folder, device, name, globalSize)
+        } else {
+            t.updateGlobal(folder, device, f, globalSize)
+        }
         // Write out and reuse the batch every few records, to avoid the batch
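Note: this hunk and the FileSet.Update hunk further down use the same notion of an "unchanged" file: the stored entry must exist and match the incoming one in both its version vector and its Invalid flag; the flag is compared separately because it can flip without the version being bumped. A minimal standalone sketch of that predicate (the package, helper name, and signature are illustrative, not part of lib/db):

package example

import "github.com/syncthing/syncthing/lib/protocol"

// isUnchanged reports whether an incoming FileInfo adds nothing over the
// stored entry: the lookup must have succeeded, and both the version vector
// and the Invalid flag must match. Entries that pass this check are skipped,
// saving the insert and the local/global size bookkeeping seen above.
// (Illustrative sketch only; lib/db performs this check inline.)
func isUnchanged(stored protocol.FileInfo, ok bool, incoming protocol.FileInfo) bool {
    return ok && stored.Version.Equal(incoming.Version) && stored.Invalid == incoming.Invalid
}
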
@@ -440,7 +430,6 @@ func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator)
     defer dbi.Release()
     var fk []byte
-nextFile:
     for dbi.Next() {
         var vl VersionList
         err := vl.Unmarshal(dbi.Value())
@@ -468,48 +457,49 @@ nextFile:
             }
         }
-        if need || !have {
-            name := db.globalKeyName(dbi.Key())
-            needVersion := vl.Versions[0].Version
+        if have && !need {
+            continue
+        }
-        nextVersion:
-            for i := range vl.Versions {
-                if !vl.Versions[i].Version.Equal(needVersion) {
-                    // We haven't found a valid copy of the file with the needed version.
-                    continue nextFile
-                }
-                fk = db.deviceKeyInto(fk[:cap(fk)], folder, vl.Versions[i].Device, name)
-                bs, err := t.Get(fk, nil)
-                if err != nil {
-                    l.Debugln("surprise error:", err)
-                    continue nextVersion
-                }
+        name := db.globalKeyName(dbi.Key())
+        needVersion := vl.Versions[0].Version
-                gf, err := unmarshalTrunc(bs, truncate)
-                if err != nil {
-                    l.Debugln("unmarshal error:", err)
-                    continue nextVersion
-                }
-                if gf.IsInvalid() {
-                    // The file is marked invalid for whatever reason, don't use it.
-                    continue nextVersion
-                }
-                if gf.IsDeleted() && !have {
-                    // We don't need deleted files that we don't have
-                    continue nextFile
-                }
-                l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.Versions[0].Version)
-                if cont := fn(gf); !cont {
-                    return
-                }
-                // This file is handled, no need to look further in the version list
-                continue nextFile
+        for i := range vl.Versions {
+            if !vl.Versions[i].Version.Equal(needVersion) {
+                // We haven't found a valid copy of the file with the needed version.
+                break
+            }
+            fk = db.deviceKeyInto(fk[:cap(fk)], folder, vl.Versions[i].Device, name)
+            bs, err := t.Get(fk, nil)
+            if err != nil {
+                l.Debugln("surprise error:", err)
+                continue
+            }
+            gf, err := unmarshalTrunc(bs, truncate)
+            if err != nil {
+                l.Debugln("unmarshal error:", err)
+                continue
+            }
+            if gf.IsInvalid() {
+                // The file is marked invalid for whatever reason, don't use it.
+                continue
+            }
+            if gf.IsDeleted() && !have {
+                // We don't need deleted files that we don't have
+                break
+            }
+            l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.Versions[0].Version)
+            if cont := fn(gf); !cont {
+                return
+            }
+            // This file is handled, no need to look further in the version list
+            break
+        }
     }
 }
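
The restructuring in this hunk is behavior-preserving: the "if need || !have {" wrapper becomes an inverted guard ("if have && !need { continue }"), and since the scan over vl.Versions is the last statement of the outer loop body, "continue nextFile" can become a plain break and "continue nextVersion" a plain continue, so both labels go away. A generic sketch of the same flattening, with placeholder names (items, interesting, candidates, process) that are not Syncthing APIs:

package example

// scan mirrors the control-flow change in withNeed: an early continue replaces
// the enclosing if, and because the inner loop is the last statement of the
// outer loop body, the labelled jumps become a plain continue (try the next
// candidate) or break (this item is done).
func scan(items []int, interesting func(int) bool, candidates func(int) []int, process func(int) bool) {
    for _, item := range items {
        if !interesting(item) {
            continue // was: the remainder wrapped in "if interesting(item) { ... }"
        }
        for _, c := range candidates(item) {
            if !process(c) {
                continue // was: "continue nextCandidate"
            }
            break // handled; was: "continue nextItem"
        }
    }
}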


@@ -182,13 +182,19 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
     if device == protocol.LocalDeviceID {
         discards := make([]protocol.FileInfo, 0, len(fs))
         updates := make([]protocol.FileInfo, 0, len(fs))
-        for i, newFile := range fs {
-            fs[i].Sequence = atomic.AddInt64(&s.sequence, 1)
-            existingFile, ok := s.db.getFile([]byte(s.folder), device[:], []byte(newFile.Name))
-            if !ok || !existingFile.Version.Equal(newFile.Version) {
-                discards = append(discards, existingFile)
-                updates = append(updates, newFile)
+        // db.UpdateFiles will sort unchanged files out -> save one db lookup
+        // filter slice according to https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
+        oldFs := fs
+        fs = fs[:0]
+        for _, nf := range oldFs {
+            ef, ok := s.db.getFile([]byte(s.folder), device[:], []byte(nf.Name))
+            if ok && ef.Version.Equal(nf.Version) && ef.Invalid == nf.Invalid {
+                continue
+            }
+            nf.Sequence = atomic.AddInt64(&s.sequence, 1)
+            fs = append(fs, nf)
+            discards = append(discards, ef)
+            updates = append(updates, nf)
         }
         s.blockmap.Discard(discards)
         s.blockmap.Update(updates)
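
The wiki link in the comment above describes the "filtering without allocating" slice trick: the filtered result is appended into a zero-length slice that shares the input's backing array, so unchanged files are dropped from fs without allocating a second slice. A self-contained illustration of the trick (generic example, not Syncthing code):

package main

import "fmt"

// keepEven filters a slice in place, per the SliceTricks wiki page: out shares
// its backing array with s, so appending to out overwrites s's elements and no
// new allocation happens. The original slice must not be used afterwards.
func keepEven(s []int) []int {
    out := s[:0]
    for _, v := range s {
        if v%2 == 0 {
            out = append(out, v)
        }
    }
    return out
}

func main() {
    nums := []int{1, 2, 3, 4, 5, 6}
    fmt.Println(keepEven(nums)) // prints [2 4 6]
}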


@@ -350,13 +350,16 @@ func TestNeedWithInvalid(t *testing.T) {
 func TestUpdateToInvalid(t *testing.T) {
     ldb := db.OpenMemory()
-    s := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+    folder := "test)"
+    s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+    f := db.NewBlockFinder(ldb)
     localHave := fileList{
         protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
         protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
         protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), Invalid: true},
         protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
+        protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Invalid: true},
     }
     s.Replace(protocol.LocalDeviceID, localHave)
@@ -368,8 +371,12 @@ func TestUpdateToInvalid(t *testing.T) {
         t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
     }
-    localHave[1] = protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Invalid: true}
-    s.Update(protocol.LocalDeviceID, localHave[1:2])
+    oldBlockHash := localHave[1].Blocks[0].Hash
+    localHave[1].Invalid = true
+    localHave[1].Blocks = nil
+    localHave[4].Invalid = false
+    localHave[4].Blocks = genBlocks(3)
+    s.Update(protocol.LocalDeviceID, append(fileList{}, localHave[1], localHave[4]))
     have = fileList(haveList(s, protocol.LocalDeviceID))
     sort.Sort(have)
@@ -377,6 +384,23 @@ func TestUpdateToInvalid(t *testing.T) {
     if fmt.Sprint(have) != fmt.Sprint(localHave) {
         t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
     }
+    f.Iterate([]string{folder}, oldBlockHash, func(folder, file string, index int32) bool {
+        if file == localHave[1].Name {
+            t.Errorf("Found unexpected block in blockmap for invalidated file")
+            return true
+        }
+        return false
+    })
+    if !f.Iterate([]string{folder}, localHave[4].Blocks[0].Hash, func(folder, file string, index int32) bool {
+        if file == localHave[4].Name {
+            return true
+        }
+        return false
+    }) {
+        t.Errorf("First block of un-invalidated file is missing from blockmap")
+    }
 }
func TestInvalidAvailability(t *testing.T) {