lib/model: Do file recheck in folder loop (fixes #6583) (#6585)

Audrius Butkevicius 2020-05-01 10:08:59 +01:00 committed by GitHub
parent f86deedd9c
commit bd0c2bf237
3 changed files with 63 additions and 16 deletions

lib/model/folder.go

@@ -59,6 +59,10 @@ type folder struct {
     doInSyncChan chan syncRequest

+    forcedRescanRequested chan struct{}
+    forcedRescanPaths     map[string]struct{}
+    forcedRescanPathsMut  sync.Mutex
+
     watchCancel      context.CancelFunc
     watchChan        chan []string
     restartWatchChan chan struct{}
@@ -99,6 +103,10 @@ func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg conf
         doInSyncChan: make(chan syncRequest),

+        forcedRescanRequested: make(chan struct{}, 1),
+        forcedRescanPaths:     make(map[string]struct{}),
+        forcedRescanPathsMut:  sync.NewMutex(),
+
         watchCancel:      func() {},
         restartWatchChan: make(chan struct{}, 1),
         watchMut:         sync.NewMutex(),
@@ -170,6 +178,9 @@ func (f *folder) serve(ctx context.Context) {
                 pullFailTimer.Reset(pause)
             }

+        case <-f.forcedRescanRequested:
+            f.handleForcedRescans()
+
         case <-f.scanTimer.C:
             l.Debugln(f, "Scanning due to timer")
             f.scanTimerFired()
@@ -841,13 +852,16 @@ func (f *folder) Errors() []FileError {
     return append([]FileError{}, f.scanErrors...)
 }

-// ForceRescan marks the file such that it gets rehashed on next scan and then
-// immediately executes that scan.
-func (f *folder) ForceRescan(file protocol.FileInfo) error {
-    file.SetMustRescan(f.shortID)
-    f.fset.Update(protocol.LocalDeviceID, []protocol.FileInfo{file})
-
-    return f.Scan([]string{file.Name})
+// ScheduleForceRescan marks the file such that it gets rehashed on next scan, and schedules a scan.
+func (f *folder) ScheduleForceRescan(path string) {
+    f.forcedRescanPathsMut.Lock()
+    f.forcedRescanPaths[path] = struct{}{}
+    f.forcedRescanPathsMut.Unlock()
+
+    select {
+    case f.forcedRescanRequested <- struct{}{}:
+    default:
+    }
 }

 func (f *folder) updateLocalsFromScanning(fs []protocol.FileInfo) {
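The hunks above form the producer side of a coalescing hand-off: ScheduleForceRescan records the path in a mutex-guarded set and then performs a non-blocking send on a channel of capacity 1, so any number of concurrent callers produce at most one pending wake-up of the folder's serve loop. The sketch below only illustrates that idiom; it is not code from this commit, and the names (scheduler, pending, wakeup, drain) are invented for the example.

package main

import (
    "fmt"
    "sync"
    "time"
)

// scheduler coalesces many schedule() calls into one wake-up of a single
// worker goroutine: pending set + 1-buffered signal channel.
type scheduler struct {
    mut     sync.Mutex
    pending map[string]struct{} // paths waiting to be processed
    wakeup  chan struct{}       // capacity 1: at most one queued wake-up
}

func newScheduler() *scheduler {
    return &scheduler{
        pending: make(map[string]struct{}),
        wakeup:  make(chan struct{}, 1),
    }
}

// schedule records the path and signals the worker without ever blocking.
func (s *scheduler) schedule(path string) {
    s.mut.Lock()
    s.pending[path] = struct{}{}
    s.mut.Unlock()

    select {
    case s.wakeup <- struct{}{}: // worker was idle; wake it
    default: // a wake-up is already queued; the new path rides along
    }
}

// serve is the single consumer; it drains the whole pending set per wake-up.
func (s *scheduler) serve() {
    for range s.wakeup {
        s.mut.Lock()
        paths := make([]string, 0, len(s.pending))
        for p := range s.pending {
            paths = append(paths, p)
        }
        s.pending = make(map[string]struct{})
        s.mut.Unlock()

        fmt.Println("processing batch:", paths)
    }
}

func main() {
    s := newScheduler()
    go s.serve()

    s.schedule("a/b")
    s.schedule("c/d")
    s.schedule("a/b") // duplicate collapses in the set

    time.Sleep(100 * time.Millisecond)
}

Because the send uses select with a default branch, callers never block even while the worker is busy, and duplicate requests for the same path are handled once per drain.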
@@ -921,6 +935,40 @@ func (f *folder) emitDiskChangeEvents(fs []protocol.FileInfo, typeOfEvent events
     }
 }

+func (f *folder) handleForcedRescans() {
+    f.forcedRescanPathsMut.Lock()
+    paths := make([]string, 0, len(f.forcedRescanPaths))
+    for path := range f.forcedRescanPaths {
+        paths = append(paths, path)
+    }
+    f.forcedRescanPaths = make(map[string]struct{})
+    f.forcedRescanPathsMut.Unlock()
+
+    batch := newFileInfoBatch(func(fs []protocol.FileInfo) error {
+        f.fset.Update(protocol.LocalDeviceID, fs)
+        return nil
+    })
+
+    snap := f.fset.Snapshot()
+    for _, path := range paths {
+        _ = batch.flushIfFull()
+
+        fi, ok := snap.Get(protocol.LocalDeviceID, path)
+        if !ok {
+            continue
+        }
+        fi.SetMustRescan(f.shortID)
+        batch.append(fi)
+    }
+    snap.Release()
+
+    _ = batch.flush()
+
+    _ = f.scanSubdirs(paths)
+}
+
 // The exists function is expected to return true for all known paths
 // (excluding "" and ".")
 func unifySubs(dirs []string, exists func(dir string) bool) []string {
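handleForcedRescans drains the set in one pass, marks each still-known file as must-rescan through a batched database update, and then rescans just those paths. newFileInfoBatch, flushIfFull and append are Syncthing-internal helpers; the generic sketch below only illustrates the accumulate-and-flush shape they appear to follow, with invented names (batch, maxItems) and no claim to match the real implementation.

package main

import "fmt"

// batch accumulates items and hands them to flushFn in bounded chunks.
type batch struct {
    items    []string
    maxItems int
    flushFn  func([]string) error
}

func newBatch(maxItems int, flushFn func([]string) error) *batch {
    return &batch{maxItems: maxItems, flushFn: flushFn}
}

func (b *batch) append(item string) {
    b.items = append(b.items, item)
}

// flushIfFull flushes only when the batch has reached its size limit.
func (b *batch) flushIfFull() error {
    if len(b.items) >= b.maxItems {
        return b.flush()
    }
    return nil
}

// flush hands the accumulated items to the callback and resets the batch.
func (b *batch) flush() error {
    if len(b.items) == 0 {
        return nil
    }
    err := b.flushFn(b.items)
    b.items = b.items[:0]
    return err
}

func main() {
    b := newBatch(2, func(items []string) error {
        fmt.Println("flushing", items)
        return nil
    })

    for _, it := range []string{"x", "y", "z"} {
        _ = b.flushIfFull() // flush before appending if already full
        b.append(it)
    }
    _ = b.flush() // final partial flush
}

Calling flushIfFull before each append keeps every flush bounded in size; the final flush picks up the partial remainder.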

lib/model/model.go

@@ -56,7 +56,7 @@ type service interface {
     Stop()
     Errors() []FileError
     WatchError() error
-    ForceRescan(file protocol.FileInfo) error
+    ScheduleForceRescan(path string)
     GetStatistics() (stats.FolderStatistics, error)

     getState() (folderState, time.Time, error)
@@ -1565,7 +1565,7 @@ func (m *model) Request(deviceID protocol.DeviceID, folder, name string, size in
     }

     if !scanner.Validate(res.data, hash, weakHash) {
-        m.recheckFile(deviceID, folderFs, folder, name, size, offset, hash)
+        m.recheckFile(deviceID, folder, name, offset, hash)
         l.Debugf("%v REQ(in) failed validating data (%v): %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, size)
         return nil, protocol.ErrNoSuchFile
     }
@@ -1599,7 +1599,7 @@ func newLimitedRequestResponse(size int, limiters ...*byteSemaphore) *requestRes
     return res
 }

-func (m *model) recheckFile(deviceID protocol.DeviceID, folderFs fs.Filesystem, folder, name string, size int32, offset int64, hash []byte) {
+func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte) {
     cf, ok := m.CurrentFolderFile(folder, name)
     if !ok {
         l.Debugf("%v recheckFile: %s: %q / %q: no current file", m, deviceID, folder, name)
@@ -1636,10 +1636,9 @@ func (m *model) recheckFile(deviceID protocol.DeviceID, folderFs fs.Filesystem,
         l.Debugf("%v recheckFile: %s: %q / %q: Folder stopped before rescan could be scheduled", m, deviceID, folder, name)
         return
     }
-    if err := runner.ForceRescan(cf); err != nil {
-        l.Debugf("%v recheckFile: %s: %q / %q rescan: %s", m, deviceID, folder, name, err)
-        return
-    }
+
+    runner.ScheduleForceRescan(name)
+
     l.Debugf("%v recheckFile: %s: %q / %q", m, deviceID, folder, name)
 }

lib/model/model_test.go

@@ -3190,9 +3190,9 @@ func TestIssue5002(t *testing.T) {
     }
     blockSize := int32(file.BlockSize())

-    m.recheckFile(protocol.LocalDeviceID, defaultFolderConfig.Filesystem(), "default", "foo", blockSize, file.Size-int64(blockSize), []byte{1, 2, 3, 4})
-    m.recheckFile(protocol.LocalDeviceID, defaultFolderConfig.Filesystem(), "default", "foo", blockSize, file.Size, []byte{1, 2, 3, 4}) // panic
-    m.recheckFile(protocol.LocalDeviceID, defaultFolderConfig.Filesystem(), "default", "foo", blockSize, file.Size+int64(blockSize), []byte{1, 2, 3, 4})
+    m.recheckFile(protocol.LocalDeviceID, "default", "foo", file.Size-int64(blockSize), []byte{1, 2, 3, 4})
+    m.recheckFile(protocol.LocalDeviceID, "default", "foo", file.Size, []byte{1, 2, 3, 4}) // panic
+    m.recheckFile(protocol.LocalDeviceID, "default", "foo", file.Size+int64(blockSize), []byte{1, 2, 3, 4})
 }

 func TestParentOfUnignored(t *testing.T) {