From f78133b8e9aa55a5ab8f919f0c2445bbf74c748b Mon Sep 17 00:00:00 2001
From: Jakob Borg
Date: Wed, 27 May 2020 12:15:00 +0200
Subject: [PATCH] lib/db: Adjust transaction flush sizes downwards (#6686)

This reduces the size of our write batches before we flush them. This
has two effects: reducing the amount of data lost if we crash when
updating the database, and reducing the amount of memory used when we
do large updates without checkpoints (e.g., deleting a folder).

I ran our SyncManyFiles benchmark, as it is the one doing the most
transactions; however, there was no relevant change in any metric
(it's limited by our fsync, I expect). This is good, as any visible
change would just be a decrease in performance.

I don't have a benchmark on deleting a large folder, so I'm taking
that part on trust for now...
---
 lib/db/backend/leveldb_backend.go | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/lib/db/backend/leveldb_backend.go b/lib/db/backend/leveldb_backend.go
index c45cf9dd7..959b6558a 100644
--- a/lib/db/backend/leveldb_backend.go
+++ b/lib/db/backend/leveldb_backend.go
@@ -13,10 +13,17 @@ import (
 )
 
 const (
-	// Never flush transactions smaller than this, even on Checkpoint()
-	dbFlushBatchMin = 1 << MiB
-	// Once a transaction reaches this size, flush it unconditionally.
-	dbFlushBatchMax = 128 << MiB
+	// Never flush transactions smaller than this, even on Checkpoint().
+	// This just needs to be large enough to avoid flushing transactions
+	// when they are super tiny, thus creating millions of tiny
+	// transactions unnecessarily.
+	dbFlushBatchMin = 64 << KiB
+	// Once a transaction reaches this size, flush it unconditionally. This
+	// should be large enough to avoid forcing a flush between Checkpoint()
+	// calls in loops where we do those, so in principle just large enough
+	// to hold a FileInfo plus corresponding version list and metadata
+	// updates or two.
+	dbFlushBatchMax = 1 << MiB
 )
 
 // leveldbBackend implements Backend on top of a leveldb
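
For context, below is a minimal, self-contained sketch of the flush
policy these two constants drive: Checkpoint() flushes only once a
batch has reached dbFlushBatchMin, while every write flushes the batch
unconditionally once it reaches dbFlushBatchMax. This is not the actual
lib/db transaction code; the batch type and its put/checkpoint/flush
methods are illustrative stand-ins, as are the KiB/MiB shift constants
(they mirror how lib/db spells sizes: 64 << KiB == 64 KiB).

package main

import "fmt"

// KiB and MiB are shift amounts; the two thresholds match this patch.
const (
	KiB = 10
	MiB = 20

	dbFlushBatchMin = 64 << KiB
	dbFlushBatchMax = 1 << MiB
)

// batch is a hypothetical stand-in for the backend's pending write
// batch; only its accumulated size matters to the flush policy.
type batch struct {
	size int
}

// put records a write and flushes unconditionally once the batch
// reaches dbFlushBatchMax, bounding both memory use and the amount of
// data lost in a crash.
func (b *batch) put(key, val []byte) {
	b.size += len(key) + len(val)
	if b.size >= dbFlushBatchMax {
		b.flush()
	}
}

// checkpoint flushes only when the batch has reached dbFlushBatchMin,
// so calling it at every safe point in a tight loop does not create
// millions of tiny transactions.
func (b *batch) checkpoint() {
	if b.size >= dbFlushBatchMin {
		b.flush()
	}
}

// flush stands in for committing the batch to disk and resetting it.
func (b *batch) flush() {
	fmt.Printf("flushing %d bytes\n", b.size)
	b.size = 0
}

func main() {
	b := &batch{}
	b.put([]byte("a"), make([]byte, 32<<KiB))
	b.checkpoint() // ~32 KiB < dbFlushBatchMin: no flush
	b.put([]byte("b"), make([]byte, 40<<KiB))
	b.checkpoint() // ~72 KiB >= dbFlushBatchMin: flushes
}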