// Commit eed29f0f50: merge of upstream LevelDB 1.13
// (git-subtree-dir: src/leveldb,
//  git-subtree-split: 936b4613ea4551992e6096b1e05eeefc09a20e3b).
// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/db.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace leveldb {

class AutoCompactTest {
|
|
public:
|
|
std::string dbname_;
|
|
Cache* tiny_cache_;
|
|
Options options_;
|
|
DB* db_;
|
|
|
|
AutoCompactTest() {
|
|
dbname_ = test::TmpDir() + "/autocompact_test";
|
|
tiny_cache_ = NewLRUCache(100);
|
|
options_.block_cache = tiny_cache_;
|
|
DestroyDB(dbname_, options_);
|
|
options_.create_if_missing = true;
|
|
options_.compression = kNoCompression;
|
|
ASSERT_OK(DB::Open(options_, dbname_, &db_));
|
|
}
|
|
|
|
~AutoCompactTest() {
|
|
delete db_;
|
|
DestroyDB(dbname_, Options());
|
|
delete tiny_cache_;
|
|
}
|
|
|
|
std::string Key(int i) {
|
|
char buf[100];
|
|
snprintf(buf, sizeof(buf), "key%06d", i);
|
|
return std::string(buf);
|
|
}
|
|
|
|
uint64_t Size(const Slice& start, const Slice& limit) {
|
|
Range r(start, limit);
|
|
uint64_t size;
|
|
db_->GetApproximateSizes(&r, 1, &size);
|
|
return size;
|
|
}
|
|
|
|
void DoReads(int n);
|
|
};
// Size of each stored value: 200 KiB of filler per key.
static const int kValueSize = 200 * 1024;
// Total amount of data written before the reads begin: 100 MiB.
static const int kTotalSize = 100 * 1024 * 1024;
// Number of key/value pairs needed to reach kTotalSize (512).
static const int kCount = kTotalSize / kValueSize;
// Read through the first n keys repeatedly and check that they get
|
|
// compacted (verified by checking the size of the key space).
|
|
void AutoCompactTest::DoReads(int n) {
|
|
std::string value(kValueSize, 'x');
|
|
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
|
|
|
|
// Fill database
|
|
for (int i = 0; i < kCount; i++) {
|
|
ASSERT_OK(db_->Put(WriteOptions(), Key(i), value));
|
|
}
|
|
ASSERT_OK(dbi->TEST_CompactMemTable());
|
|
|
|
// Delete everything
|
|
for (int i = 0; i < kCount; i++) {
|
|
ASSERT_OK(db_->Delete(WriteOptions(), Key(i)));
|
|
}
|
|
ASSERT_OK(dbi->TEST_CompactMemTable());
|
|
|
|
// Get initial measurement of the space we will be reading.
|
|
const int64_t initial_size = Size(Key(0), Key(n));
|
|
const int64_t initial_other_size = Size(Key(n), Key(kCount));
|
|
|
|
// Read until size drops significantly.
|
|
std::string limit_key = Key(n);
|
|
for (int read = 0; true; read++) {
|
|
ASSERT_LT(read, 100) << "Taking too long to compact";
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
for (iter->SeekToFirst();
|
|
iter->Valid() && iter->key().ToString() < limit_key;
|
|
iter->Next()) {
|
|
// Drop data
|
|
}
|
|
delete iter;
|
|
// Wait a little bit to allow any triggered compactions to complete.
|
|
Env::Default()->SleepForMicroseconds(1000000);
|
|
uint64_t size = Size(Key(0), Key(n));
|
|
fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
|
|
read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
|
|
if (size <= initial_size/10) {
|
|
break;
|
|
}
|
|
}
|
|
|
|
// Verify that the size of the key space not touched by the reads
|
|
// is pretty much unchanged.
|
|
const int64_t final_other_size = Size(Key(n), Key(kCount));
|
|
ASSERT_LE(final_other_size, initial_other_size + 1048576);
|
|
ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
|
|
}
// Scanning the entire key space should trigger compaction of all of it.
TEST(AutoCompactTest, ReadAll) {
  DoReads(kCount);
}
// Scanning only the first half should compact that half while leaving
// the untouched half largely intact (checked inside DoReads).
TEST(AutoCompactTest, ReadHalf) {
  DoReads(kCount/2);
}
}  // namespace leveldb

// Test-harness entry point: runs every TEST registered in this file.
int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}