feat(blockstore): benchmarks #275

Merged: 38 commits, Oct 24, 2024

Commits (38)
2db1e1b  Added benchWriteSmall benchmark (dadepo, Sep 18, 2024)
b8ef631  updated test data (dadepo, Sep 18, 2024)
e9eb195  Added benchReadSequential (dadepo, Sep 18, 2024)
baa2fd8  Rename test file (dadepo, Sep 18, 2024)
6f46e71  Added benchReadRandom (dadepo, Sep 18, 2024)
fd8952a  Set c allocator (dadepo, Sep 19, 2024)
a7e6bfb  typo (dadepo, Sep 19, 2024)
fe078fb  Check in test data (dadepo, Sep 19, 2024)
439100b  Update test name (dadepo, Sep 29, 2024)
7f3c380  Reuse ledger.insert_shred.insertShredsForTest (dadepo, Sep 29, 2024)
4d55ccd  Add benchSerializeWriteBincode (dadepo, Sep 29, 2024)
8c80082  Update title (dadepo, Sep 29, 2024)
4528620  Add benchReadBincode (dadepo, Sep 29, 2024)
5fa8282  Moved ledger benchmark to own file (dadepo, Oct 7, 2024)
ab2086b  Typo fix (dadepo, Oct 7, 2024)
077cda2  Fmt (dadepo, Oct 7, 2024)
692d561  import via the root source (dadepo, Oct 7, 2024)
44fa660  camelCase for functions (dadepo, Oct 7, 2024)
cc8ac6c  Merge branch 'main' into dade/blockstore-benchmark (dadepo, Oct 7, 2024)
dbcd012  Pass std.heap.c_allocator to TestState in ledger benchmark (dadepo, Oct 7, 2024)
b84cc5c  Drop benchShreds in favour of re-using testShreds (dadepo, Oct 7, 2024)
dcccd63  Remove unused imports (dadepo, Oct 7, 2024)
aea8d1d  No need to put shreds into a tuple to later loop to deinit (dadepo, Oct 9, 2024)
7b04328  Merge branch 'main' into dade/blockstore-benchmark (dadepo, Oct 9, 2024)
9a1dfd9  Switch to sig.time.Timer (dadepo, Oct 9, 2024)
82a531d  Fix typo (dadepo, Oct 18, 2024)
872a32b  Merge branch 'main' into dade/blockstore-benchmark (dadepo, Oct 19, 2024)
6249323  typo (dadepo, Oct 19, 2024)
6fdf8f3  Fixes after merging. (dadepo, Oct 19, 2024)
2a1dbc6  Use num_reads sized random sample of indexes for benchReadRandom (dadepo, Oct 19, 2024)
d684d62  Added comment (dadepo, Oct 21, 2024)
035963e  Merge branch 'main' into dade/blockstore-benchmark (dadepo, Oct 24, 2024)
d8a9555  Fixes after merge (dadepo, Oct 24, 2024)
5972a1e  Switch to nanoseconds (dadepo, Oct 24, 2024)
f4eb228  Update iteration (dadepo, Oct 24, 2024)
1719072  Set iteration back to 5 for CI (dadepo, Oct 24, 2024)
49d9eb5  test(ledger): benchmark naming, iterations, units (dnut, Oct 24, 2024)
b82c367  fix(ledger): typo (dnut, Oct 24, 2024)

1 change: 1 addition & 0 deletions build.zig
@@ -143,6 +143,7 @@ pub fn build(b: *Build) void {
benchmark_exe.root_module.addImport("zig-network", zig_network_module);
benchmark_exe.root_module.addImport("httpz", httpz_mod);
benchmark_exe.root_module.addImport("zstd", zstd_mod);
benchmark_exe.root_module.addImport("rocksdb", rocksdb_mod);
benchmark_exe.linkLibC();

const benchmark_exe_run = b.addRunArtifact(benchmark_exe);
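For context, the `rocksdb_mod` referenced in this diff is presumably created earlier in build.zig from a package dependency. Below is a minimal sketch of that wiring, assuming a dependency named "rocksdb" declared in build.zig.zon; the dependency name and option set are illustrative, not taken from this PR:

```zig
// Hypothetical wiring for the "rocksdb" module imported above.
// Dependency name and options are assumptions, not part of this diff.
const rocksdb_dep = b.dependency("rocksdb", .{
    .target = target,
    .optimize = optimize,
});
const rocksdb_mod = rocksdb_dep.module("rocksdb");

// The new diff line then makes it available as @import("rocksdb")
// inside the benchmark executable:
benchmark_exe.root_module.addImport("rocksdb", rocksdb_mod);
```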
Binary file not shown.
Binary file not shown.
(These are presumably the checked-in shred test-data files referenced by the benchmarks below: agave.blockstore.bench_write_small.shreds.bin and agave.blockstore.bench_read.shreds.bin.)
8 changes: 8 additions & 0 deletions src/benchmarks.zig
@@ -102,6 +102,14 @@ pub fn main() !void {
.microseconds,
);
}

if (std.mem.startsWith(u8, filter, "ledger") or run_all_benchmarks) {
try benchmark(
@import("ledger/benchmarks.zig").BenchmarkLedger,
max_time_per_bench,
.microseconds,
);
}
}

const TimeUnits = enum {
4 changes: 0 additions & 4 deletions src/gossip/service.zig
@@ -3336,10 +3336,6 @@ pub const BenchmarkGossipServicePullRequests = struct {
var contact_info = ContactInfo.init(allocator, pubkey, 0, 19);
try contact_info.setSocket(.gossip, address);

// const logger = Logger.init(allocator, .debug);
// defer logger.deinit();
// logger.spawn();

const logger = .noop;

// process incoming packets/messages
163 changes: 163 additions & 0 deletions src/ledger/benchmarks.zig
@@ -0,0 +1,163 @@
const std = @import("std");
const sig = @import("../sig.zig");
const ledger_tests = @import("./tests.zig");
const ledger = @import("lib.zig");

const Reward = ledger.transaction_status.Reward;
const Rewards = ledger.transaction_status.Rewards;
const RewardType = ledger.transaction_status.RewardType;
const Pubkey = sig.core.Pubkey;
const TestState = ledger_tests.TestState;
const TestDB = ledger_tests.TestDB;

const schema = ledger.schema.schema;
const deinitShreds = ledger_tests.deinitShreds;
const testShreds = ledger_tests.testShreds;

const test_shreds_dir = sig.TEST_DATA_DIR ++ "/shreds";
const State = TestState("global");
const DB = TestDB("global");

fn createRewards(allocator: std.mem.Allocator, count: usize) !Rewards {
var rng = std.Random.DefaultPrng.init(100);
const rand = rng.random();
var rewards: Rewards = Rewards.init(allocator);
for (0..count) |i| {
try rewards.append(Reward{
.pubkey = &Pubkey.initRandom(rand).data,
.lamports = @intCast(42 + i),
.post_balance = std.math.maxInt(u64),
.reward_type = RewardType.Fee,
.commission = null,
});
}
return rewards;
}

pub const BenchmarkLedger = struct {
pub const min_iterations = 25;
pub const max_iterations = 25;

/// Analogous to [bench_write_small](https://github.com/anza-xyz/agave/blob/cfd393654f84c36a3c49f15dbe25e16a0269008d/ledger/benches/blockstore.rs#L59)
///
/// There is a notable difference from agave: This does not measure the
/// creation of shreds from entries. But even if you remove that from
/// the agave benchmark, the benchmark result is the same.
pub fn @"ShredInserter.insertShreds - 1751 shreds"() !sig.time.Duration {
const allocator = std.heap.c_allocator;
var state = try State.init(allocator, "bench write small", .noop);
defer state.deinit();
var inserter = try state.shredInserter();

const shreds_path = "agave.blockstore.bench_write_small.shreds.bin";
const shreds = try testShreds(allocator, shreds_path);
defer deinitShreds(allocator, shreds);

const is_repairs = try inserter.allocator.alloc(bool, shreds.len);
defer inserter.allocator.free(is_repairs);
for (0..shreds.len) |i| {
is_repairs[i] = false;
}

var timer = try sig.time.Timer.start();
_ = try inserter.insertShreds(shreds, is_repairs, null, false, null);
return timer.read();
}

/// Analogous to [bench_read_sequential](https://github.com/anza-xyz/agave/blob/cfd393654f84c36a3c49f15dbe25e16a0269008d/ledger/benches/blockstore.rs#L78)
pub fn @"BlockstoreReader.getDataShred - Sequential"() !sig.time.Duration {
const allocator = std.heap.c_allocator;
var state = try State.init(allocator, "bench read sequential", .noop);
defer state.deinit();
var inserter = try state.shredInserter();
var reader = try state.reader();

const shreds_path = "agave.blockstore.bench_read.shreds.bin";
const shreds = try testShreds(allocator, shreds_path);
defer deinitShreds(allocator, shreds);

const total_shreds = shreds.len;

_ = try ledger.shred_inserter.shred_inserter.insertShredsForTest(&inserter, shreds);

const slot: u32 = 0;
const num_reads = total_shreds / 15;

var rng = std.Random.DefaultPrng.init(100);

var timer = try sig.time.Timer.start();
const start_index = rng.random().intRangeAtMost(u32, 0, @intCast(total_shreds));
for (start_index..start_index + num_reads) |i| {
const shred_index = i % total_shreds;
_ = try reader.getDataShred(slot, shred_index);
}
return timer.read();
}

/// Analogous to [bench_read_random](https://github.com/anza-xyz/agave/blob/92eca1192b055d896558a78759d4e79ab4721ff1/ledger/benches/blockstore.rs#L103)
pub fn @"BlockstoreReader.getDataShred - Random"() !sig.time.Duration {
const allocator = std.heap.c_allocator;
var state = try State.init(allocator, "bench read random", .noop);
defer state.deinit();
var inserter = try state.shredInserter();
var reader = try state.reader();

const shreds_path = "agave.blockstore.bench_read.shreds.bin";
const shreds = try testShreds(allocator, shreds_path);
defer deinitShreds(allocator, shreds);

const total_shreds = shreds.len;
_ = try ledger.shred_inserter.shred_inserter.insertShredsForTest(&inserter, shreds);
const num_reads = total_shreds / 15;

const slot: u32 = 0;

var rng = std.Random.DefaultPrng.init(100);

var indices = try std.ArrayList(u32).initCapacity(inserter.allocator, num_reads);
defer indices.deinit();
for (0..num_reads) |_| {
indices.appendAssumeCapacity(rng.random().uintLessThan(u32, @intCast(total_shreds)));
}

var timer = try sig.time.Timer.start();
for (indices.items) |shred_index| {
_ = try reader.getDataShred(slot, shred_index);
}
return timer.read();
}

/// Analogous to [bench_serialize_write_bincode](https://github.com/anza-xyz/agave/blob/9c2098450ca7e5271e3690277992fbc910be27d0/ledger/benches/protobuf.rs#L88)
pub fn @"Database.put Rewards"() !sig.time.Duration {
const allocator = std.heap.c_allocator;
var state = try State.init(allocator, "bench serialize write bincode", .noop);
defer state.deinit();
const slot: u32 = 0;

var rewards: Rewards = try createRewards(allocator, 100);
const rewards_slice = try rewards.toOwnedSlice();
var timer = try sig.time.Timer.start();
try state.db.put(schema.rewards, slot, .{
.rewards = rewards_slice,
.num_partitions = null,
});
return timer.read();
}

/// Analogous to [bench_read_bincode](https://github.com/anza-xyz/agave/blob/9c2098450ca7e5271e3690277992fbc910be27d0/ledger/benches/protobuf.rs#L100)
pub fn @"Database.get Rewards"() !sig.time.Duration {
const allocator = std.heap.c_allocator;
var state = try State.init(allocator, "bench read bincode", .noop);
defer state.deinit();
const slot: u32 = 1;

var rewards: Rewards = try createRewards(allocator, 100);
try state.db.put(schema.rewards, slot, .{
.rewards = try rewards.toOwnedSlice(),
.num_partitions = null,
});
var timer = try sig.time.Timer.start();
_ = try state.db.get(allocator, schema.rewards, slot);
return timer.read();
}
};
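The `benchmark(...)` harness invoked from src/benchmarks.zig is not part of this diff. Given the shape of `BenchmarkLedger` (public `min_iterations`/`max_iterations` constants plus named functions returning `!sig.time.Duration`), such a harness can walk the struct's declarations at comptime. The sketch below illustrates that pattern only; it is not sig's actual `benchmark` implementation, and `asNanos()` is an assumed accessor on the returned duration:

```zig
const std = @import("std");

/// Illustrative comptime-driven runner, not sig's actual harness:
/// call each public fn declared on a benchmark struct and report the
/// best duration over `min_iterations` runs.
fn runSuite(comptime B: type) !void {
    inline for (@typeInfo(B).Struct.decls) |decl| {
        const member = @field(B, decl.name);
        // Skip non-function decls such as min_iterations/max_iterations.
        if (@typeInfo(@TypeOf(member)) != .Fn) continue;
        var best: u64 = std.math.maxInt(u64);
        for (0..B.min_iterations) |_| {
            const duration = try member();
            // asNanos() is assumed; use whatever sig.time.Duration exposes.
            best = @min(best, duration.asNanos());
        }
        std.debug.print("{s}: {d} ns (best of {d})\n", .{ decl.name, best, B.min_iterations });
    }
}
```

Since src/benchmarks.zig selects this suite when the filter starts with "ledger", a filtered run would presumably look like `zig build benchmark -- ledger` (the exact step name and argument handling are assumptions).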
1 change: 0 additions & 1 deletion src/ledger/database/rocksdb.zig
@@ -212,7 +212,6 @@ pub fn RocksDB(comptime column_families: []const ColumnFamily) type {
defer key_bytes.deinit();
const val_bytes = try value_serializer.serializeToRef(self.allocator, value);
defer val_bytes.deinit();

self.inner.put(
self.cf_handles[cf.find(column_families)],
key_bytes.data,
23 changes: 18 additions & 5 deletions src/ledger/tests.zig
@@ -3,6 +3,7 @@
const std = @import("std");
const sig = @import("../sig.zig");
const ledger = @import("lib.zig");
const transaction_status = @import("./transaction_status.zig");

const Allocator = std.mem.Allocator;

@@ -77,9 +78,9 @@ test "insert shreds and transaction statuses then get blocks" {
const blockhash = entries[entries.len - 1].hash;
const blockhash_string = blockhash.base58String();

const shreds = try testShreds(prefix ++ "shreds.bin");
const more_shreds = try testShreds(prefix ++ "more_shreds.bin");
const unrooted_shreds = try testShreds(prefix ++ "unrooted_shreds.bin");
const shreds = try testShreds(std.testing.allocator, prefix ++ "shreds.bin");
const more_shreds = try testShreds(std.testing.allocator, prefix ++ "more_shreds.bin");
const unrooted_shreds = try testShreds(std.testing.allocator, prefix ++ "unrooted_shreds.bin");
defer inline for (.{ shreds, more_shreds, unrooted_shreds }) |slice| {
deinitShreds(std.testing.allocator, slice);
};
@@ -255,9 +256,9 @@ pub fn freshDir(path: []const u8) !void {

const test_shreds_dir = sig.TEST_DATA_DIR ++ "/shreds";

fn testShreds(comptime filename: []const u8) ![]const Shred {
pub fn testShreds(allocator: std.mem.Allocator, comptime filename: []const u8) ![]const Shred {
const path = comptimePrint("{s}/{s}", .{ test_shreds_dir, filename });
return loadShredsFromFile(std.testing.allocator, path);
return loadShredsFromFile(allocator, path);
}

/// Read shreds from binary file structured like this:
@@ -329,6 +330,18 @@ pub fn deinitShreds(allocator: Allocator, shreds: []const Shred) void {

/// Read entries from binary file structured like this:
/// [entry0_len: u64(little endian)][entry0_bincode][entry1_len...
///
/// loadEntriesFromFile can read entries produced by this rust function:
/// ```rust
/// fn save_entries_to_file(entries: &[Entry], path: &str) {
/// let mut file = std::fs::File::create(path).unwrap();
/// for entry in entries {
/// let payload = bincode::serialize(&entry).unwrap();
/// file.write(&payload.len().to_le_bytes()).unwrap();
/// file.write(&*payload).unwrap();
/// }
/// }
/// ```
pub fn loadEntriesFromFile(allocator: Allocator, path: []const u8) ![]const Entry {
const file = try std.fs.cwd().openFile(path, .{});
const reader = file.reader();
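The length-prefixed framing documented above for `loadEntriesFromFile` (and used similarly for the shred files) is simple to parse in Zig as well. Below is a self-contained sketch of a reader for the `[len: u64 little endian][payload]...` layout; `loadRecords` is a hypothetical helper written for illustration, not a function from this PR:

```zig
const std = @import("std");

/// Hypothetical reader for [len: u64 LE][payload] framed files.
/// Returns one heap-allocated byte slice per record; the caller frees
/// each record and the outer slice.
fn loadRecords(allocator: std.mem.Allocator, path: []const u8) ![][]u8 {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    const reader = file.reader();

    var records = std.ArrayList([]u8).init(allocator);
    errdefer {
        for (records.items) |record| allocator.free(record);
        records.deinit();
    }

    while (true) {
        // Each record starts with its payload length as a little-endian u64.
        const len = reader.readInt(u64, .little) catch |err| switch (err) {
            error.EndOfStream => break, // clean end of file
            else => return err,
        };
        const payload = try allocator.alloc(u8, @intCast(len));
        errdefer allocator.free(payload);
        try reader.readNoEof(payload);
        try records.append(payload);
    }
    return records.toOwnedSlice();
}
```

Deserializing each payload (e.g. with sig's bincode support) is then a separate step, which is roughly what loadShredsFromFile and loadEntriesFromFile layer on top of this framing.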