diff --git a/build.zig b/build.zig
index e027d8e25..25b555694 100644
--- a/build.zig
+++ b/build.zig
@@ -143,6 +143,7 @@ pub fn build(b: *Build) void {
     benchmark_exe.root_module.addImport("zig-network", zig_network_module);
     benchmark_exe.root_module.addImport("httpz", httpz_mod);
     benchmark_exe.root_module.addImport("zstd", zstd_mod);
+    benchmark_exe.root_module.addImport("rocksdb", rocksdb_mod);
     benchmark_exe.linkLibC();
 
     const benchmark_exe_run = b.addRunArtifact(benchmark_exe);
diff --git a/data/test-data/shreds/agave.blockstore.bench_read.shreds.bin b/data/test-data/shreds/agave.blockstore.bench_read.shreds.bin
new file mode 100644
index 000000000..e9ece87b6
Binary files /dev/null and b/data/test-data/shreds/agave.blockstore.bench_read.shreds.bin differ
diff --git a/data/test-data/shreds/agave.blockstore.bench_write_small.shreds.bin b/data/test-data/shreds/agave.blockstore.bench_write_small.shreds.bin
new file mode 100644
index 000000000..28c7def1d
Binary files /dev/null and b/data/test-data/shreds/agave.blockstore.bench_write_small.shreds.bin differ
diff --git a/src/benchmarks.zig b/src/benchmarks.zig
index 5bdcb8eed..74df9e9f6 100644
--- a/src/benchmarks.zig
+++ b/src/benchmarks.zig
@@ -102,6 +102,14 @@ pub fn main() !void {
             .microseconds,
         );
     }
+
+    if (std.mem.startsWith(u8, filter, "ledger") or run_all_benchmarks) {
+        try benchmark(
+            @import("ledger/benchmarks.zig").BenchmarkLedger,
+            max_time_per_bench,
+            .microseconds,
+        );
+    }
 }
 
 const TimeUnits = enum {
diff --git a/src/gossip/service.zig b/src/gossip/service.zig
index 26c013846..46adb727e 100644
--- a/src/gossip/service.zig
+++ b/src/gossip/service.zig
@@ -3336,10 +3336,6 @@ pub const BenchmarkGossipServicePullRequests = struct {
         var contact_info = ContactInfo.init(allocator, pubkey, 0, 19);
         try contact_info.setSocket(.gossip, address);
 
-        // const logger = Logger.init(allocator, .debug);
-        // defer logger.deinit();
-        // logger.spawn();
-
        const logger = .noop;
 
        // process incoming packets/messsages
diff --git a/src/ledger/benchmarks.zig b/src/ledger/benchmarks.zig
new file mode 100644
index 000000000..08514d54a
--- /dev/null
+++ b/src/ledger/benchmarks.zig
@@ -0,0 +1,163 @@
+const std = @import("std");
+const sig = @import("../sig.zig");
+const ledger_tests = @import("./tests.zig");
+const ledger = @import("lib.zig");
+
+const Reward = ledger.transaction_status.Reward;
+const Rewards = ledger.transaction_status.Rewards;
+const RewardType = ledger.transaction_status.RewardType;
+const Pubkey = sig.core.Pubkey;
+const TestState = ledger_tests.TestState;
+const TestDB = ledger_tests.TestDB;
+
+const schema = ledger.schema.schema;
+const deinitShreds = ledger_tests.deinitShreds;
+const testShreds = ledger_tests.testShreds;
+
+const test_shreds_dir = sig.TEST_DATA_DIR ++ "/shreds";
+const State = TestState("global");
+const DB = TestDB("global");
+
+fn createRewards(allocator: std.mem.Allocator, count: usize) !Rewards {
+    var rng = std.Random.DefaultPrng.init(100);
+    const rand = rng.random();
+    var rewards: Rewards = Rewards.init(allocator);
+    for (0..count) |i| {
+        try rewards.append(Reward{
+            .pubkey = &Pubkey.initRandom(rand).data,
+            .lamports = @intCast(42 + i),
+            .post_balance = std.math.maxInt(u64),
+            .reward_type = RewardType.Fee,
+            .commission = null,
+        });
+    }
+    return rewards;
+}
+
+pub const BenchmarkLedger = struct {
+    pub const min_iterations = 25;
+    pub const max_iterations = 25;
+
+    /// Analogous to [bench_write_small](https://github.com/anza-xyz/agave/blob/cfd393654f84c36a3c49f15dbe25e16a0269008d/ledger/benches/blockstore.rs#L59)
+    ///
+    /// There is a notable difference from agave: This does not measure the
+    /// creation of shreds from entries. But even if you remove that from
+    /// the agave benchmark, the benchmark result is the same.
+    pub fn @"ShredInserter.insertShreds - 1751 shreds"() !sig.time.Duration {
+        const allocator = std.heap.c_allocator;
+        var state = try State.init(allocator, "bench write small", .noop);
+        defer state.deinit();
+        var inserter = try state.shredInserter();
+
+        const shreds_path = "agave.blockstore.bench_write_small.shreds.bin";
+        const shreds = try testShreds(std.heap.c_allocator, shreds_path);
+        defer deinitShreds(allocator, shreds);
+
+        const is_repairs = try inserter.allocator.alloc(bool, shreds.len);
+        defer inserter.allocator.free(is_repairs);
+        for (0..shreds.len) |i| {
+            is_repairs[i] = false;
+        }
+
+        var timer = try sig.time.Timer.start();
+        _ = try inserter.insertShreds(shreds, is_repairs, null, false, null);
+        return timer.read();
+    }
+
+    /// Analogous to [bench_read_sequential](https://github.com/anza-xyz/agave/blob/cfd393654f84c36a3c49f15dbe25e16a0269008d/ledger/benches/blockstore.rs#L78)
+    pub fn @"BlockstoreReader.getDataShred - Sequential"() !sig.time.Duration {
+        const allocator = std.heap.c_allocator;
+        var state = try State.init(allocator, "bench read sequential", .noop);
+        defer state.deinit();
+        var inserter = try state.shredInserter();
+        var reader = try state.reader();
+
+        const shreds_path = "agave.blockstore.bench_read.shreds.bin";
+        const shreds = try testShreds(std.heap.c_allocator, shreds_path);
+        defer deinitShreds(allocator, shreds);
+
+        const total_shreds = shreds.len;
+
+        _ = try ledger.shred_inserter.shred_inserter.insertShredsForTest(&inserter, shreds);
+
+        const slot: u32 = 0;
+        const num_reads = total_shreds / 15;
+
+        var rng = std.Random.DefaultPrng.init(100);
+
+        var timer = try sig.time.Timer.start();
+        const start_index = rng.random().intRangeAtMost(u32, 0, @intCast(total_shreds));
+        for (start_index..start_index + num_reads) |i| {
+            const shred_index = i % total_shreds;
+            _ = try reader.getDataShred(slot, shred_index);
+        }
+        return timer.read();
+    }
+
+    /// Analogous to [bench_read_random](https://github.com/anza-xyz/agave/blob/92eca1192b055d896558a78759d4e79ab4721ff1/ledger/benches/blockstore.rs#L103)
+    pub fn @"BlockstoreReader.getDataShred - Random"() !sig.time.Duration {
+        const allocator = std.heap.c_allocator;
+        var state = try State.init(allocator, "bench read random", .noop);
+        defer state.deinit();
+        var inserter = try state.shredInserter();
+        var reader = try state.reader();
+
+        const shreds_path = "agave.blockstore.bench_read.shreds.bin";
+        const shreds = try testShreds(std.heap.c_allocator, shreds_path);
+        defer deinitShreds(allocator, shreds);
+
+        const total_shreds = shreds.len;
+        _ = try ledger.shred_inserter.shred_inserter.insertShredsForTest(&inserter, shreds);
+        const num_reads = total_shreds / 15;
+
+        const slot: u32 = 0;
+
+        var rng = std.Random.DefaultPrng.init(100);
+
+        var indices = try std.ArrayList(u32).initCapacity(inserter.allocator, num_reads);
+        defer indices.deinit();
+        for (0..num_reads) |_| {
+            indices.appendAssumeCapacity(rng.random().uintAtMost(u32, @intCast(total_shreds)));
+        }
+
+        var timer = try sig.time.Timer.start();
+        for (indices.items) |shred_index| {
+            _ = try reader.getDataShred(slot, shred_index);
+        }
+        return timer.read();
+    }
+
+    /// Analogous to [bench_serialize_write_bincode](https://github.com/anza-xyz/agave/blob/9c2098450ca7e5271e3690277992fbc910be27d0/ledger/benches/protobuf.rs#L88)
+    pub fn @"Database.put Rewards"() !sig.time.Duration {
+        const allocator = std.heap.c_allocator;
+        var state = try State.init(allocator, "bench serialize write bincode", .noop);
+        defer state.deinit();
+        const slot: u32 = 0;
+
+        var rewards: Rewards = try createRewards(allocator, 100);
+        const rewards_slice = try rewards.toOwnedSlice();
+        var timer = try sig.time.Timer.start();
+        try state.db.put(schema.rewards, slot, .{
+            .rewards = rewards_slice,
+            .num_partitions = null,
+        });
+        return timer.read();
+    }
+
+    /// Analogous to [bench_read_bincode](https://github.com/anza-xyz/agave/blob/9c2098450ca7e5271e3690277992fbc910be27d0/ledger/benches/protobuf.rs#L100)
+    pub fn @"Database.get Rewards"() !sig.time.Duration {
+        const allocator = std.heap.c_allocator;
+        var state = try State.init(allocator, "bench read bincode", .noop);
+        defer state.deinit();
+        const slot: u32 = 1;
+
+        var rewards: Rewards = try createRewards(allocator, 100);
+        try state.db.put(schema.rewards, slot, .{
+            .rewards = try rewards.toOwnedSlice(),
+            .num_partitions = null,
+        });
+        var timer = try sig.time.Timer.start();
+        _ = try state.db.get(allocator, schema.rewards, slot);
+        return timer.read();
+    }
+};
diff --git a/src/ledger/database/rocksdb.zig b/src/ledger/database/rocksdb.zig
index 7dd8a5ee7..d6a62a573 100644
--- a/src/ledger/database/rocksdb.zig
+++ b/src/ledger/database/rocksdb.zig
@@ -212,7 +212,6 @@ pub fn RocksDB(comptime column_families: []const ColumnFamily) type {
             defer key_bytes.deinit();
             const val_bytes = try value_serializer.serializeToRef(self.allocator, value);
             defer val_bytes.deinit();
-
             self.inner.put(
                 self.cf_handles[cf.find(column_families)],
                 key_bytes.data,
diff --git a/src/ledger/tests.zig b/src/ledger/tests.zig
index c516c036b..19cd4b873 100644
--- a/src/ledger/tests.zig
+++ b/src/ledger/tests.zig
@@ -3,6 +3,7 @@
 const std = @import("std");
 const sig = @import("../sig.zig");
 const ledger = @import("lib.zig");
+const transaction_status = @import("./transaction_status.zig");
 
 const Allocator = std.mem.Allocator;
 
@@ -77,9 +78,9 @@ test "insert shreds and transaction statuses then get blocks" {
     const blockhash = entries[entries.len - 1].hash;
     const blockhash_string = blockhash.base58String();
 
-    const shreds = try testShreds(prefix ++ "shreds.bin");
-    const more_shreds = try testShreds(prefix ++ "more_shreds.bin");
-    const unrooted_shreds = try testShreds(prefix ++ "unrooted_shreds.bin");
+    const shreds = try testShreds(std.testing.allocator, prefix ++ "shreds.bin");
+    const more_shreds = try testShreds(std.testing.allocator, prefix ++ "more_shreds.bin");
+    const unrooted_shreds = try testShreds(std.testing.allocator, prefix ++ "unrooted_shreds.bin");
     defer inline for (.{ shreds, more_shreds, unrooted_shreds }) |slice| {
         deinitShreds(std.testing.allocator, slice);
     };
@@ -255,9 +256,9 @@ pub fn freshDir(path: []const u8) !void {
 
 const test_shreds_dir = sig.TEST_DATA_DIR ++ "/shreds";
 
-fn testShreds(comptime filename: []const u8) ![]const Shred {
+pub fn testShreds(allocator: std.mem.Allocator, comptime filename: []const u8) ![]const Shred {
     const path = comptimePrint("{s}/{s}", .{ test_shreds_dir, filename });
-    return loadShredsFromFile(std.testing.allocator, path);
+    return loadShredsFromFile(allocator, path);
 }
 
 /// Read shreds from binary file structured like this:
@@ -329,6 +330,18 @@ pub fn deinitShreds(allocator: Allocator, shreds: []const Shred) void {
 
 /// Read entries from binary file structured like this:
 /// [entry0_len: u64(little endian)][entry0_bincode][entry1_len...
+///
+/// loadEntriesFromFile can read entries produced by this Rust function:
+/// ```rust
+/// fn save_entries_to_file(entries: &[Entry], path: &str) {
+///     let mut file = std::fs::File::create(path).unwrap();
+///     for entry in entries {
+///         let payload = bincode::serialize(&entry).unwrap();
+///         file.write(&payload.len().to_le_bytes()).unwrap();
+///         file.write(&*payload).unwrap();
+///     }
+/// }
+/// ```
 pub fn loadEntriesFromFile(allocator: Allocator, path: []const u8) ![]const Entry {
     const file = try std.fs.cwd().openFile(path, .{});
     const reader = file.reader();