
Commit

Cosmetics
 * Clarify some comments
 * Log less where it is arguably not very important
michaelklishin committed Apr 24, 2024
1 parent 46cf4cb commit 4f19473
Showing 5 changed files with 14 additions and 15 deletions.
2 changes: 1 addition & 1 deletion src/ra_server.erl
@@ -573,7 +573,7 @@ handle_leader({commands, Cmds}, #{cfg := #cfg{id = Self,
{State, _, Effects} = make_pipelined_rpc_effects(State0, Effects0),
{leader, State, Effects};
{not_appended, wal_down, State0, Effects} ->
-?WARN("~ts ~b commands NOT appended to log. Reason: wal_down",
+?WARN("~ts ~b commands NOT appended to Raft log. Reason: wal_down",
[LogId, length(Cmds)]),
CondEffs = case maps:to_list(maps:remove(Self, Cluster)) of
[] -> [];
7 changes: 3 additions & 4 deletions test/coordination_SUITE.erl
@@ -816,7 +816,6 @@ segment_writer_or_wal_crash_follower(Config) ->
end || {Name, _Node} = NodeId <- ServerIds],
{ok, Started, []} = ra:start_cluster(?SYS, Configs),


{ok, _, Leader} = ra:members(hd(Started)),
[{FollowerName, FollowerNode} = Follower, _] = lists:delete(Leader, Started),

@@ -893,12 +892,12 @@ segment_writer_or_wal_crash_follower(Config) ->
ok.

segment_writer_or_wal_crash_leader(Config) ->
-%% this test crashes the segment writer for a follower node whilst the
+%% This test crashes the segment writer for a follower node whilst the
%% ra cluster is active and receiving and replicating commands.
-%% it tests the segment writer and wal is able to recover without the
+%% It tests the segment writer and wal are able to recover without the
%% follower crashing.
%% Finally we stop and restart the follower to make sure it can recover
-%% correactly and that the log data contains no missing entries
+%% correctly and that the log data does not miss any entries
PrivDir = ?config(data_dir, Config),
ClusterName = ?config(cluster_name, Config),
ServerNames = [s1, s2, s3],
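The clarified comment above describes crashing the segment writer (or the wal) on a node while the cluster keeps accepting and replicating commands. A minimal sketch of how such a crash can be induced over Erlang distribution from the test node; the helper is hypothetical and the registered names ra_log_wal and ra_log_segment_writer are assumptions here, not something this diff states:

%% Hypothetical helper, not part of this commit: kill a registered process on
%% a remote node and rely on its supervisor to restart it.
kill_registered_on(Node, RegName) ->
    case rpc:call(Node, erlang, whereis, [RegName]) of
        Pid when is_pid(Pid) ->
            true = rpc:call(Node, erlang, exit, [Pid, kill]),
            ok;
        undefined ->
            {error, not_running};
        {badrpc, _} = Err ->
            {error, Err}
    end.

A test along these lines would keep applying commands while the process restarts and, as the comment says, finally stop and restart the follower to check that the recovered log has no missing entries.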
6 changes: 3 additions & 3 deletions test/ra_log_2_SUITE.erl
@@ -607,16 +607,16 @@ resend_write_after_tick(Config) ->
ct:pal("ra_log_init"),
Log0 = ra_log_init(Config),
{0, 0} = ra_log:last_index_term(Log0),
-ct:pal("appending"),
+%% ct:pal("appending"),
meck:expect(ra_log_wal, write, fun (_, _, _, _, _) ->
{ok, WalPid}
end),
Log1 = ra_log:append({1, 2, banana}, Log0),
%% this append should be lost
meck:unload(ra_log_wal),
%% restart wal to get a new wal pid so that the ra_log detects on tick
-%% that the walhas changed
-ct:pal("restart wal"),
+%% that the wal process has changed
+%% ct:pal("restart wal"),
restart_wal(),

Ms = erlang:system_time(millisecond) + 5001,
6 changes: 3 additions & 3 deletions test/ra_log_segment_SUITE.erl
@@ -78,7 +78,7 @@ corrupted_segment(Config) ->
ok = open_write_close(1, 2, Data, Fn),
%% truncate file a bit to simulate lost bytes
truncate(Fn, {eof, -2}),
-% ct:pal("DUMP PRE ~p", [ra_log_segment:dump_index(Fn)]),
+%% ct:pal("DUMP PRE ~p", [ra_log_segment:dump_index(Fn)]),
%% check that the current state throws a missing key
{ok, SegR0} = ra_log_segment:open(Fn, #{mode => read}),
?assertExit({missing_key, 2},
@@ -92,8 +92,8 @@ corrupted_segment(Config) ->
% write_trunc_until_full(Fn),

{ok, SegR} = ra_log_segment:open(Fn, #{mode => read}),
-ct:pal("Range ~p", [ra_log_segment:segref(SegR)]),
-ct:pal("SegR ~p", [SegR]),
+%% ct:pal("Range ~p", [ra_log_segment:segref(SegR)]),
+%% ct:pal("SegR ~p", [SegR]),
[{1, 2, Data}] =
ra_log_segment:fold(SegR, 1, 1,
fun ra_lib:id/1,
8 changes: 4 additions & 4 deletions test/ra_server_SUITE.erl
@@ -800,8 +800,8 @@ wal_down_condition_leader(_Config) ->
meck:expect(ra_log, can_write, fun (_L) -> false end),
meck:expect(ra_log, reset_to_last_known_written, fun (L) -> L end),

-%% when the wal is down the leader should transition to awai_condition,
-%% on timeout it should attempt a leader change effect should be emitted
+%% when the wal is down the leader should transition to await_condition,
+%% on timeout a leader change effect should be emitted
%% such that it can concede leadership in the interest of Ra cluster
%% progress
{await_condition,
@@ -814,13 +814,13 @@ wal_down_condition_leader(_Config) ->
transition_to := leader}}} = State1, _}
= ra_server:handle_leader({command, Cmd}, State0),

-% if there condition times out, return to leader and begin transfer leadership
+% if awaiting for the condition times out, return to the leader and begin transferring leadership
% process
{leader, #{} = State2, [{next_event, cast,
{transfer_leadership, _}}]}
= ra_server:handle_await_condition(await_condition_timeout, State1),

-%% but not if condition no longer manifests
+%% but not if the condition no longer manifests
meck:expect(ra_log, append, fun (_Es, L) -> L end),
meck:expect(ra_log, can_write, fun (_L) -> true end),
{leader, #{} = State2, []}
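Because the context between the two hunks above is collapsed, it may help to read the asserted flow end to end. This is a condensed sketch only, assuming State0 and Cmd are built as earlier in the suite (their setup sits outside the hunks shown), and assuming the final assertion re-runs handle_await_condition, which the truncated diff does not show:

%% Sketch of the behaviour the test asserts; not the suite's actual code.
wal_down_leader_flow_sketch(State0, Cmd) ->
    %% 1. With the wal unavailable, handling a command moves the leader into
    %%    await_condition instead of appending to its log.
    meck:expect(ra_log, can_write, fun (_Log) -> false end),
    {await_condition, State1, _Effects} =
        ra_server:handle_leader({command, Cmd}, State0),

    %% 2. If the condition times out, the server returns to the leader state
    %%    and emits an effect that starts a leadership transfer, so the
    %%    cluster can make progress under a node with a working wal.
    {leader, _State2, [{next_event, cast, {transfer_leadership, _}}]} =
        ra_server:handle_await_condition(await_condition_timeout, State1),

    %% 3. If the wal recovers before the timeout, the condition clears and
    %%    no leadership transfer is attempted (assumed final call; the diff
    %%    is cut off at this point).
    meck:expect(ra_log, append, fun (_Es, Log) -> Log end),
    meck:expect(ra_log, can_write, fun (_Log) -> true end),
    {leader, _State3, []} =
        ra_server:handle_await_condition(await_condition_timeout, State1),
    ok.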
