Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixing some remaining riak-admin relics #1886

Open
wants to merge 17 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/erlang.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ jobs:
image: erlang:${{ matrix.otp }}

steps:
- uses: lukka/get-cmake@latest
- uses: actions/checkout@v2
- name: Compile
run: ./rebar3 compile
Expand Down
2 changes: 1 addition & 1 deletion eqc/crdt_statem_eqc.erl
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ next_state(#state{vnodes=VNodes0, mod_state=Expected, mod=Mod}=S,V,
VNodes = lists:keyreplace(ID, 1, VNodes0, {ID, V}),
S#state{vnodes=VNodes, mod_state=Mod:update_expected(ID, Op, Expected)};
next_state(#state{vnodes=VNodes0, mod_state=Expected0, mod=Mod}=S,V,
{call,?MODULE, merge, [_Mod, {IDS, _C}=_Source, {ID, _C}=_Dest]}) ->
{call,?MODULE, merge, [_Mod, {IDS, C}=_Source, {ID, C}=_Dest]}) ->
VNodes = lists:keyreplace(ID, 1, VNodes0, {ID, V}),
Expected = Mod:update_expected(ID, {merge, IDS}, Expected0),
S#state{vnodes=VNodes, mod_state=Expected};
Expand Down
64 changes: 33 additions & 31 deletions priv/riak_kv.schema
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@
]}.

%% @doc A path under which the repl real-time overload queue will be stored.
%% @doc A path under which the reaper overload queue will be stored.
{mapping, "replrtq_dataroot", "riak_kv.replrtq_dataroot", [
{default, "$(platform_data_dir)/kv_replrtqsrc"},
{datatype, directory}
Expand Down Expand Up @@ -108,8 +107,8 @@

%% @doc Parallel key store type
%% When running in parallel mode, which will be the default if the backend does
%% not support native tictac aae (i.e. is not leveled), what type of parallel
%% key store should be kept - leveled_ko (leveled and key-ordered), or
%% not support native tictac aae (i.e. is not leveled), what type of parallel
%% key store should be kept - leveled_ko (leveled and key-ordered), or
%% leveled_so (leveled and segment ordered).
%% When running in native mode, this setting is ignored
{mapping, "tictacaae_parallelstore", "riak_kv.tictacaae_parallelstore", [
Expand All @@ -119,7 +118,7 @@
]}.

%% @doc Minimum Rebuild Wait
%% The minimum number of hours to wait between rebuilds. Default value is 2
%% The minimum number of hours to wait between rebuilds. Default value is 2
%% weeks
{mapping, "tictacaae_rebuildwait", "riak_kv.tictacaae_rebuildwait", [
{datatype, integer},
Expand All @@ -128,18 +127,18 @@

%% @doc Maximum Rebuild Delay
%% The number of seconds which represents the length of the period in which the
%% next rebuild will be scheduled. So if all vnodes are scheduled to rebuild
%% at the same time, they will actually rebuild randomly between 0 and this
%% next rebuild will be scheduled. So if all vnodes are scheduled to rebuild
%% at the same time, they will actually rebuild randomly between 0 and this 
%% value (in seconds) after the rebuild time. Default value is 4 days
{mapping, "tictacaae_rebuilddelay", "riak_kv.tictacaae_rebuilddelay", [
{datatype, integer},
{default, 345600}
]}.

%% @doc Store heads in parallel key stores
%% If running a parallel key store, the whole "head" object may be stored to
%% allow for fold_heads queries to be run against the parallel store.
%% Alternatively, the cost of the parallel key store can be reduced by storing
%% If running a parallel key store, the whole "head" object may be stored to
%% allow for fold_heads queries to be run against the parallel store.
%% Alternatively, the cost of the parallel key store can be reduced by storing
%% only a minimal data set necessary for AAE and monitoring
{mapping, "tictacaae_storeheads", "riak_kv.tictacaae_storeheads", [
{datatype, {flag, enabled, disabled}},
Expand All @@ -151,10 +150,10 @@
%% The number of milliseconds which the vnode must wait between self-pokes to
%% maybe prompt the next exchange. Default is 8 minutes - check all partitions
%% when n=3 once every hour (in each direction). A cycle of exchanges will
%% take (n - 1) * n + 1 exchange ticks for each nval.
%% take (n - 1) * n + 1 exchange ticks for each nval.
%% Note if this is to be reduced further the riak_core vnode_inactivity_timeout
%% should also be reduced or handoffs may be blocked. To be safe the
%% vnode_inactivity_timeout must be < 0.5 * the tictacaae_exchangetick.
%% should also be reduced or handoffs may be blocked. To be safe the
%% vnode_inactivity_timeout must be < 0.5 * the tictacaae_exchangetick.
{mapping, "tictacaae_exchangetick", "riak_kv.tictacaae_exchangetick", [
{datatype, integer},
{default, 480000},
Expand All @@ -180,7 +179,7 @@
%% faster by doubling. There are 1M segments in a standard tree overall.
%% Performance tuning can also be made by adjusting the `tictacaae_repairloops`
%% and `tictacaae_rangeboost` - but `tictacaae_maxresults` is the simplest
%% factor that is likely to result in a relatively predictable (and linear)
%% factor that is likely to result in a relatively predictable (and linear)
%% outcome in terms of both CPU cost and repair speed.
{mapping, "tictacaae_maxresults", "riak_kv.tictacaae_maxresults", [
{datatype, integer},
Expand All @@ -204,18 +203,18 @@
hidden
]}.

%% @doc Multiplier to the `tictacaae_maxresults` when following an initial AAE
%% @doc Multiplier to the `tictacaae_maxresults` when following an initial AAE 
%% exchange with a range-limited exchange.
%% After each exchange, where sufficient deltas are discovered there will be a
%% `tictacaae_repairloops` number of range-limited queries (assuming
%% sufficient results continue to be found). Each of these may have the
%% the number of max results boosted by this integer factor.
%% the number of max results boosted by this integer factor.
%% For example, if `tictacaae_maxresults` is set to 64, and
%% `tictacaae_repairloops` is set to 4, and the `tictacaae_rangeboost` is set
%% to 2 - the initial loop will use `tictacaae_maxresults` of 64, but any
%% AAE exchanges on loops 1 to 4 will use 128.
%% Exchanges with range-limited queries are more efficient, and so more tree
%% segments can be fetched without creating significant CPU overheads, hence
%% segments can be fetched without creating significant CPU overheads, hence
%% the use of this boost to maxresults.
{mapping, "tictacaae_rangeboost", "riak_kv.tictacaae_rangeboost", [
{datatype, integer},
Expand Down Expand Up @@ -249,7 +248,7 @@
%% Separate assured forwarding pools will be used of `af_worker_pool_size` for
%% informational aae_folds (find_keys, object_stats) and functional folds
%% (merge_tree_range, fetch_clock_range). The be_pool is used only for tictac
%% AAE rebuilds at present
%% AAE rebuilds at present
{mapping, "node_worker_pool_size", "riak_kv.node_worker_pool_size", [
{datatype, integer},
{default, 4}
Expand Down Expand Up @@ -470,7 +469,10 @@

{translation,
"riak_kv.aae_throttle_limits",
riak_core_throttle:create_limits_translator_fun("anti_entropy", "mailbox_size")
begin
lists:foreach(fun code:add_path/1, filelib:wildcard("lib/*/ebin")),
riak_core_throttle:create_limits_translator_fun("anti_entropy", "mailbox_size")
end
}.

%% @see leveldb.bloomfilter
Expand Down Expand Up @@ -943,11 +945,11 @@
]}.


%% @doc For Tictac full-sync does all data need to be sync'd, or should a
%% specific bucket be sync'd (bucket), or a specific bucket type (type).
%% @doc For Tictac full-sync does all data need to be sync'd, or should a
%% specific bucket be sync'd (bucket), or a specific bucket type (type).
%% Note that in most cases sync of all data is lower overhead than sync of
%% a subset of data - as cached AAE trees will be used.
%% TODO: type is not yet implemented.
%% TODO: type is not yet implemented.
{mapping, "ttaaefs_scope", "riak_kv.ttaaefs_scope", [
{datatype, {enum, [all, bucket, type, disabled]}},
{default, disabled}
Expand Down Expand Up @@ -993,7 +995,7 @@
%% If using range_check to speed-up repairs, this can be reduced as the
%% range_check maxresults will be boosted by the ttaaefs_rangeboost When using
%% range_check a value of 64 is recommended, which may be reduced to 32 or 16
%% if the cluster has a very large volume of keys and/or limited capacity.
%% if the cluster has a very large volume of keys and/or limited capacity.
%% Only reduce below 16 in exceptional circumstances.
%% More capacity to process sync queries can be added by increasing the af2
%% and af3 queue sizes - but this will be at the risk of there being a bigger
Expand All @@ -1008,29 +1010,29 @@
%% ttaaefs_max results * ttaaefs_rangeboost.
%% When using range_check, a small maxresults can be used, in effect using
%% other *_check syncs as discovery queries (to find the range_check for the
%% range_check to do the heavy lifting)
%% range_check to do the heavy lifting)
{mapping, "ttaaefs_rangeboost", "riak_kv.ttaaefs_rangeboost", [
{datatype, integer},
{default, 16}
]}.

%% @doc For Tictac bucket full-sync which bucket should be sync'd by this
%% node. Only ascii string bucket definitions supported (which will be
%% converted using list_to_binary).
%% node. Only ascii string bucket definitions supported (which will be
%% converted using list_to_binary).
{mapping, "ttaaefs_bucketfilter_name", "riak_kv.ttaaefs_bucketfilter_name", [
{datatype, string},
{commented, "sample_bucketname"}
]}.

%% @doc For Tictac bucket full-sync what is the bucket type of the bucket name.
%% @doc For Tictac bucket full-sync what is the bucket type of the bucket name.
%% Only ascii string type bucket definitions supported (these
%% definitions will be converted to binary using list_to_binary)
{mapping, "ttaaefs_bucketfilter_type", "riak_kv.ttaaefs_bucketfilter_type", [
{datatype, string},
{commented, "default"}
]}.

%% @doc For Tictac bucket-type full-sync what is the bucket type to be sync'd.
%% @doc For Tictac bucket-type full-sync what is the bucket type to be sync'd.
%% Only ascii string type bucket definitions supported (these
%% definitions will be converted to binary using list_to_binary).
%% TODO: Type-based filtering is not yet supported
Expand Down Expand Up @@ -1131,7 +1133,7 @@
%% The af3_queue size, and the ttaaefs_maxresults, both need to be tuned to
%% ensure that the allcheck can run within the 30 minute timeout.
%% For per-bucket replication all is a reference to all of the data for that
%% bucket, and warnings about sizing are specially relevant.
%% bucket, and warnings about sizing are especially relevant. 
{mapping, "ttaaefs_allcheck", "riak_kv.ttaaefs_allcheck", [
{datatype, integer},
{default, 0}
Expand Down Expand Up @@ -1177,7 +1179,7 @@
%% @doc How many times per 24hour period should the a range_check be run. The
%% range_check is intended to be a smart check, in that it will:
%% - use a last_modified range starting from the last successful check as its
%% range if the last check was successful (i.e. showed the clusters to be
%% range if the last check was successful (i.e. showed the clusters to be
%% in sync);
%% - use a range identified by the last check (a last modified range, and
%% perhaps also a specific Bucket) if a range to limit the issues has been
Expand Down Expand Up @@ -1404,7 +1406,7 @@

%% @doc Enable the `recalc` compaction strategy within the leveled backend in
%% riak. The default (when disabled) is `retain`, but this will leave
%% uncollected garbage within the, journal.
%% uncollected garbage within the journal. 
%% It is now recommended from Riak KV 2.9.2 to consider the `recalc` strategy.
%% This strategy has a side effect of slower startups, and slower recovery
%% from a wiped ledger - but it will not keep an overhead of garbage within
Expand Down Expand Up @@ -1439,7 +1441,7 @@
%% each worker is taking per query in microseconds, so the overall queries
%% per second supported will be:
%% (1000000 div worker_vnode_pool_worktime) * n_val * worker_count
%% It should normally be possible to support >> 100 queries per second with
%% It should normally be possible to support >> 100 queries per second with
%% just a single worker per vnode.
%% The statistic worker_vnode_pool_queuetime_mean will track the average time
%% a query is spending on a queue, should the vnode pool be exhausted.
Expand Down
17 changes: 9 additions & 8 deletions rebar.config
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
%% -*- mode: erlang -*-
{minimum_otp_vsn, "22.0"}.

{src_dirs, ["./priv/tracers", "./src"]}.
Expand Down Expand Up @@ -42,16 +43,16 @@
]}.

{deps, [
{riak_core, {git, "https://github.com/basho/riak_core.git", {branch, "develop"}}},
{sidejob, {git, "https://github.com/basho/sidejob.git", {branch, "develop"}}},
{redbug, "2.0.8"},
{sext, "1.8.0"},
{sidejob, "2.1.0"},
{recon, "2.5.2"},
{hyper, {git, "https://github.com/basho/hyper", {tag, "1.1.0"}}},
{riak_core, {git, "https://github.com/TI-Tokyo/riak_core.git", {branch, "develop"}}},
{bitcask, {git, "https://github.com/basho/bitcask.git", {branch, "develop"}}},
{redbug, {git, "https://github.com/shiguredo/redbug", {branch, "otp-25"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.2"}}},
{sext, {git, "https://github.com/uwiger/sext.git", {tag, "1.8.0"}}},
{riak_pipe, {git, "https://github.com/basho/riak_pipe.git", {branch, "develop"}}},
{riak_dt, {git, "https://github.com/basho/riak_dt.git", {branch, "develop"}}},
{riak_api, {git, "https://github.com/basho/riak_api.git", {branch, "develop"}}},
{hyper, {git, "https://github.com/basho/hyper", {tag, "1.1.0"}}},
{kv_index_tictactree, {git, "https://github.com/martinsumner/kv_index_tictactree.git", {branch, "develop-3.1"}}},
{rhc, {git, "https://github.com/basho/riak-erlang-http-client", {branch, "develop-3.2-otp24"}}}
{kv_index_tictactree, {git, "https://github.com/martinsumner/kv_index_tictactree.git", {tag, "1.0.2"}}},
{riakhttpc, {git, "https://github.com/basho/riak-erlang-http-client", {tag, "3.0.10"}}}
]}.
2 changes: 1 addition & 1 deletion src/riak_kv.app.src
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
redbug,
recon,
riakc,
rhc
riakhttpc
]},
{registered, []},
{mod, {riak_kv_app, []}},
Expand Down
10 changes: 5 additions & 5 deletions src/riak_kv_bucket.erl
Original file line number Diff line number Diff line change
Expand Up @@ -1263,7 +1263,7 @@ immutable_consistent(undefined, _N, undefined, _Bad) ->
immutable_consistent(true, _N, undefined, _Bad) ->
%% consistent still set to true and n_val not modified
true;
immutable_consistent(Consistent, _N, _N, _Bad) when Consistent =:= undefined orelse
immutable_consistent(Consistent, N, N, _Bad) when Consistent =:= undefined orelse
Consistent =:= true ->
%% consistent not modified or still set to true and n_val
%% modified but set to same value
Expand Down Expand Up @@ -1306,10 +1306,10 @@ undefined_props(Names, Props, Errors) ->
immutable_dt(_NewDT=undefined, _NewAllowMult=undefined, _ExistingDT, _Bad) ->
%% datatype and allow_mult are not being modified, so its valid
true;
immutable_dt(_Datatype, undefined, _Datatype, _Bad) ->
immutable_dt(Datatype, undefined, Datatype, _Bad) ->
%% data types from new and existing match and allow mult not modified, valid
true;
immutable_dt(_Datatype, true, _Datatype, _Bad) ->
immutable_dt(Datatype, true, Datatype, _Bad) ->
%% data type from new and existing match and allow mult still set to true,
%% valid
true;
Expand All @@ -1325,7 +1325,7 @@ immutable_dt(_Datatype, true, _Datatype2, Bad) ->
immutable_dt(_Datatype, false, undefined, Bad) ->
%% datatype defined when it wasn't before
has_datatype(Bad);
immutable_dt(_Datatype, false, _Datatype, Bad) ->
immutable_dt(Datatype, false, Datatype, Bad) ->
%% attempt to set allow_mult to false when data type set is invalid, datatype not modified
has_allow_mult(Bad);
immutable_dt(undefined, false, _Datatype, Bad) ->
Expand All @@ -1337,7 +1337,7 @@ immutable_dt(_Datatype, false, _Datatype2, Bad) ->
immutable_dt(undefined, _, _Datatype, Bad) ->
%% datatype not modified but allow_mult is invalid
has_allow_mult(Bad);
immutable_dt(_Datatype, _, _Datatype, Bad) ->
immutable_dt(Datatype, _, Datatype, Bad) ->
%% allow mult is invalid but data types still match
has_allow_mult(Bad);
immutable_dt(_, _, _, Bad) ->
Expand Down
Loading